Code example #1
File: test_sparse.py  Project: muzzynine/examples-1
    def test_block_conversions(self):
        from ipu_sparse_ops import sparse
        a = np.kron([[1, 0], [1, 0]], [[1, 2], [3, 4]])
        b = np.kron([[0, 0], [1, 0]], [[4, 4], [4, 4]])
        dense = a + b
        bs = 2
        spec = sparse.matmul_spec_from_max(2 * bs, [1, 2 * bs],
                                           2,
                                           block_size=bs,
                                           dtype=tf.float32)
        blocks = np.reshape([1, 2, 3, 4, 5, 6, 7, 8], [2, bs, bs])
        t = ([0, 1], [0, 0], blocks)
        n = sparse.dense_from_triplets(spec, *t)
        assert_equal(dense, n)

        # Check that mask from dense and mask from triplets
        # return the same result:
        mask_dense = np.zeros_like(dense)
        mask_dense[np.nonzero(dense)] = 1
        mask_trips = sparse.mask_from_triplets(spec, *t)
        assert_equal(mask_dense, mask_trips)

        # Check triplets from dense returns same triplets:
        td = sparse.triplets_from_dense(dense, bs)
        assert_equal(t[0], td.row_indices)
        assert_equal(t[1], td.col_indices)
        assert_equal(t[2], td.values)
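The triplets used above are a block-level COO encoding: a list of block-row indices, a list of block-column indices, and a list of bs x bs value blocks. As a rough illustration of the dense matrix this test expects (a minimal numpy sketch with a made-up helper name, not the ipu_sparse_ops implementation of dense_from_triplets):

import numpy as np

def dense_from_block_triplets_sketch(rows, cols, blocks, n_block_rows, n_block_cols, bs):
    # Place each bs x bs block at its block-row/block-column position.
    blocks = np.asarray(blocks)
    dense = np.zeros((n_block_rows * bs, n_block_cols * bs), dtype=blocks.dtype)
    for r, c, block in zip(rows, cols, blocks):
        dense[r * bs:(r + 1) * bs, c * bs:(c + 1) * bs] = block
    return dense

# Reproduces the dense matrix asserted in the test above:
blocks = np.reshape([1, 2, 3, 4, 5, 6, 7, 8], [2, 2, 2])
print(dense_from_block_triplets_sketch([0, 1], [0, 0], blocks, 2, 2, 2))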
Code example #2
def make_triplets_test_inputs(args):
    input_size = args.input_size
    output_size = args.output_size
    batch_size = args.batch_size
    weights_type = tf.float16 if args.data_type == 'fp16' else tf.float32

    if args.pattern == 'fixed':
        rhs_values = np.random.rand(input_size, output_size)
        sparse_mask = np.identity(input_size)
        sparse_mask[1, 3] = 1
        sparse_mask[0, 7] = 1
        masked_rhs = np.multiply(sparse_mask[:, 0:output_size], rhs_values)
        triplets = sparse.triplets_from_dense(masked_rhs)
        fc = layers.SparseFcLayer.from_triplets(
            args.output_size, [args.batch_size, args.input_size], *triplets,
            matmul_options={"metaInfoBucketOversizeProportion": 0.1},
            name='sparse_fc_from_triplets',
            dtype=weights_type,
            bias=False, relu=False)
    elif args.pattern == 'random_sign_ones':
        indices_random_gen = np.random.default_rng(seed=random_seed)
        fc = layers.SparseFcLayer.from_random_generator(
            args.output_size, [args.batch_size, args.input_size], args.density,
            random_sign_ones_generator, indices_random_gen,
            matmul_options={"metaInfoBucketOversizeProportion": 0.1},
            name='sparse_fc_from_random_sign_ones', bias=False, relu=False)
        masked_rhs = sparse.dense_from_triplets(fc.weights.spec, *fc.weights.triplets)
    elif args.pattern == "random_orthogonal":
        fc = layers.SparseFcLayer.from_random_orthonormal_generator(
            args.output_size, [args.batch_size, args.input_size], args.density,
            matmul_options={"metaInfoBucketOversizeProportion": 0.1},
            name='sparse_fc_from_random_orthogonal', dtype=weights_type,
            bias=False, relu=False)
        masked_rhs = sparse.dense_from_triplets(fc.weights.spec, *fc.weights.triplets)
    else:
        random_gen = np.random.default_rng(seed=random_seed)
        indices_random_gen = np.random.default_rng(seed=random_seed)
        fc = layers.SparseFcLayer.from_random_generator(
            args.output_size, [args.batch_size, args.input_size], args.density,
            random_gen.standard_normal, indices_random_gen,
            matmul_options={"metaInfoBucketOversizeProportion": 0.1},
            name='sparse_fc_from_random',
            dtype=weights_type,
            bias=False, relu=False)
        masked_rhs = sparse.dense_from_triplets(fc.weights.spec, *fc.weights.triplets)
    return fc, masked_rhs.astype(weights_type.as_numpy_dtype())
Code example #3
File: test_sparse.py  Project: inejc/examples
    def test_conversions(self):
        from ipu_sparse_ops import sparse
        m = np.array([[10, 0], [0, 20]])
        t = sparse.triplets_from_dense(m)
        assert_equal(t[0], [0, 1])
        assert_equal(t[1], [0, 1])
        assert_equal(t[2], [10, 20])
        spec = sparse.matmul_spec_from_max(2, [1, 2], 2, tf.float32)
        n = sparse.dense_from_triplets(spec, *t)
        assert_equal(n, m)
        o = sparse.mask_from_triplets(spec, *t)
        assert_equal(o, np.array([[1, 0], [0, 1]]))
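For element-wise sparsity (block size 1) the triplets are just an ordinary COO encoding, which is what this round trip exercises. A minimal numpy sketch of the same idea, assuming only that the triplets are (row indices, column indices, values); the helper name is hypothetical:

import numpy as np

def triplets_from_dense_sketch(m):
    # COO encoding: indices and values of the non-zero entries.
    rows, cols = np.nonzero(m)
    return rows, cols, m[rows, cols]

m = np.array([[10, 0], [0, 20]])
rows, cols, values = triplets_from_dense_sketch(m)
assert list(rows) == [0, 1] and list(cols) == [0, 1] and list(values) == [10, 20]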
Code example #4
File: test_sparse.py  Project: muzzynine/examples-1
    def test_representation_round_trip_elements(self):
        from ipu_sparse_ops import sparse
        bs = 16
        block_mask = np.array([[1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1]])
        mask = np.kron(block_mask, np.ones(shape=[bs, bs])).astype(int)
        n_els = np.count_nonzero(mask)
        dense = np.zeros_like(mask)
        dense[np.nonzero(mask)] = np.arange(n_els)
        opts = {"metaInfoBucketOversizeProportion": 1}
        t = sparse.triplets_from_dense(dense)
        spec = sparse.matmul_spec_from_max(dense.shape[1], [2, dense.shape[0]],
                                           max_non_zeros=n_els,
                                           block_size=1,
                                           dtype=tf.float32)
        r = sparse.representation_from_triplets(spec, *t, opts)
        t_rt = sparse.triplets_from_representation(spec, r, opts)
        dense_rt = sparse.dense_from_triplets(spec, *t_rt)
        assert_equal(dense, dense_rt)
Code example #5
File: test_sparse.py  Project: muzzynine/examples-1
    def test_representation_round_trip_blocks(self):
        from ipu_sparse_ops import sparse
        for bs in [4, 8, 16]:
            # Create a mask that describes the non-zero block structure:
            block_mask = np.array([[1, 1, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
            n_blocks = np.count_nonzero(block_mask)
            # From that produce an element-wise mask using a Kronecker product:
            mask = np.kron(block_mask, np.ones(shape=[bs, bs])).astype(int)
            n_els = np.count_nonzero(mask)
            # Make a dense matrix from the element-wise mask and fill with random values:
            dense = np.zeros_like(mask, dtype=np.float32)
            values = np.random.rand(n_els)
            dense[np.nonzero(mask)] = values
            # Make the spec for the sparse matmul:
            opts = {"metaInfoBucketOversizeProportion": 1}
            spec = sparse.matmul_spec_from_max(dense.shape[1],
                                               [2, dense.shape[0]],
                                               max_non_zeros=n_blocks,
                                               block_size=bs,
                                               dtype=tf.float32)
            # Make triplets indices from the block mask:
            t = sparse.triplets_from_dense(block_mask)
            # Then fill in the triplets' values by extracting the blocks
            # from the dense matrix (this can't be done by reshaping):
            t_block = sparse.Triplets(
                t.row_indices, t.col_indices,
                sparse.blocks_at_indices(t.row_indices, t.col_indices, bs,
                                         dense))
            # Convert to the on-device representation and back and check the
            # result is the dense matrix we started with:
            r = sparse.representation_from_triplets(spec, *t_block, opts)
            t_rt = sparse.triplets_from_representation(spec, r, opts)
            dense_rt = sparse.dense_from_triplets(spec, *t_rt)
            assert_equal(dense, dense_rt)

            # Check triplets from dense returns original triplets:
            td = sparse.triplets_from_dense(dense_rt, bs)
            assert_equal(t_block.row_indices, td.row_indices)
            assert_equal(t_block.col_indices, td.col_indices)
            assert_equal(t_block.values, td.values)
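The comment above notes that block values cannot be obtained by reshaping the dense matrix; each non-zero block has to be sliced out at its block-row/column position. A short numpy sketch of that extraction, assuming this is what sparse.blocks_at_indices returns (illustrative only, not the library implementation):

import numpy as np

def blocks_at_indices_sketch(row_indices, col_indices, bs, dense):
    # Gather the bs x bs sub-block at each (block-row, block-column) index pair.
    return np.stack([
        dense[r * bs:(r + 1) * bs, c * bs:(c + 1) * bs]
        for r, c in zip(row_indices, col_indices)
    ])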
Code example #6
def make_triplets_test_inputs(args, data_type):
    input_size = args.input_size
    output_size = args.output_size
    batch_size = args.batch_size
    num_groups = 1
    topk_ratio = 0.5
    if args.pattern == 'fixed':
        rhs_values = np.random.rand(input_size, output_size)
        sparse_mask = np.identity(input_size)
        sparse_mask[1, 3] = 1
        sparse_mask[0, 7] = 1
        masked_rhs = np.multiply(sparse_mask[:, 0:output_size], rhs_values)
        triplets = sparse.triplets_from_dense(masked_rhs)
        fc = layers.SparseFcLayer.from_triplets(
            args.output_size, [args.batch_size, args.input_size], topk_ratio,
            *triplets)
    else:
        random_gen = np.random.default_rng(seed=random_seed)
        fc = layers.SparseFcLayer.from_random_generator(
            args.output_size, [args.batch_size, args.input_size], args.density,
            topk_ratio, random_gen.standard_normal, random_seed)
        masked_rhs = sparse.dense_from_triplets(fc.spec, *fc.triplets)
    return fc, masked_rhs
Code example #7
def prune_and_grow(name,
                   triplets,
                   shape,
                   spec,
                   max_non_zeros,
                   slot_triplets,
                   prune_schedule,
                   prune_ratio: float,
                   grad_w: np.ndarray,
                   grow_method='rigl',
                   random_gen=None,
                   ipu_pooling_type='NONE'):
    """
    Performs the pruning and growing of the weights to update the sparsity pattern, for the current fc layer
    :param name: A debug name
    :param triplets: Current triplets to prune and grow
    :param shape: Shape of the dense matrix
    :param spec: Specs of the sparse matmul
    :param max_non_zeros: Maximum number of non-zeros values
    :slot_triplets: Triplets for the current slots
    :param prune_schedule: a function which given max prune count returns the number of weights to update
    :param prune_ratio: the maximum percentage of this layer's weights to update
    :param grad_w: A numpy array containing the dense gradients for each sub-layer
    :param grow_method: Method used to regrow the weights, either rigl or random selection
    :param random_gen: Random number generator to be used if the grow_method is 'random'
    """
    if isinstance(grad_w, list):
        grad_w = grad_w[0]
    if isinstance(grad_w, dict):
        grad_w = [grad for grad in grad_w.values()][0]

    # Compute the prune count using the provided schedule, the max number of
    # non-zeros in the layer and the pruning ratio
    prune_count = prune_schedule(
        max_pruned=int(np.ceil(prune_ratio * max_non_zeros)))
    if prune_count == 0:
        logger.info("Nothing to prune according to prune schedule.")
        return None

    logger.info(
        f"Triplet stats before prune and grow for {name}: {sparse.triplet_stats(*triplets)}"
    )

    if logger.level <= logging.DEBUG:
        abs_nz_values = np.abs(triplets[2])
        if len(abs_nz_values.shape) > 1:
            abs_nz_values = abs_nz_values.sum(-1).sum(-1)
            block_input_size = spec.input_size // spec.block_size
            block_output_size = spec.output_size // spec.block_size
            block_spec = sparse.MatmulSpec(
                block_size=1,
                input_size=block_input_size,
                output_size=block_output_size,
                num_groups=spec.num_groups,
                batch_size=spec.batch_size,
                data_type=spec.data_type,
                max_non_zero_blocks=spec.max_non_zero_blocks,
                pooling_type=spec.pooling_type)
            dense_abs_weights = sparse.dense_from_triplets(
                block_spec, triplets[0], triplets[1], abs_nz_values)
            plot_and_log_matrix(name + "/abs_block_weights", dense_abs_weights)

    # Prune bottom k weights
    logger.debug(
        f"Pruning and grow also applies to these slot vars: {slot_triplets.keys()}"
    )
    slot_values = {
        name: triplet.values
        for name, triplet in slot_triplets.items()
    }

    remaining_triplets, remaining_slot_values = prune_bottom_k_weights(
        *triplets, slot_values, prune_count, name)

    # regrow weights
    logger.debug(
        f"Regrowing non-zeros for layer {name} using '{grow_method}' method.")
    if grow_method == 'rigl':
        weights_shape = np.array(triplets[2]).shape
        block_size = 1 if len(weights_shape) == 1 else weights_shape[-1]
        # Grow back new indices using RigL (https://arxiv.org/abs/1911.11134)
        if (shape != grad_w.shape
                and (ipu_pooling_type == "NONE" or block_size == 1)):
            raise RuntimeError(
                f"Dense weight gradient has unexpected shape.Expected {shape}, got {grad_w.shape}"
            )
        new_triplets = regrow_rigl(triplets, grad_w, zero_values_generator,
                                   prune_count, ipu_pooling_type == "NONE",
                                   name)
    elif grow_method == 'random':
        # Random replacement strategy: add back random indices
        # Gen some replacement random indices excluding all the existing
        # ones then we will swap for the pruned ones:
        new_triplets = sparse.random_triplets(
            spec,
            indices_initialiser_gen=random_gen,
            value_generator=zero_values_generator,
            excluded_indices=(triplets[0], triplets[1]),
            count=prune_count)

    grown_triplets, grown_slots = join_triplets(remaining_triplets,
                                                new_triplets,
                                                remaining_slot_values,
                                                prune_count)
    if len(grown_triplets[0]) != max_non_zeros:
        raise ValueError(
            f"Grown row count {len(grown_triplets[0])} does not match expected count {max_non_zeros}"
        )
    if len(grown_triplets[1]) != max_non_zeros:
        raise ValueError(
            f"Grown col count {len(grown_triplets[1])} does not match expected count {max_non_zeros}"
        )
    if len(grown_triplets[2]) != max_non_zeros:
        raise ValueError(
            f"Grown col count {len(grown_triplets[2])} does not match expected count {max_non_zeros}"
        )
    for grown_slot in grown_slots.values():
        if len(grown_slot) != max_non_zeros:
            raise ValueError(
                f"Grown col count {len(grown_slot)} does not match expected count {max_non_zeros}"
            )

    grown_triplets = sparse.Triplets(grown_triplets[0], grown_triplets[1],
                                     grown_triplets[2])
    grown_slots = {
        name: sparse.Triplets(grown_triplets[0], grown_triplets[1], grown_slot)
        for name, grown_slot in grown_slots.items()
    }

    logger.info(
        f"Triplet stats after prune and grow for {name}: {sparse.triplet_stats(*grown_triplets)}"
    )

    return {
        'gt': grown_triplets,
        'nt': new_triplets,
        'rt': remaining_triplets,
        'gs': grown_slots,
        'name': name
    }
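At a high level this routine prunes the prune_count smallest-magnitude non-zeros (block values are ranked by their summed absolute value, as the debug plotting above suggests) and then regrows the same number of indices, either with RigL using the dense gradient or at random. A minimal sketch of just the bottom-k pruning step, with a made-up helper name rather than the actual prune_bottom_k_weights implementation:

import numpy as np

def prune_bottom_k_sketch(rows, cols, values, prune_count):
    # Rank non-zeros by magnitude; block values are ranked by summed magnitude.
    magnitudes = np.abs(np.asarray(values, dtype=np.float64))
    if magnitudes.ndim > 1:
        magnitudes = magnitudes.reshape(magnitudes.shape[0], -1).sum(-1)
    # Keep everything except the prune_count smallest entries.
    keep = np.argsort(magnitudes)[prune_count:]
    return np.asarray(rows)[keep], np.asarray(cols)[keep], np.asarray(values)[keep]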
Code example #8
def make_fc_layer_and_test_inputs(args):
    input_size = args.input_size
    output_size = args.output_size
    batch_size = args.batch_size
    weights_type = tf.float16 if args.data_type == 'fp16' else tf.float32
    matmul_opts = {"metaInfoBucketOversizeProportion": args.meta_info_oversize}

    if args.pattern == 'fixed':
        in_blocks = input_size // args.block_size
        out_blocks = output_size // args.block_size
        identity_size = max(in_blocks, out_blocks)
        block_mask = np.identity(identity_size)[0:in_blocks, 0:out_blocks]
        block_mask[1, 3] = 1
        block_mask[0, 7] = 1
        n_blocks = np.count_nonzero(block_mask)
        el_mask = sparse.block_mask_to_element(block_mask, args.block_size)
        n_els = np.count_nonzero(el_mask)
        masked_rhs = np.zeros_like(el_mask, dtype=np.float32)
        values = np.random.rand(n_els)
        masked_rhs[np.nonzero(el_mask)] = values

        if args.block_size == 1:
            triplets = sparse.triplets_from_dense(masked_rhs)
        else:
            triplets = sparse.triplets_from_dense(block_mask)
            triplets = sparse.Triplets(
                triplets.row_indices, triplets.col_indices,
                sparse.blocks_at_indices(triplets.row_indices,
                                         triplets.col_indices, args.block_size,
                                         masked_rhs))
        fc = layers.SparseFcLayer.from_triplets(
            args.output_size, [args.batch_size, args.input_size],
            *triplets,
            matmul_options=matmul_opts,
            name='sparse_fc_from_triplets',
            dtype=weights_type,
            use_bias=False,
            relu=False,
            pooling_type=args.pooling_type)
    elif args.pattern == 'random_sign_ones':
        indices_random_gen = np.random.default_rng(seed=random_seed)
        fc = layers.SparseFcLayer.from_random_generator(
            args.output_size, [args.batch_size, args.input_size],
            args.density,
            args.block_size,
            random_sign_ones_generator,
            indices_random_gen,
            matmul_options=matmul_opts,
            name='sparse_fc_from_random_sign_ones',
            use_bias=False,
            relu=False,
            pooling_type=args.pooling_type)
        masked_rhs = sparse.dense_from_triplets(fc.weights.spec,
                                                *fc.weights.triplets)
    elif args.pattern == "random_orthogonal":
        if args.input_size != args.output_size:
            raise ValueError(
                "random_orthogonal pattern requires square matrix")

        matrix, max_non_zeros = sparse.gen_sparse_rand_orthog_mat(
            args.output_size, args.density, args.block_size)
        triplets = sparse.triplets_from_dense(matrix, args.block_size)

        fc = layers.SparseFcLayer.from_triplets(
            args.output_size, [args.batch_size, args.input_size],
            *triplets,
            matmul_options=matmul_opts,
            name='sparse_fc_random_orthogonal',
            dtype=weights_type,
            use_bias=False,
            relu=False,
            pooling_type=args.pooling_type)

        masked_rhs = sparse.dense_from_triplets(fc.weights.spec,
                                                *fc.weights.triplets)
    else:
        random_gen = np.random.default_rng(seed=random_seed)
        indices_random_gen = np.random.default_rng(seed=random_seed)
        fc = layers.SparseFcLayer.from_random_generator(
            args.output_size, [args.batch_size, args.input_size],
            args.density,
            args.block_size,
            random_gen.standard_normal,
            indices_random_gen,
            matmul_options=matmul_opts,
            name='sparse_fc_from_random',
            dtype=weights_type,
            use_bias=False,
            relu=False,
            pooling_type=args.pooling_type)
        masked_rhs = fc.weights.extract_dense()
    return fc, masked_rhs.astype(weights_type.as_numpy_dtype())
Code example #9
File: layers.py  Project: shyamalschandra/examples
    def extract_dense(self):
        return sparse.dense_from_triplets(self.spec, *self.triplets)
Code example #10
    def extract_dense(self) -> np.ndarray:
        return sparse.dense_from_triplets(self.spec, *self.triplets)