Example #1
from nnabla.initializer import (
    ConstantInitializer, NormalInitializer, UniformInitializer,
    # RangeInitializer is assumed to come from the same module; its
    # availability may depend on the NNabla version.
    RangeInitializer)


def _get_generator(proto):
    # Map a protobuf initializer description to a concrete NNabla
    # initializer, parameterized by the single `multiplier` field.
    if proto.type == 'Normal':
        return NormalInitializer(sigma=proto.multiplier)
    elif proto.type == 'Uniform':
        return UniformInitializer(lim=(-proto.multiplier, proto.multiplier))
    elif proto.type == 'Range':
        return RangeInitializer(start=0, step=proto.multiplier)
    elif proto.type == 'Constant':
        return ConstantInitializer(value=proto.multiplier)
    else:
        raise ValueError(
            'Generator type "{}" is not supported.'.format(proto.type))
Example #2
import numpy as np

import nnabla as nn
from nnabla.initializer import (
    ConstantInitializer, NormalInitializer, UniformInitializer,
    # RangeInitializer is assumed to come from the same module; its
    # availability may depend on the NNabla version.
    RangeInitializer,
    calc_normal_std_glorot, calc_normal_std_he_backward,
    calc_normal_std_he_forward, calc_uniform_lim_glorot)
from nnabla.parameter import get_parameter_or_create


def _create_variable(v, name, shape, rng):
    # Create and initialize a variable described by the protobuf message `v`.
    class Variable:
        # Lightweight record holding the variable's metadata (filled below).
        pass

    parameter = v.type == "Parameter"
    variable_instance = None
    if parameter:
        if v.initializer.type == 'Normal':
            initializer = NormalInitializer(v.initializer.multiplier, rng=rng)
        elif v.initializer.type == 'NormalAffineHe' or v.initializer.type == 'NormalAffineHeForward':
            initializer = (lambda shape: NormalInitializer(
                calc_normal_std_he_forward(shape[0], np.prod(shape[1:])),
                rng=rng)(shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalAffineHeBackward':
            initializer = (lambda shape: NormalInitializer(
                calc_normal_std_he_backward(shape[0], np.prod(shape[1:])),
                rng=rng)(shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalAffineGlorot':
            initializer = (lambda shape: NormalInitializer(
                calc_normal_std_glorot(shape[0], np.prod(shape[1:])), rng=rng)
                           (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalConvolutionHe' or v.initializer.type == 'NormalConvolutionHeForward':
            initializer = (
                lambda shape: NormalInitializer(calc_normal_std_he_forward(
                    shape[-3], shape[0], kernel=shape[-2:]),
                                                rng=rng)
                (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalConvolutionHeBackward':
            initializer = (
                lambda shape: NormalInitializer(calc_normal_std_he_backward(
                    shape[-3], shape[0], kernel=shape[-2:]),
                                                rng=rng)
                (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalConvolutionGlorot':
            initializer = (lambda shape: NormalInitializer(
                calc_normal_std_glorot(shape[-3], shape[0], kernel=shape[-2:]),
                rng=rng)(shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalCLConvHe' or v.initializer.type == 'NormalCLConvHeForward':
            initializer = (
                lambda shape: NormalInitializer(calc_normal_std_he_forward(
                    shape[-1], shape[0], kernel=shape[1:3]),
                                                rng=rng)
                (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalCLConvHeBackward':
            initializer = (
                lambda shape: NormalInitializer(calc_normal_std_he_backward(
                    shape[-1], shape[0], kernel=shape[1:3]),
                                                rng=rng)
                (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'NormalCLConvGlorot':
            initializer = (lambda shape: NormalInitializer(
                calc_normal_std_glorot(shape[-1], shape[0], kernel=shape[1:3]),
                rng=rng)(shape) * v.initializer.multiplier)
        elif v.initializer.type == 'Uniform':
            initializer = UniformInitializer(
                lim=[-v.initializer.multiplier, v.initializer.multiplier],
                rng=rng)
        elif v.initializer.type == 'UniformAffineGlorot':
            initializer = (lambda shape: UniformInitializer(
                calc_uniform_lim_glorot(shape[0], np.prod(shape[1:])), rng=rng)
                           (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'UniformConvolutionGlorot':
            initializer = (
                lambda shape: UniformInitializer(calc_uniform_lim_glorot(
                    shape[-3], shape[0], kernel=shape[-2:]),
                                                 rng=rng)
                (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'UniformCLConvGlorot':
            initializer = (
                lambda shape: UniformInitializer(calc_uniform_lim_glorot(
                    shape[-1], shape[0], kernel=shape[1:3]),
                                                 rng=rng)
                (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'Range':
            initializer = (lambda shape: RangeInitializer(0, 1)
                           (shape) * v.initializer.multiplier)
        elif v.initializer.type == 'Constant':
            initializer = ConstantInitializer(value=v.initializer.multiplier)
        else:
            initializer = None
        print("create parameter: {}".format(name))
        variable_instance = get_parameter_or_create(name, shape, initializer)
    else:
        # Create an empty variable; memory will be allocated in
        # network.setup() after network optimization.
        variable_instance = nn.Variable()

    # Wrap metadata and the backing variable in the record defined above.
    variable = Variable()
    variable.name = name
    variable.parameter = parameter
    variable.shape = shape
    variable.variable_instance = variable_instance

    return variable
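A rough usage sketch for _create_variable, continuing from the imports above. The nested SimpleNamespace objects are hypothetical stand-ins for the protobuf variable message; the function only reads the type, initializer.type, and initializer.multiplier fields.

from types import SimpleNamespace

# Hypothetical stand-in for the protobuf variable description.
v = SimpleNamespace(
    type='Parameter',
    initializer=SimpleNamespace(type='UniformAffineGlorot', multiplier=1.0))

rng = np.random.RandomState(313)
var = _create_variable(v, 'affine/W', (128, 64), rng)
print(var.name, var.parameter, var.shape)  # affine/W True (128, 64)
print(var.variable_instance.shape)         # (128, 64)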