Example #1
    def create_variables(self, input_spaces, action_space=None):
        in_space = input_spaces["inputs[0]"]
        assert in_space.rank > 0, "ERROR: Must have input Space ({}) with rank larger than 0!".format(in_space)

        # Create weights matrix and (maybe) biases vector.
        weights_shape = (in_space.shape[0], self.units)
        self.weights_init = Initializer.from_spec(
            shape=weights_shape, specification=self.weights_spec)
        biases_shape = (self.units, )
        self.biases_init = Initializer.from_spec(
            shape=biases_shape, specification=self.biases_spec)

        # Wrapper for backend.
        if get_backend() == "tf":
            self.layer = tf.layers.Dense(
                units=self.units,
                activation=get_activation_function(self.activation,
                                                   *self.activation_params),
                kernel_initializer=self.weights_init.initializer,
                use_bias=(self.biases_spec is not False),
                bias_initializer=(self.biases_init.initializer
                                  or tf.zeros_initializer()),
                trainable=(False if self.trainable is False else True),
                _reuse=tf.AUTO_REUSE)

            # Now build the layer so that its variables get created.
            self.layer.build(in_space.get_shape(with_batch_rank=True))
            # Register the generated variables with our registry.
            self.register_variables(*self.layer.variables)
        elif get_backend() == "pytorch":
            # N.b. activation must be added as a separate 'layer' when assembling a network.
            # In features is the num of input channels.
            apply_bias = (self.biases_spec is not False)
            in_features = in_space.shape[1] if in_space.shape[0] == 1 else in_space.shape[0]
            # print("name = {}, ndim = {}, in space.shape = {}, in_features = {}, units = {}".format(
            #     self.name, ndim, in_space.shape, in_features, self.units))
            self.layer = nn.Linear(
                # In case there is a batch dim here due to missing preprocessing.
                in_features=in_features,
                out_features=self.units,
                bias=apply_bias)
            # Apply weight initializer
            if self.weights_init.initializer is not None:
                # Must be a callable in PyTorch
                self.weights_init.initializer(self.layer.weight)
            if apply_bias:
                if self.biases_spec is not None and self.biases_init.initializer is not None:
                    self.biases_init.initializer(self.layer.bias)
                else:
                    # Fill with zeros.
                    self.layer.bias.data.fill_(0)
            if self.activation is not None:
                # Activation function will be used in apply.
                self.activation_fn = get_activation_function(
                    self.activation, *self.activation_params)
            # Use unique scope as name.
            self.register_variables(
                PyTorchVariable(name=self.global_scope, ref=self.layer))
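The PyTorch branch above boils down to three steps: build an nn.Linear, run a callable initializer over its weight tensor, and zero-fill the bias unless a bias spec was given. A minimal standalone sketch of that pattern (not RLgraph's API; the layer sizes, the Xavier initializer and the relu activation are placeholder assumptions):

    import torch
    import torch.nn as nn

    units, in_features = 256, 64                     # assumed sizes, for illustration only
    layer = nn.Linear(in_features=in_features, out_features=units, bias=True)

    # PyTorch initializers are callables that mutate the tensor in place.
    nn.init.xavier_uniform_(layer.weight)
    layer.bias.data.fill_(0)                         # default: zero biases, as above

    # The activation is kept as a separate function and applied when the layer is called.
    activation_fn = torch.relu
    out = activation_fn(layer(torch.randn(8, in_features)))   # -> shape (8, 256)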
Example #2
    def create_variables(self, input_spaces, action_space=None):
        # Create weights matrix and (maybe) biases vector.
        shape = (self.vocab_size, self.embed_dim)
        self.initializer = Initializer.from_spec(shape=shape, specification=self.initializer_spec)
        # TODO: For IMPALA partitioner is not needed. Do this later.
        self.embedding_matrix = self.get_variable(
            name="embedding-matrix", shape=shape, dtype=convert_dtype("float"),
            initializer=self.initializer.initializer,
            # partitioner=self.partitioners, regularizer=self.regularizers,
            trainable=self.trainable
        )

        self.ids_space = input_spaces["ids"]
Example #3
    def get_variable(self, name, is_input_feed=False, add_batch_rank=None, add_time_rank=None,
                     time_major=None, is_python=False, local=False, **kwargs):
        add_batch_rank = self.has_batch_rank if add_batch_rank is None else add_batch_rank
        batch_rank = () if add_batch_rank is False else (None,) if add_batch_rank is True else (add_batch_rank,)

        add_time_rank = self.has_time_rank if add_time_rank is None else add_time_rank
        time_rank = () if add_time_rank is False else (None,) if add_time_rank is True else (add_time_rank,)

        time_major = self.time_major if time_major is None else time_major

        if time_major is False:
            shape = batch_rank + time_rank + self.shape
        else:
            shape = time_rank + batch_rank + self.shape

        if is_python is True or get_backend() == "python":
            if isinstance(add_batch_rank, int):
                if isinstance(add_time_rank, int):
                    if time_major:
                        var = [[0 for _ in range_(add_batch_rank)] for _ in range_(add_time_rank)]
                    else:
                        var = [[0 for _ in range_(add_time_rank)] for _ in range_(add_batch_rank)]
                else:
                    var = [0 for _ in range_(add_batch_rank)]
            elif isinstance(add_time_rank, int):
                var = [0 for _ in range_(add_time_rank)]
            else:
                var = []

            # Un-indent and just directly construct pytorch?
            if get_backend() == "pytorch" and is_input_feed:
                # Convert to PyTorch tensors as a faux placeholder.
                return torch.zeros(shape, dtype=convert_dtype(dtype=self.dtype, to="pytorch"))
            else:
                # TODO also convert?
                return var

        elif get_backend() == "tf":
            # TODO: re-evaluate the cutting of a leading '/_?' (tf doesn't like it)
            name = re.sub(r'^/_?', "", name)
            if is_input_feed:
                variable = tf.placeholder(dtype=convert_dtype(self.dtype), shape=shape, name=name)
            else:
                init_spec = kwargs.pop("initializer", None)
                # Bools should be initializable via 0 or not 0.
                if self.dtype == np.bool_ and isinstance(init_spec, (int, float)):
                    init_spec = (init_spec != 0)

                if self.dtype == np.str_ and init_spec == 0:
                    initializer = None
                else:
                    initializer = Initializer.from_spec(shape=shape, specification=init_spec).initializer

                variable = tf.get_variable(
                    name, shape=shape, dtype=convert_dtype(self.dtype), initializer=initializer,
                    collections=[tf.GraphKeys.GLOBAL_VARIABLES if local is False else tf.GraphKeys.LOCAL_VARIABLES],
                    **kwargs
                )
            # Add batch/time rank flags to the op.
            if self.has_batch_rank:
                variable._batch_rank = 0 if self.time_major is False else 1
            if self.has_time_rank:
                variable._time_rank = 1 if self.time_major is False else 0
            return variable
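The shape handling at the top of Example #3 is the crux: batch and time ranks are prepended as unknown (None) dimensions, or as concrete ints if given, and time_major decides their order. A simplified stand-alone illustration of just that logic (covering only the boolean case; the helper name is made up):

    def compose_shape(core_shape, add_batch_rank=False, add_time_rank=False, time_major=False):
        batch_rank = (None,) if add_batch_rank else ()
        time_rank = (None,) if add_time_rank else ()
        # time_major=True puts the time dimension first, otherwise batch comes first.
        return (time_rank + batch_rank + core_shape) if time_major else (batch_rank + time_rank + core_shape)

    compose_shape((84, 84, 3), add_batch_rank=True)                                 # (None, 84, 84, 3)
    compose_shape((4,), add_batch_rank=True, add_time_rank=True)                    # (None, None, 4)
    compose_shape((4,), add_batch_rank=True, add_time_rank=True, time_major=True)   # time dim first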
Example #4
    def create_variables(self, input_spaces, action_space=None):
        in_space = input_spaces["inputs[0]"]

        # Create kernel and biases initializers.
        self.kernel_init = Initializer.from_spec(
            shape=self.kernel_size, specification=self.kernel_spec)
        self.biases_init = Initializer.from_spec(
            shape=self.kernel_size, specification=self.biases_spec)

        # Wrapper for backend.
        if get_backend() == "tf":
            self.layer = tf.layers.Conv2D(
                filters=self.filters,
                kernel_size=self.kernel_size,
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                activation=get_activation_function(self.activation,
                                                   *self.activation_params),
                use_bias=(self.biases_spec is not False),
                kernel_initializer=self.kernel_init.initializer,
                bias_initializer=(self.biases_init.initializer
                                  or tf.zeros_initializer()),
                trainable=(False if self.trainable is False else True),
                _reuse=tf.AUTO_REUSE)

            # Now build the layer so that its variables get created.
            self.layer.build(in_space.get_shape(with_batch_rank=True))
            # Register the generated variables with our registry.
            self.register_variables(*self.layer.variables)
        elif get_backend() == "pytorch":
            shape = in_space.shape
            num_channels = get_input_channels(shape)
            apply_bias = (self.biases_spec is not False)

            # print("Defining conv2d layer with shape = {} and channels {}".format(
            #     shape, num_channels
            # ))
            if self.padding == "same":
                # N.b. PyTorch has no 'same' or 'valid' padding modes, so we need a custom layer.
                self.layer = SamePaddedConv2d(
                    in_channels=num_channels,
                    out_channels=self.filters,
                    # Only support square kernels.
                    kernel_size=self.kernel_size[0],
                    stride=self.strides,
                    bias=apply_bias)
            else:
                self.layer = nn.Conv2d(in_channels=num_channels,
                                       out_channels=self.filters,
                                       kernel_size=self.kernel_size,
                                       stride=self.strides,
                                       padding=0,
                                       bias=apply_bias)
            # Apply weight initializer
            if self.kernel_init.initializer is not None:
                # Must be a callable in PyTorch
                self.kernel_init.initializer(self.layer.weight)
            if apply_bias:
                if self.biases_spec is not None and self.biases_init.initializer is not None:
                    self.biases_init.initializer(self.layer.bias)
                else:
                    # Fill with zeros.
                    self.layer.bias.data.fill_(0)
            if self.activation is not None:
                # Activation function will be used in `call`.
                self.activation_fn = get_activation_function(
                    self.activation, *self.activation_params)
            self.register_variables(
                PyTorchVariable(name=self.global_scope, ref=self.layer))
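Since PyTorch's nn.Conv2d only takes numeric padding, the "same" case above is delegated to a custom SamePaddedConv2d layer. A hypothetical sketch of how such a layer can be built by padding asymmetrically before the convolution, TensorFlow-style (the class name and formula are illustrative, not the library's implementation):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SameConv2dSketch(nn.Module):
        def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True):
            super().__init__()
            self.kernel_size, self.stride = kernel_size, stride
            self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                                  stride=stride, padding=0, bias=bias)

        def forward(self, x):
            in_h, in_w = x.shape[-2:]
            # Output size under "same" padding is ceil(input / stride).
            out_h = (in_h + self.stride - 1) // self.stride
            out_w = (in_w + self.stride - 1) // self.stride
            pad_h = max((out_h - 1) * self.stride + self.kernel_size - in_h, 0)
            pad_w = max((out_w - 1) * self.stride + self.kernel_size - in_w, 0)
            # F.pad expects (left, right, top, bottom) for the last two dims.
            x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
            return self.conv(x)

    # E.g. an 84x84 input with stride 2 comes out as 42x42:
    SameConv2dSketch(3, 16, kernel_size=3, stride=2)(torch.randn(1, 3, 84, 84)).shape  # (1, 16, 42, 42)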