Example #1
0
    def get_output_shape_for(self, inputs_shape):
        """Return the layer's output shape, ``(num_samples, fp_length)``.

        Only the sample count is needed; the remaining molecule
        dimensions returned by ``mol_shapes_to_dims`` are discarded.
        """
        (_, _, _, _, num_samples) = mol_shapes_to_dims(mol_shapes=inputs_shape)

        return (num_samples, self.fp_length)
Example #2
0
    def build(self, inputs_shape):
        """Create one TimeDistributed-wrapped inner layer per atom degree.

        For every degree ``0..max_degree-1`` a fresh inner layer is created
        via ``self.create_inner_layer_fn`` (a different weight matrix per
        degree), wrapped in ``TimeDistributed`` so it is applied across the
        atom axis, built, and its weights collected into
        ``self.trainable_weights``.
        """
        # Unpack the molecule tensor dimensions.
        (max_atoms, max_degree, num_atom_features, num_bond_features,
         _) = mol_shapes_to_dims(mol_shapes=inputs_shape)

        self.max_degree = max_degree

        self.trainable_weights = []
        self.inner_3D_layers = []

        atom_bond_width = num_atom_features + num_bond_features
        for degree in range(max_degree):
            # Fresh inner layer for this degree, renamed for traceability.
            wrapped = self.create_inner_layer_fn()
            layer_kind = wrapped.__class__.__name__.lower()
            wrapped.name = '%s_inner_%s_%d' % (self.name, layer_kind, degree)

            # TimeDistributed parallelises the inner layer across atoms (3D).
            td_layer = layers.TimeDistributed(
                wrapped,
                name='%s_inner_timedistributed_%d' % (self.name, degree))

            # Building the wrapper also builds the wrapped (Dense) layer.
            td_layer.build((None, max_atoms, atom_bond_width))

            # Keep the wrapper and expose its trainable weights.
            self.inner_3D_layers.append(td_layer)
            self.trainable_weights += td_layer.trainable_weights
Example #3
0
    def build(self, input_shape):
        """Build the single TimeDistributed inner dense layer.

        Creates one inner layer via ``self.create_inner_layer_fn``, wraps it
        in ``TimeDistributed`` so the dense transform is applied across the
        atom axis, builds it, and stores its weights on ``self.train_weights``.
        """
        # Import dimensions
        (max_atoms, max_degree, num_atom_features, num_bond_features,
         num_samples) = mol_shapes_to_dims(mol_shapes=input_shape)

        # Add the dense layer that contains the trainable parameters.
        # BUG FIX: the original scope name referenced an undefined `degree`
        # variable (NameError at build time) — this build creates a single
        # dense layer, so the scope carries no degree suffix.
        with tf.name_scope(self.name + "_dense"):
            inner_layer = self.create_inner_layer_fn()
            inner_layer_type = inner_layer.__class__.__name__.lower()
            inner_layer._name = self.name + '_inner_' + inner_layer_type

            # Initialise TimeDistributed layer wrapper in order to
            # parallelise the dense layer across atoms.
            inner_3D_layer_name = self.name + '_inner_timedistributed'
            self.inner_3D_layer = layers.TimeDistributed(
                inner_layer, name=inner_3D_layer_name)

            # Build the TimeDistributed layer (which will build the Dense
            # layer); input is the concatenated atom+bond feature vector.
            self.inner_3D_layer.build(
                (None, max_atoms, num_atom_features + num_bond_features))

            # NOTE(review): attribute is `train_weights`, not
            # `trainable_weights` — presumably to avoid clashing with the
            # read-only Keras `trainable_weights` property; confirm callers
            # read `train_weights`.
            self.train_weights = self.inner_3D_layer.trainable_weights
Example #4
0
    def compute_output_shape(self, input_shape):
        """Return the output shape ``(num_samples, max_atoms, conv_width)``."""
        # Only atom count and sample count are needed from the molecule dims.
        dims = mol_shapes_to_dims(mol_shapes=input_shape)
        max_atoms, num_samples = dims[0], dims[4]

        return (num_samples, max_atoms, self.conv_width)
    def compute_output_shape(self, inputs_shape):
        """Return the per-atom output shape.

        The last dimension is ``fp_length`` when encoding, otherwise the
        original atom+bond feature width.
        """
        dims = mol_shapes_to_dims(mol_shapes=inputs_shape)
        max_atoms, num_samples = dims[0], dims[4]

        feature_width = (self.fp_length if self.encode
                         else self.original_atom_bond_features)
        return (num_samples, max_atoms, feature_width)
    def compute_output_shape(self, inputs_shape):
        """Return the output shape(s) for this layer.

        Returns a single ``(num_samples, max_atoms, conv_width)`` shape when
        ``self.encode_only`` is set, otherwise a pair of
        ``(num_samples, max_atoms, original_atom_bond_features)`` shapes.
        """
        # BUG FIX: the original assigned into `inputs_shape[0]`, mutating the
        # caller's list in place — a shape query must not have side effects.
        # Normalise the first shape on a shallow copy instead.
        shapes = list(inputs_shape)
        shapes[0] = (None, int(shapes[0][1]), shapes[0][2])

        # Import dimensions
        (max_atoms, _, _, _,
         num_samples) = mol_shapes_to_dims(mol_shapes=shapes)

        if self.encode_only:
            return (num_samples, max_atoms, self.conv_width)
        return [(num_samples, max_atoms, self.original_atom_bond_features),
                (num_samples, max_atoms, self.original_atom_bond_features)]
    def build(self, inputs_shape):
        """Build the (possibly weight-tied) TimeDistributed inner layer.

        When ``self.tied_to`` is set, the kernel is treated as frozen
        (non-trainable) and only the bias — if present — trains; otherwise
        all inner-layer weights are trainable.
        """
        # Unpack molecule dimensions; only the atom count and the feature
        # widths are used here.
        (max_atoms, _, num_atom_features, num_bond_features,
         _) = mol_shapes_to_dims(mol_shapes=inputs_shape)

        self.trainable_weights = []
        self.non_trainable_weights = []

        # Create the inner layer and rename it for traceability.
        wrapped = self.create_inner_layer_fn()
        layer_kind = wrapped.__class__.__name__.lower()
        wrapped.name = self.name + '_inner_' + layer_kind

        # TimeDistributed parallelises the dense layer across the atom axis.
        self.inner_3D_layer = TimeDistributed(
            wrapped, name=self.name + '_inner_timedistributed')

        # Building the wrapper also builds the inner (Dense) layer. The
        # encoder consumes raw atom+bond features; otherwise the input is a
        # fingerprint of width `fp_length`.
        input_width = (num_atom_features + num_bond_features
                       if self.encode else self.fp_length)
        self.inner_3D_layer.build((None, max_atoms, input_width))

        # Expose the weights.
        if self.tied_to is None:
            self.trainable_weights = self.inner_3D_layer.trainable_weights
        else:
            # Tied kernel stays frozen; only the bias (if any) trains.
            self.non_trainable_weights.append(self.inner_3D_layer.layer.kernel)
            if self.bias:
                self.trainable_weights.append(self.inner_3D_layer.layer.bias)
    def build(self, inputs_shape):
        """Build per-degree inner layers plus a self-connection layer.

        One TimeDistributed-wrapped inner layer is created per atom degree,
        plus one "self" layer for the atom's own features. ``self.idx`` is
        set before each ``create_inner_layer_fn`` call — presumably it
        selects the tied weight slot inside that factory (TODO confirm).
        When ``self.tied_to`` is set, kernels are frozen and only biases
        (if enabled) train.
        """
        # Import dimensions
        (max_atoms, max_degree, num_atom_features, num_bond_features,
         _) = mol_shapes_to_dims(mol_shapes=inputs_shape)

        # Add the dense layers (that contain trainable params)
        #   (for each degree we convolve with a different weight matrix)
        self.trainable_weights = []
        self.non_trainable_weights = []
        self.inner_3D_layers = []
        self.all_layers = []

        # The self-connection uses the slot after the per-degree ones.
        self.idx = max_degree
        self_layer = self.create_inner_layer_fn()
        self_layer_type = self_layer.__class__.__name__.lower()
        self_layer.name = self.name + '_self_' + self_layer_type + '_'

        # TimeDistributed wrapper parallelises the self layer across atoms.
        self.self_3D_layer_name = self.name + '_self_timedistributed'
        self.self_3D_layer = TimeDistributed(self_layer,
                                             name=self.self_3D_layer_name)
        if self.encode_only:
            self.self_3D_layer.build(
                (None, max_atoms, num_atom_features + num_bond_features))
        else:
            self.self_3D_layer.build((None, max_atoms, self.conv_width))

        for degree in range(max_degree):
            self.idx = degree
            # Initialise inner layer, and rename it
            inner_layer = self.create_inner_layer_fn()
            inner_layer_type = inner_layer.__class__.__name__.lower()
            inner_layer.name = self.name + '_inner_' + inner_layer_type + '_' + str(
                degree)

            # Initialise TimeDistributed layer wrapper in order to
            # parallelise the dense layer across atoms (3D)
            inner_3D_layer_name = self.name + '_inner_timedistributed_' + str(
                degree)
            inner_3D_layer = TimeDistributed(inner_layer,
                                             name=inner_3D_layer_name)

            # Build the TimeDistributed layer (which will build the Dense
            # layer)
            if self.encode_only:
                inner_3D_layer.build(
                    (None, max_atoms, num_bond_features + num_atom_features))
            else:
                inner_3D_layer.build((None, max_atoms, self.conv_width))

            # Store inner_3D_layer and its weights
            self.inner_3D_layers.append(inner_3D_layer)
            self.all_layers.append(inner_3D_layer)
            if self.tied_to is not None:
                self.non_trainable_weights.append(inner_3D_layer.layer.kernel)
                if self.bias:
                    self.trainable_weights.append(inner_3D_layer.layer.bias)
            else:
                self.trainable_weights += inner_3D_layer.trainable_weights

        if self.tied_to is not None:
            self.non_trainable_weights.append(self.self_3D_layer.layer.kernel)
            # BUG FIX: guard the bias append like the per-degree loop does —
            # when `self.bias` is false the layer has no bias to register.
            if self.bias:
                self.trainable_weights.append(self.self_3D_layer.layer.bias)
        else:
            self.trainable_weights += self.self_3D_layer.trainable_weights

        # NOTE(review): the loop stores the wrapped TimeDistributed layers in
        # `all_layers`, but here the unwrapped `self_layer` is stored —
        # confirm this asymmetry is intended before changing it.
        self.all_layers.append(self_layer)