def make_discriminator():
    """
    () -> (tf.Placeholder, tf.Placeholder, FeedforwardNetwork)
    Build placeholder inputs for constraints and solutions, and a
    feedforward discriminator that reads their concatenation.
    """
    constraint_shape = Params.environment.constraint_shape()
    solution_shape = Params.environment.solution_shape()

    # Joint input stacks constraint and solution along dimension 0.
    joint_shape = list(constraint_shape)
    joint_shape[0] = joint_shape[0] + solution_shape[0]

    constraint_input = placeholder_node("constraint_input", constraint_shape, 1)
    solution_input = placeholder_node("solution_input", solution_shape, 1)
    joint_input = tf.concat([constraint_input, solution_input], 1)

    discriminator = FeedforwardNetwork(
        name="artificial_discriminator",
        session=Params.session,
        input_shape=joint_shape,
        layer_shapes=Params.internal_layer_shapes + [[1]],
        activations=Params.activation,
        input_node=joint_input,
        save_location=Params.save_location,
    )
    return constraint_input, solution_input, discriminator
def _initialise(self):
    """
    () -> ()
    Wire up the GAN graph: noise and real-input placeholders, the
    generator, the real/fake switching node, and the discriminator.
    """
    self.noise_vector = placeholder_node(
        self.extend_name('noise'), [self.noise_dimension],
        dynamic_dimensions=1)
    self.real_input = placeholder_node(
        self.extend_name('real_input'), [self.latent_dimension],
        dynamic_dimensions=1)

    # A builder supplied as a list of layer shapes is shorthand for the
    # default network with those shapes.  (Was `type(x) == type([])`;
    # isinstance is the idiomatic check.)
    if isinstance(self.generator_builder, list):
        self.generator_builder = GAN.default_network(self.generator_builder)
    self.generator, self.fake_input = self.generator_builder(
        self.extend_name('generator'), self.get_session(),
        self.noise_dimension, self.noise_vector, self.latent_dimension)

    # Per-example boolean switch: True routes the real input to the
    # discriminator, False routes the generator's output.
    self.switch = tf.placeholder(
        tf.bool, shape=[None], name=self.extend_name('switch'))
    self.input_is_real = tf.cast(self.switch, tf.float32)
    self.discriminator_input = tf.where(
        self.switch, self.real_input, self.fake_input)

    if isinstance(self.discriminator_builder, list):
        self.discriminator_builder = GAN.default_network(
            self.discriminator_builder)
    self.discriminator, self.discriminator_output = \
        self.discriminator_builder(
            self.extend_name('discriminator'), self.get_session(),
            self.latent_dimension, self.discriminator_input, 1)
def build(self, name, session, environment):
    """
    String -> tf.Session -> Environment -> ()
    Record the constraint/solution shapes of the given environment and
    create placeholder nodes for each, plus their concatenation.
    """
    self.constraint_shape = [environment.constraint_dimension()]
    self.solution_shape = [environment.solution_dimension()]
    # The joint input stacks constraint and solution vectors end to end.
    self.joint_shape = [self.constraint_shape[0] + self.solution_shape[0]]
    self.environment_type = environment.__name__

    self.constraint_input = placeholder_node(
        name + '.constraint_input', self.constraint_shape, 1)
    self.solution_input = placeholder_node(
        name + '.solution_input', self.solution_shape, 1)
    self.joint_input = tf.concat(
        [self.constraint_input, self.solution_input], 1)
def _initialise(self):
    """
    () -> ()
    Initialise the network's layers, chaining each layer's output into
    the next layer's input.
    """
    if self.input_node is None:
        self.input_node = placeholder_node(
            self.extend_name('input_node'), self.input_shape,
            dynamic_dimensions=1)

    # Consecutive pairs of shapes give each layer's (input, output).
    all_shapes = [self.input_shape] + self.layer_shapes
    constructors = self.layer_constructors_generator(len(self.layer_shapes))

    self.layers = []
    current_output = self.get_input_node()
    for index, (constructor, in_shape, out_shape) in enumerate(
            zip(constructors, all_shapes[:-1], all_shapes[1:])):
        layer = constructor(
            self.extend_name('layer_{}'.format(index)),
            self.get_session(), in_shape, out_shape, current_output)
        self.layers.append(layer)
        current_output = layer.get_output_node()

    self.output_node = current_output
    self.output_shape = self.layer_shapes[-1]
def _initialise(self):
    """
    () -> ()
    Build the encoder/decoder pair and expose their key graph nodes on
    this object.
    """
    if self.input_node is None:
        self.input_node = placeholder_node(
            self.extend_name('input_node'), self.input_shape,
            dynamic_dimensions=1)

    session = self.get_session()

    encoder = VariationalNetwork(
        name=self.extend_name('encoder'),
        session=session,
        input_shape=self.input_shape,
        layer_shapes=self.encoder_layer_shapes,
        internal_activations=self.encoder_activations,
        means_activation=self.encoder_means_activation,
        stddevs_activation=self.encoder_stddevs_activation,
        input_node=self.input_node)
    self.encoder = encoder
    self.means_output_node = encoder.means_output_node
    self.stddevs_output_node = encoder.stddevs_output_node
    self.sample_node = encoder.sample_node

    # The decoder consumes samples drawn from the encoder's latent
    # distribution and attempts to reconstruct the input.
    decoder = FeedforwardNetwork(
        name=self.extend_name('decoder'),
        session=session,
        input_shape=self.encoder_layer_shapes[-1],
        layer_shapes=self.decoder_layer_shapes,
        activations=self.decoder_activations,
        input_node=self.sample_node)
    self.decoder = decoder
    self.reconstruction_node = decoder.output_node
def target_node(self):
    """
    () -> tf.Placeholder
    Return a target node suitable for training the embeddings.
    """
    target_shape = [self.output_dimension]
    return placeholder_node(
        self.extend_name('target'), target_shape, dynamic_dimensions=1)
def discriminator_training_nodes(self, discriminator):
    """
    Dict -> (tf.Node, tf.Node)
    Return a target node and a mean-squared-error loss node useful for
    training the discriminator.
    """
    # Scalar target per example (empty shape, one dynamic batch axis).
    target = placeholder_node(
        self.extend_name("discriminator_target"), [], dynamic_dimensions=1)
    prediction = discriminator["output"]
    return target, tf.losses.mean_squared_error(target, prediction)
def build_input_nodes(self):
    """
    () -> ()
    Construct input placeholder nodes for the tensorflow graph.
    """
    # Each attribute gets a rank-1 placeholder with one dynamic
    # (batch) dimension in front.
    input_specs = (
        ("solution_input", self.solution_dimension),
        ("latent_input", self.latent_dimension),
        ("constraint_input", self.constraint_dimension),
    )
    for attribute, dimension in input_specs:
        node = placeholder_node(
            self.extend_name(attribute), [dimension], dynamic_dimensions=1)
        setattr(self, attribute, node)
def regression_metrics(output_node_shape, output_node, name, variables=None):
    """
    [Int] -> tf.Tensor -> String -> [tf.Variable]?
        -> (TargetNode, LossNode, Optimiser)
    Create metrics - target node, loss node, and optimiser - for a
    regression model.
    """
    target_node = placeholder_node(
        name + ".target", output_node_shape, dynamic_dimensions=1)
    loss_node = tf.losses.mean_squared_error(target_node, output_node)
    return (
        target_node,
        loss_node,
        default_adam_optimiser(loss_node, name, variables=variables),
    )
def make_discriminator():
    """
    () -> (tf.Placeholder, FeedforwardNetwork)
    Build an image placeholder and a feedforward discriminator that
    classifies images at the configured fidelity.
    """
    image_shape = Params.environment.image_shape(Params.fidelity)
    image_input = placeholder_node("image_input", image_shape, 1)

    network = FeedforwardNetwork(
        name="pixel_discriminator",
        session=Params.session,
        input_shape=image_shape,
        layer_shapes=Params.internal_layer_shapes + [[1]],
        activations=Params.activation,
        input_node=image_input,
        save_location=Params.save_location,
    )
    return image_input, network
def classification_metrics_with_initialiser(output_node_shape, output_node,
        name, variables=None, target=None):
    """
    [Int] -> tf.Tensor -> String -> [tf.Variable]? -> tf.Tensor?
        -> (TargetNode, LossNode, Accuracy, Optimiser, Initialiser)
    Create metrics - target node, loss node, accuracy node, optimiser, and
    optimiser initialiser - for a classification model.

    If `target` is provided it is used as the target node; otherwise a new
    placeholder is created.  (Docstring previously claimed a 4-tuple
    return and omitted the `target` parameter; the function returns five
    values.)
    """
    if target is None:
        target_node = placeholder_node(
            name + ".target", output_node_shape, dynamic_dimensions=1)
    else:
        target_node = target
    loss_node = tf.losses.log_loss(target_node, output_node)
    accuracy_node = accuracy(output_node, target_node, name + ".accuracy")
    optimiser, initialiser = default_adam_optimiser_with_initialiser(
        loss_node, name, variables=variables)
    return target_node, loss_node, accuracy_node, optimiser, initialiser
def __init__(self, name, session, input_shape, output_shape,
        input_node=None, save_location=None):
    """
    String -> tf.Session -> [Int] -> [Int] -> tf.Tensor? -> String?
        -> Layer
    Store the layer's shapes, creating an input placeholder when no
    input node is supplied.
    """
    super().__init__(name, session, save_location)
    self.input_shape = input_shape
    self.output_shape = output_shape
    self.output_node = None
    if input_node is not None:
        self.input_node = input_node
    else:
        # No upstream node supplied: create a placeholder with one
        # dynamic (batch) dimension.
        self.input_node = placeholder_node(
            self.extend_name('input_node'), self.input_shape, 1)