def forward_discriminator2(self, y):
    # Build a two-channel image from the input and its inverse transform,
    # then apply the same conv stack as the other discriminator variants.
    ch2 = self.inv_transform(lbann.Identity(y))
    y = lbann.Concatenation(lbann.Identity(y), ch2, axis=0)
    img = lbann.Reshape(y, dims='2 128 128')
    x = lbann.LeakyRelu(self.d2_conv[0](img), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d2_conv[1](x), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d2_conv[2](x), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d2_conv[3](x), negative_slope=0.2)
    return self.d2_fc(lbann.Reshape(x, dims='32768'))
def forward_discriminator2(self, img):
    '''Discriminator 2. Weights are frozen as part of the adversarial network (stacked G + D).'''
    x = lbann.LeakyRelu(self.d2_conv[0](img), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d2_conv[1](x), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d2_conv[2](x), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d2_conv[3](x), negative_slope=0.2)
    dims = 32768  # dims = 25088 for padding=1
    y = self.d2_fc(lbann.Reshape(x, dims=str(dims)))
    return y
def forward_discriminator2(self, img):
    '''Discriminator 2. Weights are frozen as part of the adversarial network (stacked G + D).'''
    x = img
    for lyr in self.d2_conv:
        x = lbann.LeakyRelu(lyr(x), negative_slope=0.2)
    dims = 32768  # dims = 25088 for padding=1
    y = self.d2_fc(lbann.Reshape(x, dims=str(dims)))
    return y
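A note on the flattened sizes used above: 32768 factors as 512 * 8 * 8 and the alternative 25088 as 512 * 7 * 7, so the two values presumably correspond to a 512-channel final feature map at 8x8 versus 7x7 spatial resolution, depending on the padding in the d2_conv stack (whose kernel/stride settings are defined elsewhere). The 512-channel reading is an inference from these numbers, not from this file:

# Hedged sanity check on the Reshape sizes (channel count is an assumption).
assert 512 * 8 * 8 == 32768   # dims used here, assuming an 8x8 final feature map
assert 512 * 7 * 7 == 25088   # dims quoted in the comment for padding=1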
def forward_discriminator1(self, img):
    '''Discriminator 1'''
    x = img
    for lyr in self.d1_conv:
        x = lbann.LeakyRelu(lyr(x), negative_slope=0.2)
    # x = lbann.LeakyRelu(lbann.BatchNormalization(self.d1_conv[0](x), decay=0.9, scale_init=1.0, epsilon=1e-5), negative_slope=0.2)
    dims = 32768  # dims = 25088 for padding=1
    y = self.d1_fc(lbann.Reshape(x, dims=str(dims)))
    return y
def forward_discriminator1(self, img):
    '''Discriminator 1'''
    print('D1 - input Img', img.__dict__)  # debug: dump the input layer's attributes
    x = lbann.LeakyRelu(self.d1_conv[0](img), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d1_conv[1](x), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d1_conv[2](x), negative_slope=0.2)
    x = lbann.LeakyRelu(self.d1_conv[3](x), negative_slope=0.2)
    # x = lbann.LeakyRelu(lbann.BatchNormalization(self.d1_conv[0](x), decay=0.9, scale_init=1.0, epsilon=1e-5), negative_slope=0.2)
    dims = 32768  # dims = 25088 for padding=1
    y = self.d1_fc(lbann.Reshape(x, dims=str(dims)))
    return y
def forward_discriminator2(self, img):
    '''Discriminator 2. Weights are frozen as part of the adversarial network (stacked G + D).'''
    # bn_wts are only used by the commented-out BatchNormalization variant below.
    bn_wts = [lbann.Weights(initializer=lbann.ConstantInitializer(value=1.0)),
              lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0))]
    for count, lyr in enumerate(self.d2_conv):
        if count == 0:
            x = lbann.LeakyRelu(lyr(img), negative_slope=0.2)
        else:
            x = lbann.LeakyRelu(lyr(x), negative_slope=0.2)
        #### without convbrlu (conv + BN + LeakyReLU):
        # if count == 0: x = lbann.LeakyRelu(lbann.BatchNormalization(lyr(img), weights=bn_wts, statistics_group_size=-1), negative_slope=0.2)
        # else: x = lbann.LeakyRelu(lbann.BatchNormalization(lyr(x), weights=bn_wts, statistics_group_size=-1), negative_slope=0.2)
    dims = 524288
    y = self.d2_fc(lbann.Reshape(x, dims=str(dims)))
    return y
def forward_discriminator1(self, img):
    '''Discriminator 1'''
    # bn_wts are only used by the commented-out BatchNormalization variant below.
    bn_wts = [lbann.Weights(initializer=lbann.ConstantInitializer(value=1.0)),
              lbann.Weights(initializer=lbann.ConstantInitializer(value=0.0))]
    for count, lyr in enumerate(self.d1_conv):
        if count == 0:
            x = lbann.LeakyRelu(lyr(img), negative_slope=0.2)
        else:
            x = lbann.LeakyRelu(lyr(x), negative_slope=0.2)
        #### without convbrlu (conv + BN + LeakyReLU):
        # if count == 0: x = lbann.LeakyRelu(lbann.BatchNormalization(lyr(img), weights=bn_wts, statistics_group_size=-1), negative_slope=0.2)
        # else: x = lbann.LeakyRelu(lbann.BatchNormalization(lyr(x), weights=bn_wts, statistics_group_size=-1), negative_slope=0.2)
    dims = 32768  # dims = 25088 for padding=1
    y = self.d1_fc(lbann.Reshape(x, dims=str(dims)))
    return y
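Since the docstrings note that discriminator 2's weights are frozen inside the stacked G + D network, a minimal sketch of the usual wiring follows. The layer names and the callback interval are illustrative assumptions, not taken from this file; the key idea is that the frozen copy receives no optimizer updates of its own and is instead refreshed from the trained discriminator, e.g. with LBANN's CallbackReplaceWeights.

# Hedged sketch: stacked G + D wiring around the forward passes above.
# `model` is an instance of the class owning these methods; all layer names
# ('d1_conv_0', ..., 'd2_fc') and batch_interval=2 are illustrative assumptions.
d1_real = model.forward_discriminator1(real_img)   # trained on real data
d1_fake = model.forward_discriminator1(fake_img)   # trained on generator output
d2_fake = model.forward_discriminator2(fake_img)   # frozen copy scoring G

# Discriminator 2's weights are periodically overwritten with
# discriminator 1's by a callback rather than trained directly:
replace_weights = lbann.CallbackReplaceWeights(
    source_layers='d1_conv_0 d1_conv_1 d1_conv_2 d1_conv_3 d1_fc',
    destination_layers='d2_conv_0 d2_conv_1 d2_conv_2 d2_conv_3 d2_fc',
    batch_interval=2)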
def gen_layers(latent_dim, number_of_atoms):
    '''
    Generate the model for the 3D convolutional autoencoder.
    Returns the Directed Acyclic Graph (DAG) that the LBANN model will run on.
    '''
    input_ = lbann.Input(target_mode="reconstruction")
    tensors = lbann.Identity(input_)
    tensors = lbann.Reshape(tensors, dims="11 32 32 32", name="Sample")  # Input tensor shape is (number_of_atoms)x32x32x32

    # Encoder
    x = lbann.Identity(tensors)
    for i in range(4):
        out_channels = latent_dim // (2**(3 - i))
        x = lbann.Convolution(x,
                              num_dims=3,
                              num_output_channels=out_channels,
                              num_groups=1,
                              conv_dims_i=4,
                              conv_strides_i=2,
                              conv_dilations_i=1,
                              conv_pads_i=1,
                              has_bias=True,
                              name="Conv_{0}".format(i))
        x = lbann.BatchNormalization(x, name="Batch_NORM_{0}".format(i + 1))
        x = lbann.LeakyRelu(x, name="Conv_{0}_Activation".format(i + 1))

    # Shape: (latent_dim)x2x2x2
    encoded = lbann.Convolution(x,
                                num_dims=3,
                                num_output_channels=latent_dim,
                                num_groups=1,
                                conv_dims_i=2,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                conv_pads_i=0,
                                has_bias=True,
                                name="encoded")

    # Shape: (latent_dim)x1x1x1
    # Decoder
    x = lbann.Deconvolution(encoded,
                            num_dims=3,
                            num_output_channels=number_of_atoms * 16,
                            num_groups=1,
                            conv_dims_i=4,
                            conv_pads_i=0,
                            conv_strides_i=2,
                            conv_dilations_i=1,
                            has_bias=True,
                            name="Deconv_1")
    x = lbann.BatchNormalization(x, name="BN_D1")
    x = lbann.Tanh(x, name="Deconv_1_Activation")

    for i in range(3):
        out_channels = number_of_atoms * (2**(2 - i))
        x = lbann.Deconvolution(x,
                                num_dims=3,
                                num_output_channels=out_channels,
                                num_groups=1,
                                conv_dims_i=4,
                                conv_pads_i=1,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                has_bias=True,
                                name="Deconv_{0}".format(i + 2))
        x = lbann.BatchNormalization(x, name="BN_D{0}".format(i + 2))
        if i != 2:  # Skip the last activation here because we want to dump its outputs under a fixed name
            x = lbann.Tanh(x, name="Deconv_{0}_Activation".format(i + 2))

    decoded = lbann.Tanh(x, name="decoded")

    img_loss = lbann.MeanSquaredError([decoded, tensors])
    metrics = [lbann.Metric(img_loss, name='recon_error')]

    # ----------------------------------
    # Set up DAG
    # ----------------------------------
    layers = lbann.traverse_layer_graph(input_)  # Generate model DAG
    return layers, img_loss, metrics
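For completeness, a minimal sketch of turning the returned (layers, loss, metrics) into a runnable LBANN experiment. The epoch count, batch size, optimizer settings, and `data_reader` are placeholder assumptions; only the lbann.Model / lbann.Trainer assembly pattern is the point.

# Hedged sketch: wiring gen_layers() into an LBANN experiment.
import lbann
import lbann.contrib.launcher

layers, img_loss, metrics = gen_layers(latent_dim=64, number_of_atoms=11)  # example sizes
model = lbann.Model(100,  # number of epochs (placeholder)
                    layers=layers,
                    objective_function=lbann.ObjectiveFunction([img_loss]),
                    metrics=metrics,
                    callbacks=[lbann.CallbackPrint(), lbann.CallbackTimer()])
trainer = lbann.Trainer(mini_batch_size=128)  # placeholder batch size
opt = lbann.Adam(learn_rate=2e-4, beta1=0.9, beta2=0.99, eps=1e-8)
# data_reader must be defined elsewhere (e.g. a protobuf data-reader spec).
lbann.contrib.launcher.run(trainer, model, data_reader, opt, job_name='cae3d')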