# Remaining Inception-style towers operating in parallel on the shared feature map
tower_2 = Conv2D(192, (1, 1), activation='relu')(model)
tower_2 = Conv2D(384, (3, 3), padding='same', activation='relu')(tower_2)
tower_3 = Conv2D(48, (1, 1), activation='relu')(model)
tower_3 = Conv2D(128, (5, 5), padding='same', activation='relu')(tower_3)
tower_4 = MaxPooling2D((3, 3), strides=1, padding='same')(model)
tower_4 = Conv2D(128, (1, 1), activation='relu')(tower_4)

# Merge the towers along the channel axis, then build the classification head
model = Concatenate(axis=-1)([tower_1, tower_2, tower_3, tower_4])
model = AveragePooling2D((1, 1), strides=1, padding='valid')(model)
model = Flatten()(model)
model = Dropout(0.4)(model)
model = Dense(mnist_classes, activation='linear')(model)
output = Dense(mnist_classes, activation='softmax')(model)

model = Model(input, output)
model.summary()

"""# Implementation of Models on Datasets"""

import tensorflow as tf
from tensorflow.keras import datasets
import matplotlib.pyplot as plt

"""## MNIST"""

(train_images_mnist, train_labels_mnist), (test_images_mnist, test_labels_mnist) = datasets.mnist.load_data(
    path='mnist.npz')

# Normalize pixel values to be between 0 and 1
train_images_mnist, test_images_mnist = train_images_mnist / 255.0, test_images_mnist / 255.0
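# --- Hedged sketch (not from the original notebook): one way the normalized
# MNIST arrays could be fed to the functional `model` assembled above. It
# assumes the model expects 28x28x1 inputs and `mnist_classes`-way one-hot
# targets; the variable names (x_train_mnist, ...), optimizer, loss, batch
# size, and epoch count below are illustrative assumptions, not values taken
# from the original code.
from tensorflow.keras.utils import to_categorical

x_train_mnist = train_images_mnist.reshape(-1, 28, 28, 1)  # add channel axis
x_test_mnist = test_images_mnist.reshape(-1, 28, 28, 1)
y_train_mnist = to_categorical(train_labels_mnist, mnist_classes)
y_test_mnist = to_categorical(test_labels_mnist, mnist_classes)

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(x_train_mnist, y_train_mnist,
                    validation_data=(x_test_mnist, y_test_mnist),
                    epochs=5, batch_size=128)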
def compileComponent(self, verbose=False, component_type=None):
    # Load the layer layout for the requested GAN component from its YAML file.
    if component_type == 'discriminator':
        stream = open(self.layout_dir + "/discriminator_layout.yaml", "r+")
    elif component_type == 'generator':
        stream = open(self.layout_dir + "/generator_layout.yaml", "r+")
    data = yaml.load(stream, Loader=yaml.FullLoader)

    g1_instructions = data['join'][0]  # branch applied to the primary input
    g2_instructions = data['join'][1]  # branch applied to each per-dimension input
    gc_instructions = data['layers']   # trunk applied after concatenation

    # Primary input: a single scalar for the discriminator, a noise vector for the generator.
    if component_type == 'discriminator':
        g_in_1 = Input((1,))
    elif component_type == 'generator':
        g_in_1 = Input((self.noise,))

    # One scalar input per data dimension.
    g_in_2 = []
    for i in range(self.dimensionality):
        g_in_2.append(Input((1,)))

    # Branch 1: build the g1 sub-network from the layout, with global overrides
    # taking precedence over the per-layer YAML values.
    g1 = g_in_1
    for layer_info in g1_instructions['layers']:
        if self.overrides.get('activation'):
            activation = self.overrides.get('activation')
        else:
            activation = layer_info.get('activation')
        if component_type == 'discriminator' and self.overrides.get('d_nodes'):
            units = self.overrides.get('d_nodes')
        elif component_type == 'generator' and self.overrides.get('g_nodes'):
            units = self.overrides.get('g_nodes')
        else:
            units = layer_info.get('nodes')
        if self.overrides.get('dropout_amount'):
            rate = self.overrides.get('dropout_amount')
        else:
            rate = layer_info.get('dropout_amount')
        if self.overrides.get('leaky_amount'):
            alpha = self.overrides.get('leaky_amount')
        else:
            alpha = layer_info.get('leaky_amount')

        if layer_info['layer_type'] == 'dense':
            g1 = Dense(units=units, activation=activation)(g1)
        elif layer_info['layer_type'] == 'dropout':
            g1 = Dropout(rate=rate)(g1)
        elif layer_info['layer_type'] == 'selu':  # 'selu' entries map to LeakyReLU here
            g1 = LeakyReLU(alpha=alpha)(g1)
        elif layer_info['layer_type'] == 'batchnorm':
            g1 = BatchNormalization()(g1)

    # Branch 2: build one copy of the g2 sub-network per data dimension.
    g2 = []
    for i in range(self.dimensionality):
        g2_current = g_in_2[i]
        for layer_info in g2_instructions['layers']:
            if self.overrides.get('activation'):
                activation = self.overrides.get('activation')
            else:
                activation = layer_info.get('activation')
            if component_type == 'discriminator' and self.overrides.get('d_nodes'):
                units = self.overrides.get('d_nodes')
            elif component_type == 'generator' and self.overrides.get('g_nodes'):
                units = self.overrides.get('g_nodes')
            else:
                units = layer_info.get('nodes')
            if self.overrides.get('dropout_amount'):
                rate = self.overrides.get('dropout_amount')
            else:
                rate = layer_info.get('dropout_amount')
            if self.overrides.get('leaky_amount'):
                alpha = self.overrides.get('leaky_amount')
            else:
                alpha = layer_info.get('leaky_amount')

            if layer_info['layer_type'] == 'dense':
                g2_current = Dense(units=units, activation=activation)(g2_current)
            elif layer_info['layer_type'] == 'dropout':
                g2_current = Dropout(rate=rate)(g2_current)
            elif layer_info['layer_type'] == 'selu':
                g2_current = LeakyReLU(alpha=alpha)(g2_current)
            elif layer_info['layer_type'] == 'batchnorm':
                g2_current = BatchNormalization()(g2_current)
        g2.append(g2_current)

    # Trunk: concatenate all branches, then apply the shared layer instructions.
    gc = Concatenate()([g1] + g2)
    for layer_info in gc_instructions:
        if self.overrides.get('activation'):
            activation = self.overrides.get('activation')
        else:
            activation = layer_info.get('activation')
        if component_type == 'discriminator' and self.overrides.get('d_nodes'):
            units = self.overrides.get('d_nodes')
        elif component_type == 'generator' and self.overrides.get('g_nodes'):
            units = self.overrides.get('g_nodes')
        else:
            units = layer_info.get('nodes')
        if self.overrides.get('dropout_amount'):
            rate = self.overrides.get('dropout_amount')
        else:
            rate = layer_info.get('dropout_amount')
        if self.overrides.get('leaky_amount'):
            alpha = self.overrides.get('leaky_amount')
        else:
            alpha = layer_info.get('leaky_amount')

        if layer_info['layer_type'] == 'dense':
            gc = Dense(units=units, activation=activation)(gc)
        elif layer_info['layer_type'] == 'dropout':
            gc = Dropout(rate=rate)(gc)
        elif layer_info['layer_type'] == 'selu':
            gc = LeakyReLU(alpha=alpha)(gc)
        elif layer_info['layer_type'] == 'batchnorm':
            gc = BatchNormalization()(gc)

    # Output head and model assembly for the requested component.
    if component_type == 'generator':
        gc = Dense(1, activation="sigmoid")(gc)
        gc = Model(name="Generator", inputs=[g_in_1] + g_in_2, outputs=[gc])
    elif component_type == 'discriminator':
        gc = Dense(2, activation="softmax")(gc)
        gc = Model(name="Discriminator", inputs=[g_in_1] + g_in_2, outputs=[gc])
    gc.compile(loss="categorical_crossentropy",
               optimizer=Adam(self.d_training_rate, beta_1=self.d_beta1),
               metrics=["accuracy"])
    if verbose:
        gc.summary()
    return gc
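# --- Hedged sketch (not from the original repository): the layout schema that
# compileComponent() appears to expect, inferred from the keys read above
# ('join' holding two branch definitions plus a top-level 'layers' trunk, and
# per-layer 'layer_type', 'nodes', 'activation', 'dropout_amount',
# 'leaky_amount' fields). The concrete node counts, activations, and amounts
# below are illustrative assumptions only, e.g. a generator_layout.yaml:
#
#   join:
#     - layers:                      # branch g1, fed by the noise / scalar input
#         - {layer_type: dense, nodes: 64}
#         - {layer_type: selu, leaky_amount: 0.2}      # parsed as LeakyReLU
#         - {layer_type: batchnorm}
#     - layers:                      # branch g2, one copy per data dimension
#         - {layer_type: dense, nodes: 16, activation: relu}
#         - {layer_type: dropout, dropout_amount: 0.3}
#   layers:                          # trunk applied after Concatenate()
#     - {layer_type: dense, nodes: 128}
#     - {layer_type: selu, leaky_amount: 0.2}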