def generator_outputs(inputs, sizes):
    latent = inputs[0]
    input_energy = inputs[1]

    # multiply the (scaled) energy into the latent space
    h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

    # one LAGAN-style generator per calorimeter layer
    img_layer0 = build_generator(h, sizes[0], sizes[1])
    img_layer1 = build_generator(h, sizes[2], sizes[3])
    img_layer2 = build_generator(h, sizes[4], sizes[5])

    # inpainting: resample layer 0 onto the layer-1 grid
    avgpool = AveragePooling2D(pool_size=(1, 8))
    zero2one = avgpool(UpSampling2D(size=(4, 1))(img_layer0))
    img_layer1 = inpainting_attention(img_layer1, zero2one)

    # inpainting: resample layer 1 onto the layer-2 grid
    one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
    img_layer2 = inpainting_attention(img_layer2, one2two)

    outputs = [
        Activation('relu')(img_layer0),
        Activation('relu')(img_layer1),
        Activation('relu')(img_layer2)
    ]
    return outputs
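# --- illustrative sketch, not part of the original source ---
# One way to wrap `generator_outputs` into a Keras Model. The sizes tuple
# (3, 96, 12, 12, 12, 6) mirrors the three-layer calorimeter geometry used
# in the script below; the latent size of 1024 matches it as well.
from keras.layers import Input
from keras.models import Model

latent = Input(shape=(1024,), name='z')
input_energy = Input(shape=(1,), dtype='float32')
generator = Model([latent, input_energy],
                  generator_outputs([latent, input_energy],
                                    (3, 96, 12, 12, 12, 6)))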
# constrain w/ a tanh to dampen the unbounded nature of energy-space
mbd_energy = Activation('tanh')(minibatch_featurizer(K_energy))

# absolute deviation away from input energy. Technically we can learn
# this, but since we want to get as close as possible to conservation of
# energy, just coding it in is better
energy_well = Lambda(
    lambda x: K.abs(x[0] - x[1])
)([total_energy, input_energy])

# binary y/n if it is over the input energy
well_too_big = Lambda(lambda x: 10 * K.cast(x > 5, K.floatx()))(energy_well)

p = concatenate([
    features,
    scale(energies, 10),
    scale(total_energy, 100),
    energy_well,
    well_too_big,
    mbd_energy
])

fake = Dense(1, activation='sigmoid', name='fakereal_output')(p)
discriminator_outputs = [fake, total_energy]
discriminator_losses = ['binary_crossentropy', 'mae']

# ACGAN case
if nb_classes > 1:
    logger.info('running in ACGAN for discriminator mode since found {} '
                'classes'.format(nb_classes))
    aux = Dense(1, activation='sigmoid', name='auxiliary_output')(p)
    discriminator_outputs.append(aux)
    # change the loss depending on how many outputs on the auxiliary task
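# --- illustrative sketch, not part of the original source ---
# how the outputs and losses above might be tied together. Here
# `discriminator_inputs` (the per-layer shower images plus the requested
# energy) is assumed to be defined earlier in the file, and the Adam
# settings are a common GAN choice, not taken from the source.
from keras.models import Model
from keras.optimizers import Adam

discriminator = Model(discriminator_inputs, discriminator_outputs)
discriminator.compile(optimizer=Adam(lr=2e-4, beta_1=0.5),
                      loss=discriminator_losses)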
# command-line arguments
input_folder = sys.argv[1]
output_folder = sys.argv[2]
epochs = int(sys.argv[3])
image_sets = int(sys.argv[4])
showers_to_generate = int(sys.argv[5])  # showers to generate

latent_size = 1024

# input placeholders
latent = Input(shape=(latent_size, ), name='z')  # noise
input_energy = Input(
    shape=(1, ), dtype='float32')  # requested energy of the particle shower
generator_inputs = [latent, input_energy]

# multiply the (scaled) energy into the latent space
h = Lambda(lambda x: x[0] * x[1])([latent, scale(input_energy, 100)])

# build three LAGAN-style generators (check out `build_generator` in architectures.py)
img_layer0 = build_generator(h, 3, 96)
img_layer1 = build_generator(h, 12, 12)
img_layer2 = build_generator(h, 12, 6)

# inpainting
# 0 --> 1
zero2one = AveragePooling2D(pool_size=(1, 8))(
    UpSampling2D(size=(4, 1))(img_layer0))
img_layer1 = inpainting_attention(img_layer1, zero2one)  # this function is in ops.py

# 1 --> 2
one2two = AveragePooling2D(pool_size=(1, 2))(img_layer1)
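# --- illustrative shape check, not part of the original source ---
# UpSampling2D((4, 1)) followed by AveragePooling2D((1, 8)) resamples the
# 3 x 96 layer-0 image onto the 12 x 12 grid of layer 1:
#   rows: 3 * 4 = 12, cols: 96 / 8 = 12.
# A minimal standalone check (assumes channels-last tensors):
from keras.layers import Input, UpSampling2D, AveragePooling2D
from keras.models import Model

x = Input(shape=(3, 96, 1))
y = AveragePooling2D(pool_size=(1, 8))(UpSampling2D(size=(4, 1))(x))
print(Model(x, y).output_shape)  # (None, 12, 12, 1)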
# constrain w/ a tanh to dampen the unbounded nature of energy-space
# mbd_energy = Activation('tanh')(minibatch_featurizer(K_energy))

# absolute deviation away from input energy. Technically we can learn
# this, but since we want to get as close as possible to conservation of
# energy, just coding it in is better
# energy_well = Lambda(
#     lambda x: K.abs(x[0] - x[1])
# )([total_energy, input_energy])

# binary y/n if it is over the input energy
# well_too_big = Lambda(lambda x: 10 * K.cast(x > 5, K.floatx()))(energy_well)

p = concatenate([
    features,
    scale(energies, 10),
    scale(total_energy, 100)
    # energy_well,
    # well_too_big,
    # mbd_energy
])

fake = Dense(1, activation='sigmoid', name='fakereal_output')(p)
discriminator_outputs = [fake, total_energy]
discriminator_losses = ['binary_crossentropy', 'mae']

# ACGAN case
if nb_classes > 1:
    logger.info('running in ACGAN for discriminator mode since found {} '
                'classes'.format(nb_classes))
    aux = Dense(1, activation='sigmoid', name='auxiliary_output')(p)
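# --- illustrative sketch, not part of the original source ---
# completing the ACGAN branch above, mirroring the earlier variant of this
# block: append the auxiliary output and a matching loss. Binary
# crossentropy suits the single sigmoid unit here; a softmax head with a
# categorical loss would replace it for more than two classes.
discriminator_outputs.append(aux)
discriminator_losses.append('binary_crossentropy')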