# --- Training setup: blind obfuscator, "propAdd" variant, dataset 3 ---
# NOTE(review): this fragment was whitespace-mangled onto one line; the
# structure below (loop-body extent in particular) is reconstructed from the
# per-iteration folder/model/metrics semantics — confirm against the original.
# `settings`, `batch_generator`, `get_model`, `plot_model`, `Adam`, and the
# x_* arrays are defined elsewhere in this file.

# Batch generators: one private training stream, public + private validation.
x_train_private_gen = batch_generator(settings, x_train_private, private=True)
x_val_public_gen = batch_generator(settings, x_val_public)
x_val_private_gen = batch_generator(settings, x_val_private, private=True)

# Number of whole batches per epoch (last partial batch is dropped).
train_steps_per_epoch = np.floor(
    x_train_private.shape[0] / settings['batch_size']).astype(np.int32)
val_steps_per_epoch = np.floor(
    x_val_private.shape[0] / settings['batch_size']).astype(np.int32)

for iteration in range(3):
    folder = 'results/propAdd_blind_dataset3/{}'.format(iteration)
    obfuscator, obfuscator_part = get_model.load_model(settings)
    if not os.path.exists(folder):
        os.makedirs(folder)
    # BUG FIX: the original built the path as folder + '\model.png'; the
    # backslash is not a portable separator ('\m' is also an invalid escape).
    plot_model(obfuscator, to_file=os.path.join(folder, 'model.png'),
               show_shapes=True)
    # NOTE(review): Adam(lr=...) is the legacy Keras keyword (newer releases
    # use learning_rate=); kept as-is to match the installed Keras version.
    obfuscator.compile(optimizer=Adam(lr=settings['learning_rate']),
                       loss=settings['loss'])
    obfuscator_part.compile(optimizer=Adam(lr=settings['learning_rate']),
                            loss=['mae', settings['loss']])

    # Pre-allocate one loss value per epoch for each tracked metric.
    metrics = dict()
    metrics['train_public_loss'] = np.zeros(settings['epochs'])
    metrics['train_private_loss'] = np.zeros(settings['epochs'])
    metrics['val_public_loss'] = np.zeros(settings['epochs'])
    metrics['val_private_loss'] = np.zeros(settings['epochs'])
    metrics['train_obj_loss'] = np.zeros(settings['epochs'])
# --- Training setup: plain obfuscator, dataset 3 ---
# NOTE(review): this fragment was whitespace-mangled onto one line; the
# structure below is reconstructed — confirm loop-body extent against the
# original. `batchnorm` and `same_batch` are NOT defined in this fragment;
# presumably they are option lists declared just above — verify.

# Architecture-search option lists and the resulting grid size.
depth = [3]
initial_filters = [8]
extra_block = [True, False, 'skip']
num_models = (len(batchnorm) * len(same_batch) * len(depth)
              * len(initial_filters) * len(extra_block))

# Number of whole batches per epoch (last partial batch is dropped).
train_steps_per_epoch = np.floor(
    x_train_private.shape[0] / settings['batch_size']).astype(np.int32)
val_steps_per_epoch = np.floor(
    x_val_private.shape[0] / settings['batch_size']).astype(np.int32)

for iteration in range(3):
    folder = 'results/dataset3/{}'.format(iteration)
    obfuscator = get_model.load_model(settings)
    if not os.path.exists(folder):
        os.makedirs(folder)
    # BUG FIX: the original built the path as folder + '\model.png'; the
    # backslash is not a portable separator ('\m' is also an invalid escape).
    plot_model(obfuscator, to_file=os.path.join(folder, 'model.png'),
               show_shapes=True)
    # NOTE(review): Adam(lr=...) is the legacy Keras keyword (newer releases
    # use learning_rate=); kept as-is to match the installed Keras version.
    obfuscator.compile(optimizer=Adam(lr=settings['learning_rate']),
                       loss='mean_absolute_error')

    # Pre-allocate one loss value per epoch for each tracked metric.
    metrics = dict()
    metrics['train_public_loss'] = np.zeros(settings['epochs'])
    metrics['train_private_loss'] = np.zeros(settings['epochs'])
    metrics['val_public_loss'] = np.zeros(settings['epochs'])
    metrics['val_private_loss'] = np.zeros(settings['epochs'])
    metrics['train_obj_loss'] = np.zeros(settings['epochs'])
    metrics['val_obj_loss'] = np.zeros(settings['epochs'])
settings['batch_size']).astype( np.int32) val_steps_per_epoch = np.floor(x_val_private.shape[0] / settings['batch_size']).astype( np.int32) #determine loss weights per loss obf_loss_weight = K.variable(settings['loss_weight_obf']) att_loss_weight = K.variable(1 - settings['loss_weight_obf']) folder = 'results/direct_approach3_dataset1/weight_{}_number_{}'.format( settings['loss_weight_obf'], number) if not os.path.exists(folder): os.makedirs(folder) obfuscator, comb = get_model.load_model(settings) plot_model(obfuscator, to_file=folder + '\obfuscator.png', show_shapes=True) obfuscator.compile( optimizer=Adam(lr=settings['learning_rate_obf']), loss='mean_absolute_error') comb.compile(optimizer=Adam(lr=settings['learning_rate_att']), loss=['mean_absolute_error', 'binary_crossentropy'], loss_weights=[obf_loss_weight, att_loss_weight]) #create metrics dictionary metrics = dict() metrics['train_public_loss'] = np.zeros(settings['epochs']) metrics['train_private_loss'] = np.zeros(settings['epochs']) metrics['val_public_loss'] = np.zeros(settings['epochs'])
# NOTE(review): this chunk is whitespace-mangled onto one line AND its first
# statement is truncated — it begins mid-expression with
# "settings['batch_size']).astype(" — so it cannot be safely reformatted from
# this view; left byte-identical.
# What the visible portion does (mirrors the other chunks): computes
# val_steps_per_epoch, creates Keras backend variables (K.variable) for the
# obfuscator/attacker loss weights from settings['loss_weight_obf'], builds a
# per-weight results folder, compiles the obfuscator (MAE) and the combined
# model (MAE + binary crossentropy, weighted), and pre-allocates per-epoch
# metric arrays.
# BUG (flagged, not fixed here): folder + '\obfuscator.png' uses a backslash
# as the path separator ('\o' is an invalid escape); should be
# os.path.join(folder, 'obfuscator.png') once the chunk is un-mangled.