def evaluate_and_save_performance_power(seed, lr_power):
    """Run angle alignment with an Ftrl optimizer and log the final loss.

    Loads predicted quaternions and ground-truth angles from ``data/``,
    runs ``training_angle_alignment`` with an Ftrl optimizer configured
    with the given ``learning_rate_power``, and appends the final loss to
    ``results/alignment/loss_lr_power.csv``.

    Args:
        seed: RNG seed forwarded to the training routine (also part of the
            save id, so repeated runs are distinguishable).
        lr_power: ``learning_rate_power`` hyper-parameter for Ftrl.
    """
    # Setup -- load predictions and ground truth from disk.
    quaternion_predicted = np.load("data/predicted_quaternions2.npy")
    angles_predicted = quaternion2euler(quaternion_predicted)
    angles_true = np.load("data/angles_true.npy")
    NUM_PROJECTIONS = len(angles_true)
    kwargs = {
        'm': [1.0, 1.0, 1.0, 1.0],
        'steps': 1000,
        'batch_size': 256,
        'projection_idx': range(NUM_PROJECTIONS),
        'angles_true': angles_true,
        'angles_predicted': angles_predicted
    }
    optimizer = Ftrl(learning_rate_power=lr_power, learning_rate=0.1)

    # Optimization algorithm
    rotation, loss, collect_data, trajectory = training_angle_alignment(
        optimizer=optimizer,
        seed=seed,
        save_id=f"{lr_power}_power_{seed}",
        **kwargs)

    # Save loss -- append a row to the CSV, creating it on first use.
    loss_file_path = 'results/alignment/loss_lr_power.csv'
    if os.path.exists(loss_file_path):
        df = pd.read_csv(loss_file_path)
    else:
        df = pd.DataFrame(columns=['lr', 'final_loss'])
    # DataFrame.append was removed in pandas 2.0; build a one-row frame
    # and concatenate instead.
    new_row = pd.DataFrame([{'lr': lr_power, 'final_loss': loss}])
    df = pd.concat([df, new_row], ignore_index=True)
    df.to_csv(loss_file_path, index=False)
def test_initialize(self):
    """Check that optimizer slots are created with the expected initializers."""
    cases = [
        (Adam(), 4, "m", init_ops.constant_initializer(0.0)),
        (Ftrl(initial_accumulator_value=0.5), 4, "accumulator",
         init_ops.constant_initializer(0.5)),
        (Adagrad(initial_accumulator_value=0.5), 4, "accumulator",
         init_ops.constant_initializer(0.5)),
    ]
    for optimizer, dim, slot_name, initializer in cases:
        self._compare_initialize_values(optimizer, dim, slot_name, initializer)
def test_allowed_slot_names(self):
    """Each optimizer configuration must expose exactly its expected slot names."""
    cases = (
        (SGD(), []),
        (SGD(momentum=0.2), ["momentum"]),
        (Adam(), ["m", "v"]),
        (Adam(amsgrad=True), ["m", "v", "vhat"]),
        (Adamax(), ["m", "v"]),
        (Nadam(), ["m", "v"]),
        (Adadelta(), ["accum_grad", "accum_var"]),
        (Adagrad(), ["accumulator"]),
        (Ftrl(), ["accumulator", "linear"]),
        (RMSprop(), ["rms"]),
        (RMSprop(momentum=0.2), ["rms", "momentum"]),
        (RMSprop(centered=True), ["rms", "mg"]),
        (RMSprop(momentum=0.2, centered=True), ["rms", "momentum", "mg"]),
    )
    for optimizer, slot_names in cases:
        self._compare_slot_names(optimizer, slot_names)
def _get_optimizer(opt_type, learning_rate):
    """Build an optimizer instance from its string name.

    Args:
        opt_type: one of 'adam', 'nadam', 'sgd', 'adadelta', 'adagrad',
            'adamax', 'ftrl', 'rmsprop'.
        learning_rate: learning rate passed to the optimizer constructor.

    Raises:
        NotImplementedError: if ``opt_type`` is not a recognized name.
    """
    optimizer_classes = {
        'adam': Adam,
        'nadam': Nadam,
        'sgd': SGD,
        'adadelta': Adadelta,
        'adagrad': Adagrad,
        'adamax': Adamax,
        'ftrl': Ftrl,
        'rmsprop': RMSprop,
    }
    try:
        optimizer_cls = optimizer_classes[opt_type]
    except KeyError:
        raise NotImplementedError('Optimizer type %s is not implemented.' % opt_type) from None
    return optimizer_cls(learning_rate=learning_rate)
# store confusion matrix
# NOTE(review): scope is ambiguous in the mangled source -- these first
# statements presumably sit inside the training routine; verify indentation
# against the original file.
with open(result_folder + '/' + model_name + '_cm_' + str(num_classes), 'wb') as file_pi:
    pickle.dump(confusion_matrix(labels, predictions), file_pi)

# Free per-run objects before the next experiment to keep memory bounded.
del model, history, predictions, labels, result, test_labels, result_fix

# Candidate optimizers to sweep over.
# 'lr' is deprecated (removed in Keras 3); use 'learning_rate' instead,
# consistent with the rest of this project.
optimizers = {
    'sgd': SGD(learning_rate=0.001, momentum=0.9),
    'adam': Adam(),
    'adamax': Adamax(),
    'adadelta': Adadelta(),
    'adagrad': Adagrad(),
    'ftrl': Ftrl(),
    'nadam': Nadam(),
    'rmsprop': RMSprop()
}

# Candidate weight initializers to sweep over.
initializers = [
    'constant', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform',
    'lecun_normal', 'lecun_uniform', 'random_normal', 'truncated_normal'
]

# Try lots of different small models, see what works best
fit_evaluate_model(mymodels.get_simple_cnn(IMG_DIM, num_classes), 'mymodel', 'baseline')
fit_evaluate_model(mymodels.vgg1(IMG_DIM, num_classes), 'mymodel', 'vgg1')
fit_evaluate_model(mymodels.vgg2(IMG_DIM, num_classes), 'mymodel', 'vgg2')
fit_evaluate_model(mymodels.vgg3(IMG_DIM, num_classes), 'mymodel', 'vgg3')