import os

import numpy as np
from metric_learn import ITML_Supervised, LFDA, LMNN, NCA
from numpy.testing import assert_array_almost_equal

# Project-local helpers assumed to be importable in this module:
# initialize_results_dir, backup_params, LoaderFactory, TripleLearner.


def main(params):

    initialize_results_dir(params.get('results_dir'))
    backup_params(params, params.get('results_dir'))

    print('>>> loading data...')

    X_train, y_train, X_test, y_test = LoaderFactory().create(
        name=params.get('dataset'),
        root=params.get('dataset_dir'),
        random=True,
        seed=params.getint('split_seed'))()

    print('<<< data loaded')

    print('>>> computing psd matrix...')

    if params.get('algorithm') == 'identity':
        psd_matrix = np.identity(X_train.shape[1], dtype=X_train.dtype)

    elif params.get('algorithm') == 'nca':
        nca = NCA(init='auto',
                  verbose=True,
                  random_state=params.getint('algorithm_seed'))
        nca.fit(X_train, y_train)
        psd_matrix = nca.get_mahalanobis_matrix()

    elif params.get('algorithm') == 'lmnn':
        lmnn = LMNN(init='auto',
                    verbose=True,
                    random_state=params.getint('algorithm_seed'))
        lmnn.fit(X_train, y_train)
        psd_matrix = lmnn.get_mahalanobis_matrix()

    elif params.get('algorithm') == 'itml':
        itml = ITML_Supervised(verbose=True,
                               random_state=params.getint('algorithm_seed'))
        itml.fit(X_train, y_train)
        psd_matrix = itml.get_mahalanobis_matrix()

    elif params.get('algorithm') == 'lfda':
        lfda = LFDA()
        lfda.fit(X_train, y_train)
        psd_matrix = lfda.get_mahalanobis_matrix()

    elif params.get('algorithm') == 'arml':
        learner = TripleLearner(
            optimizer=params.get('optimizer'),
            optimizer_params={
                'lr': params.getfloat('lr'),
                'momentum': params.getfloat('momentum'),
                'weight_decay': params.getfloat('weight_decay'),
            },
            criterion=params.get('criterion'),
            criterion_params={'calibration': params.getfloat('calibration')},
            n_epochs=params.getint('n_epochs'),
            batch_size=params.getint('batch_size'),
            random_initialization=params.getboolean('random_initialization',
                                                    fallback=False),
            update_triple=params.getboolean('update_triple', fallback=False),
            device=params.get('device'),
            seed=params.getint('learner_seed'))

        psd_matrix = learner(X_train,
                             y_train,
                             n_candidate_mins=params.getint('n_candidate_mins',
                                                            fallback=1))

    else:
        raise ValueError(f"unsupported algorithm: {params.get('algorithm')}")

    print('<<< psd matrix computed')

    np.savetxt(os.path.join(params.get('results_dir'), 'psd_matrix.txt'),
               psd_matrix)
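

# Usage sketch (assumptions: `params` is a configparser section proxy, which
# is what the .get/.getint/.getfloat/.getboolean calls above suggest; the
# filename 'params.ini' and section name 'experiment' are illustrative only).
if __name__ == '__main__':
    import configparser

    config = configparser.ConfigParser()
    config.read('params.ini')
    main(config['experiment'])

    # The saved matrix can later be reloaded with
    # np.loadtxt(os.path.join(<results_dir>, 'psd_matrix.txt')).
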

# Test fragment: checks that the Mahalanobis matrix returned by NCA factors
# as L.T @ L, where L = nca.components_ is the learned linear map. The
# enclosing class here is only a placeholder for the original test case,
# which is assumed to provide the self.X / self.y fixtures.
class NCATestFragment:
    def test_nca(self):
        n = self.X.shape[0]
        nca = NCA(max_iter=(100000 // n))
        nca.fit(self.X, self.y)
        L = nca.components_
        assert_array_almost_equal(L.T.dot(L), nca.get_mahalanobis_matrix())
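

# Sketch of the property checked in test_nca above (not part of any test
# suite; all data below is synthetic): M = L.T @ L means the Mahalanobis
# distance induced by M equals the Euclidean distance after projecting with L.
def _mahalanobis_equals_projected_euclidean():
    rng = np.random.RandomState(0)
    L = rng.rand(3, 5)                # stand-in for nca.components_
    M = L.T.dot(L)                    # what get_mahalanobis_matrix() returns
    x, y = rng.rand(5), rng.rand(5)
    d_mahalanobis = np.sqrt((x - y).dot(M).dot(x - y))
    d_projected = np.linalg.norm(L.dot(x) - L.dot(y))
    assert np.isclose(d_mahalanobis, d_projected)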