Example #1
import numpy as np

# `cp`, `base`, and `HDF5Logger` are project-local helpers (CP loss/gradient
# routines, Kruskal-tensor reconstruction, and an HDF5-backed metric logger).

def _create_cp_logger(fname, ex_name, store_frequency, X, rank, init="random"):
    # Baselines for the fit metrics: loss of the initial factors and the
    # norm of the data tensor.
    init_factors, _ = cp.initialize_factors(X, rank, init)
    init_loss = cp.cp_loss(init_factors, X)
    X_norm = np.linalg.norm(X.ravel())
    del init_factors  # only the scalar baseline is needed

    def loss(factors):
        return cp.cp_loss(factors, X)

    def gradient(factors):
        # Norm of the mode-wise gradients concatenated into a single vector.
        return np.linalg.norm(np.concatenate(cp.cp_grad(factors, X)))

    def cp_opt_fit(factors):
        # Fraction of the initial loss that has been eliminated.
        return 1 - cp.cp_loss(factors, X) / init_loss

    def cp_als_fit(factors):
        # Relative fit: 1 - ||X - X_hat|| / ||X||.
        err = X - base.ktensor(*factors)
        return 1 - np.linalg.norm(err.ravel()) / X_norm

    args = ()  # no extra positional arguments for the metric functions

    log_metrics = {
        'loss': loss,
        'gradient': gradient,
        'cp_opt_fit': cp_opt_fit,
        'cp_als_fit': cp_als_fit,
    }

    return HDF5Logger(fname, ex_name, store_frequency, args, **log_metrics)
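
A minimal usage sketch for the factory above, assuming the project-local `cp`, `base`, and `HDF5Logger` imports resolve and that a dense NumPy array is a valid data tensor:

import numpy as np

# Hypothetical call: log a rank-3 decomposition of a random 10x10x10 tensor,
# writing metrics to results.h5 under the group 'cp_run_0'. The interpretation
# of store_frequency (every 5th iteration) is assumed from the parameter name.
X = np.random.default_rng(0).standard_normal((10, 10, 10))
logger = _create_cp_logger('results.h5', 'cp_run_0', 5, X, rank=3)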
Example #2
def _cp_opt_final_eval(X, experiment_params, outputs):
    num_factors = len(X.shape)
    factors = [outputs[f'factor_mode_{i}'] for i in range(num_factors)]
    init_factors = [outputs[f'init_factor_mode_{i}'] for i in range(num_factors)]

    # Baselines for the fit metrics, recomputed from the stored initial factors.
    init_loss = cp.cp_loss(init_factors, X)
    X_norm = np.linalg.norm(X.ravel())
    err = X - base.ktensor(*factors)
    return {
        'final_loss': cp.cp_loss(factors, X),
        'final_gradient': np.linalg.norm(np.concatenate(cp.cp_grad(factors, X))),
        'final_cp_opt_fit': 1 - cp.cp_loss(factors, X) / init_loss,
        'final_cp_als_fit': 1 - np.linalg.norm(err.ravel()) / X_norm,
    }
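
The two fit metrics use different baselines: `cp_opt_fit` measures loss reduction relative to the initial factors, while `cp_als_fit` is the relative fit 1 - ||X - X_hat|| / ||X||. A self-contained NumPy sketch of the latter, using `np.einsum` in place of `base.ktensor` (an assumption about what `ktensor` reconstructs):

import numpy as np

# A perfect reconstruction gives a fit of exactly 1; the fit drops towards 0
# (and can go negative) as the reconstruction error grows.
rng = np.random.default_rng(0)
A, B, C = (rng.standard_normal((dim, 3)) for dim in (5, 6, 7))
X = np.einsum('ir,jr,kr->ijk', A, B, C)      # exact rank-3 tensor
X_hat = np.einsum('ir,jr,kr->ijk', A, B, C)  # perfect CP reconstruction
print(1 - np.linalg.norm((X - X_hat).ravel()) / np.linalg.norm(X.ravel()))  # 1.0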
Example #3
def _cp_als_final_eval(X, experiment_params, outputs):
    num_factors = len(outputs) - 1  # outputs: 'weights' plus one factor per mode
    weights = outputs['weights']
    factors = [outputs[f'factor_mode_{i}'] for i in range(num_factors)]
    # Absorb the ALS weight vector evenly into the factors so the
    # reconstruction and loss can be computed from the factors alone.
    factors = [f * weights[np.newaxis] ** (1 / num_factors) for f in factors]
    rank = experiment_params['rank']
    init = experiment_params['init']

    # Recreate the initial factors to get the baseline loss for the fit metric.
    init_factors, _ = cp.initialize_factors(X, rank, init)
    init_loss = cp.cp_loss(init_factors, X)
    X_norm = np.linalg.norm(X.ravel())
    err = X - base.ktensor(*factors)
    return {
        'final_loss': cp.cp_loss(factors, X),
        'final_gradient': np.linalg.norm(np.concatenate(cp.cp_grad(factors, X))),
        'final_cp_opt_fit': 1 - cp.cp_loss(factors, X) / init_loss,
        'final_cp_als_fit': 1 - np.linalg.norm(err.ravel()) / X_norm,
    }
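
A standalone check of the weight-absorption step above: scaling each of the `num_factors` matrices by `weights ** (1 / num_factors)` spreads the ALS weight vector evenly over the modes without changing the reconstruction (shown here with two modes, so the exponent is 1/2):

import numpy as np

# With num_modes factor matrices, multiplying each one by
# weights ** (1 / num_modes) absorbs the weight vector exactly.
rng = np.random.default_rng(1)
A, B = rng.standard_normal((4, 3)), rng.standard_normal((5, 3))
weights = rng.uniform(0.5, 2.0, size=3)
X_weighted = np.einsum('r,ir,jr->ij', weights, A, B)
A2 = A * weights[np.newaxis] ** (1 / 2)
B2 = B * weights[np.newaxis] ** (1 / 2)
print(np.allclose(X_weighted, np.einsum('ir,jr->ij', A2, B2)))  # True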
Example #4
def _create_cp_opt_logger(fname, ex_name, store_frequency, X, rank, init="random"):
    # Baselines computed for parity with _create_cp_logger; the metrics below
    # only use the loss and gradient, so init_loss and X_norm go unused here.
    init_factors, _ = cp.initialize_factors(X, rank, init)
    init_loss = cp.cp_loss(init_factors, X)
    X_norm = np.linalg.norm(X.ravel())
    del init_factors

    def loss(parameters):
        # `parameters` is the flat vector of factor entries used by the
        # scipy-style interface.
        return cp._cp_loss_scipy(parameters, rank, X.shape, X)

    def gradient(parameters):
        return np.linalg.norm(np.ravel(cp._cp_grad_scipy(parameters, rank, X.shape, X)))

    args = ()  # no extra positional arguments for the metric functions

    log_metrics = {
        'loss': loss,
        'gradient': gradient,
    }

    return HDF5Logger(fname, ex_name, store_frequency, args, **log_metrics)
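
The scipy-style callbacks above receive one flat parameter vector rather than a list of factor matrices. The exact packing used by `cp._cp_loss_scipy` is not shown, so the concatenate-then-reshape layout below is only an illustrative assumption:

import numpy as np

# Hypothetical flat-vector layout: each factor matrix ravelled and
# concatenated mode by mode, then unpacked via cumulative offsets.
shape, rank = (4, 5, 6), 3
rng = np.random.default_rng(2)
factors = [rng.standard_normal((dim, rank)) for dim in shape]
parameters = np.concatenate([f.ravel() for f in factors])

offsets = np.cumsum([0] + [dim * rank for dim in shape])
unpacked = [parameters[offsets[i]:offsets[i + 1]].reshape(dim, rank)
            for i, dim in enumerate(shape)]
print(all(np.allclose(f, g) for f, g in zip(factors, unpacked)))  # True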
Example #5
def cp_opt_fit(factors):
    # `X` and `init_loss` are captured from the enclosing logger-factory scope.
    return 1 - cp.cp_loss(factors, X) / init_loss
Example #6
def loss(factors):
    # `X` is the data tensor captured from the enclosing scope.
    return cp.cp_loss(factors, X)