Example #1
def sparse_sift(image, fraction=1.0):
    """ sparse oriented SIFT at Harris-LaPlace and Difference of Gaussians keypoints 
        use VLFEAT vl_covdet through octave; expects a grayscale image
    """
    octave.eval("addpath ~/CC/vlfeat/vlfeat-0.9.20/toolbox")
    octave.eval("vl_setup")
    octave.push("im", image)
    octave.eval("im = single(im);")
    octave.eval(
        "[kp,sift_hl] = vl_covdet(im, 'method', 'HarrisLaplace', 'EstimateOrientation', true); "
    )
    # Note: kp is overwritten here, so the returned keypoints come from the DoG detector only.
    octave.eval(
        "[kp,sift_dog] = vl_covdet(im, 'method', 'DoG', 'EstimateOrientation', true); "
    )
    octave.eval("descrs = [sift_hl, sift_dog];")
    descriptors = octave.pull("descrs")
    kp = octave.pull("kp")

    # flip from column-major to row-major
    descriptors = descriptors.T

    if fraction < 1.0:
        descriptors = random_sample(descriptors, fraction)

    return kp, descriptors
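A hypothetical call site for the function above, assuming the Octave and VLFeat paths inside sparse_sift are valid on the machine; the synthetic image is only a stand-in for a real grayscale photograph:

import numpy as np

# Placeholder grayscale image; replace with a real photograph for meaningful keypoints.
image = (np.random.rand(256, 256) * 255).astype(np.float32)
keypoints, descriptors = sparse_sift(image)    # fraction < 1.0 would also need the
print(keypoints.shape, descriptors.shape)      # random_sample helper (sketched at the end of Example #6)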
Example #2
 def getData(self, filepath):
     octave.eval("load('" + filepath + "', '-mat')")  # TODO: separate Octave instance for each user
     data = octave.pull('Data')
     # TODO: error checking + memoization
     return data
Example #3
 def get(self, expid, variables):
     '''
     Retrieve one or more variables from the workspace of the current Octave session.
     '''
     toReturn = {}
     for name in variables:
         try:
             toReturn[name] = octave.pull(name)
         except Exception:
             # Skip variables that do not exist in the Octave workspace.
             pass
     return toReturn
Example #4
    def fit(self, K, y):
        """Learn a low-rank kernel approximation.

        :param K: (``numpy.ndarray``) or of (``Kinterface``). The kernel to be approximated with G.

        :param y: (``numpy.ndarray``) Class labels :math:`y_i \in {-1, 1}` or regression targets.
        """

        # Convert to explicit form
        K = K[:, :]
        y = y.reshape((len(y), 1))

        # Call original implementation
        octave.push(["K", "y", "rank", "centering", "kappa", "delta", "tol"], [
            K, y, self.rank, self.centering, self.kappa, self.delta, self.tol
        ])
        octave.eval(
            "[G, P, Q, R, error1, error2, error, predicted_gain, true_gain] = csi(K, y, rank, centering, kappa, delta, tol)",
            verbose=False)
        G, P, Q, R, error1, error2, error, predicted_gain, true_gain = \
            octave.pull(["G", "P", "Q", "R", "error1", "error2", "error", "predicted_gain", "true_gain"])
        R = np.atleast_2d(np.array(R))

        # Octave indexes from 1
        P = P.ravel().astype(int) - 1

        # Resort rows to respect the order
        n, k = K.shape[0], self.rank
        self.I = self.active_set_ = list(P[:k])

        Go = np.zeros((n, k))
        Qo = np.zeros((n, k))
        Ro = np.zeros((k, k))
        km = min(k, G.shape[1])
        Go[P, :km] = G[:, :km]
        Qo[P, :km] = Q[:, :km]
        Ro[:km, :km] = R[:km, :km]
        self.G = Go[:, :self.rank]
        self.P = P[:self.rank]
        self.Q = Qo[:, :]
        self.R = Ro[:, :self.rank]
        self.error1 = error1
        self.error2 = error2
        self.error = error
        self.predicted_gain = predicted_gain
        self.true_gain = true_gain
        self.trained = True
        self.active_set_ = self.I[:self.rank]
Example #5
def plot_calibration_mapping(calibration_model,
                             min_score,
                             max_score,
                             resolution=1000,
                             file_name='calibration_mapping.png'):
    # Function for plotting what probabilities different scores get mapped to.
    # "General purpose prediction function"
    # PERHAPS ADD PROBABILITY DISTRIBUTION OF TRAINING DATA (OR TESTING?)?
    # WOULD INDICATE HOW MANY SAMPLES FALL INTO ONE BIN.
    diff = max_score - min_score
    scores = [
        min_score + i * diff / float(resolution) for i in range(resolution + 1)
    ]
    try:  # IR model (e.g. sklearn IsotonicRegression)
        probabilities = calibration_model.predict(scores)
    except Exception:
        try:  # ENIR (R model, accessed via rpy2)
            import rpy2.robjects as robjects
            from rpy2.robjects.packages import importr
            enir = importr('enir')
            r = robjects.r
            # Automatic conversion of numpy arrays to R vectors
            import rpy2.robjects.numpy2ri
            rpy2.robjects.numpy2ri.activate()
            # ENIR-MODEL MIGHT NEED TO BE PUSHED TO R-ENVIRONMENT?
            probabilities = enir.enir_predict(calibration_model,
                                              robjects.FloatVector(scores))
            probabilities = np.array(probabilities)
        except Exception:
            try:  # BBQ (Matlab/Octave model, accessed via oct2py)
                from oct2py import octave
                octave.eval("addpath('./calibration/BBQ/')", verbose=False)
                octave.push('scores', scores, verbose=False)
                octave.push('calibration_model',
                            calibration_model,
                            verbose=False)
                octave.eval(
                    'probabilities = predict(calibration_model, scores, 1)',
                    verbose=False)
                probabilities = octave.pull('probabilities', verbose=False)
                probabilities = np.array([item[0] for item in probabilities])
            except Exception:
                pass  # Continue with BIR and WABIR? RCIR?
    # Plot score vs. probability:
    plt.plot(scores, probabilities)
    plt.title("Calibration mapping")
    plt.savefig(file_name)
    plt.gcf().clear()
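A hypothetical call with a scikit-learn IsotonicRegression model, which matches the "IR model" branch above; the synthetic scores and labels are illustrative only:

import numpy as np
from sklearn.isotonic import IsotonicRegression
# Assumes `import matplotlib.pyplot as plt` at module level, as used inside plot_calibration_mapping.

rng = np.random.default_rng(0)
scores = rng.uniform(0, 1, 1000)
labels = (rng.uniform(0, 1, 1000) < scores).astype(int)   # labels loosely follow the scores

ir_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds='clip')
ir_model.fit(scores, labels)

plot_calibration_mapping(ir_model, min_score=0.0, max_score=1.0,
                         resolution=500, file_name='ir_mapping.png')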
Example #6
def dense_sift(image, fraction=1.0):
    """ dense SIFT
        use VLFEAT vl_phow through octave; expects a grayscale image
    """
    octave.push("im", image)
    octave.eval("im = single(im);")
    octave.eval("[kp,siftd] = vl_phow(im); ")
    descriptors = octave.pull("siftd")

    # flip from column-major to row-major
    descriptors = descriptors.T

    if fraction < 1.0:
        descriptors = random_sample(descriptors, fraction)

    return descriptors
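The random_sample helper used by both SIFT examples is not shown on this page; a minimal sketch, assuming it keeps a uniformly random fraction of the descriptor rows, could look like this:

import numpy as np

def random_sample(descriptors, fraction):
    # Assumed behaviour: keep a random `fraction` of the descriptor rows.
    n = descriptors.shape[0]
    n_keep = max(1, int(round(n * fraction)))
    idx = np.random.choice(n, size=n_keep, replace=False)
    return descriptors[idx]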
Example #7
import os
os.environ["OCTAVE_EXECUTABLE"] = "C:\\Octave\\Octave-4.2.2\\bin\\octave-cli.exe"

from oct2py import octave
from flask import Flask
from dash import Dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc

flask_app = Flask(__name__)
dash_app = Dash(__name__, server=flask_app)

filepath = 'C:\\Users\\SEC\\Documents\\Alperia\\HydroptModel\\vsm.mod'
octave.eval("load('" + filepath + "', '-mat')")
globalData = octave.pull('Data')

dash_app.layout = html.Div(id='page-content',
                           children=[dcc.Location(id='url', refresh=False)])


@dash_app.callback(Output('page-content', 'children'),
                   [Input('url', 'pathname')])
def display_page(pathname):
    return dash_router(pathname)


def dash_router(url):
    children = render_cockpit(globalData)
    return children
Example #8
from oct2py import octave
from numpy import matrix
from numpy import linalg
from numpy import ma
from numpy import sum
# script outputs possible path combos of a Boggle board
# arbitrary precision arithmetic is nice
octave.addpath('.')
octave.eval('boggleAdjentMatrix')
AdjentMatrix = octave.pull('boggleAdj')
AdjentMatrix = matrix(AdjentMatrix)

SumAdjentMatrix = matrix(ma.zeros((16,16),dtype=int))

for n in range(2,16): #len(word) >= 3 and len(word) == len(path+1)
	SumAdjentMatrix+=AdjentMatrix**n
	
NPaths = sum(SumAdjentMatrix)
print(NPaths)
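The count uses the standard fact that entry (i, j) of the n-th power of an adjacency matrix is the number of walks of length n from node i to node j. A pure-NumPy sketch of the same summation, with a small illustrative adjacency matrix standing in for the Octave-generated Boggle board:

import numpy as np
from numpy.linalg import matrix_power

# 4-node cycle graph as a stand-in for the 16-node Boggle adjacency matrix.
adj = np.array([[0, 1, 0, 1],
                [1, 0, 1, 0],
                [0, 1, 0, 1],
                [1, 0, 1, 0]])

# Sum A**n over path lengths 2..15, as in the script above.
total = sum(matrix_power(adj, n) for n in range(2, 16))
print(total.sum())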
Example #9
    # Naeini's model
    # Comparison to Naeini's model (a Matlab model, run here through Octave).
    print(
        "Training Naeini-model. This might take a while (~20 minutes on a laptop)."
    )
    octave.push('training_scores', naeini_training_scores, verbose=False)
    octave.push('training_class', naeini_training_class, verbose=False)
    octave.eval("options.N0 = 2", verbose=False)
    octave.eval(
        "BBQ_model = build(training_scores', training_class', options)",
        verbose=False)
    # In the following, '1' indicates model averaging, as done in the paper by Naeini et al.
    octave.eval("training_bbq_prob = predict(BBQ_model, training_scores, 1)",
                verbose=False)
    training_bbq_prob = octave.pull("training_bbq_prob", verbose=False)
    training_bbq_prob = np.array([item[0] for item in training_bbq_prob])
    octave.push('test_scores', test_scores, verbose=False)
    octave.eval("test_bbq_prob = predict(BBQ_model, test_scores, 0)",
                verbose=False)
    test_bbq_prob = octave.pull("test_bbq_prob", verbose=False)
    test_bbq_prob = np.array([item[0] for item in test_bbq_prob])
    naeini_bins = len(np.unique(training_bbq_prob))
    naeini_mse = sum((test_bbq_prob - test_class)**2) / len(test_bbq_prob)
    naeini_auc_roc = roc_auc_score(test_class, test_bbq_prob)
    samples_per_bin = int(len(naeini_training_class) / naeini_bins)
    naeini_p = max(training_bbq_prob)  # Highest predicted value.
    # Estimate credible intervals. np.unique() with return_counts=True returns two arrays: the unique
    # probabilities and their counts. Their product corresponds to 'k', whereas the counts correspond to 'n'.
    # For some reason, the library produces some negative values. These are set to zero.
    naeini_data = np.unique(training_bbq_prob, return_counts=True)
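A hedged sketch of the credible-interval step the comment describes, assuming a Beta(k + 1, n - k + 1) posterior per bin (i.e. a uniform prior; the prior actually used downstream is not shown here):

import numpy as np
from scipy.stats import beta

probs, counts = naeini_data                        # unique predicted probabilities and their counts
k = np.maximum(probs * counts, 0)                  # clamp the occasional negative value to zero
n = counts
lower, upper = beta.interval(0.95, k + 1, n - k + 1)   # 95% credible interval per bin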
Example #10
    test_class = data_class[:n_rows * 1 // 3]
    test_scores = data_scores[:n_rows * 1 // 3]
    training_class = data_class[n_rows * 1 // 3:]
    training_scores = data_scores[n_rows * 1 // 3:]

    # Create BBQ-model
    octave.push('training_scores', training_scores, verbose=False)
    octave.push('training_class', training_class, verbose=False)
    octave.eval('options.N0 = 2', verbose=False)
    octave.eval(
        "bbq_model = build(training_scores', training_class', options)",
        verbose=False)
    octave.push('test_scores', test_scores)
    octave.eval("test_prob = predict(bbq_model, test_scores, 1)",
                verbose=False)
    bbq_prob = octave.pull('test_prob', verbose=False)
    bbq_prob = np.array([item[0] for item in bbq_prob])
    bbq_metrics.append(isotonic.get_metrics(test_class, bbq_prob, k=k))
    # Create isotonic regression model
    ir_model = IsotonicRegression(y_min=y_min,
                                  y_max=y_max,
                                  out_of_bounds='clip')
    ir_model.fit(X=training_scores, y=training_class)
    ir_prob = isotonic.predict(ir_model, test_scores)
    ir_metrics.append(isotonic.get_metrics(test_class, ir_prob, k=k))
    # Create ENIR model using R:
    enir_model = enir.enir_build(
        robjects.FloatVector(training_scores.tolist()),
        robjects.BoolVector(training_class.tolist()))
    enir_prob = enir.enir_predict(enir_model,
                                  robjects.FloatVector(test_scores.tolist()))
Example #11
def load_hydopt_data(filepath):
	octave.eval("load('" + filepath + "', '-mat')")  # TODO: separate Octave instance for each user
	data = octave.pull('Data')
	return data
Example #12
def index():
    octave.eval('x = struct("y", {1, 2}, "z", {3, 4});')
    x = octave.pull('x')
    return str(x[0, 1].z)
Example #13
import os
os.environ["OCTAVE_EXECUTABLE"] = "C:\\Octave\\Octave-4.2.2\\bin\\octave-cli.exe"

from oct2py import octave

filepath = 'C:\\Users\\SEC\\Documents\\Alperia\\HydroptModel\\vsm.mod'
octave.eval("load('" + filepath + "', '-mat')")
#x = octave.pull('x')
#return str(x[0, 1].z)
octave.eval("revenue = Data.Asset(1).ScenarioWaterManager.Result.OverallRevenue;")
x = octave.pull('revenue')
print(str(x))
#x = octave.pull('Data.Asset(1).ScenarioWaterManager.Result.OverallRevenue') --> raises an error
data = octave.pull('Data')
x = data.Asset.ScenarioWaterManager.Result.OverallRevenue
print(str(x))
Example #14
    # Create isotonic regression model
    ir_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds='clip')
    ir_model.fit(X=training_scores, y=training_class)

    # Create BBQ-model
    octave.push('training_scores', training_scores, verbose=False)
    octave.push('training_class', training_class, verbose=False)
    octave.eval('options.N0 = 2', verbose=False)
    octave.eval(
        "bbq_model = build(training_scores', training_class', options)",
        verbose=False)
    octave.push('test_scores', test_scores)
    octave.eval("test_prob = predict(bbq_model, test_scores, 1)",
                verbose=False)
    bbq_test_prob = octave.pull('test_prob', verbose=False)
    bbq_test_prob = [item[0] for item in bbq_test_prob]

    # Create RCIR model with d = .20 (?)
    # QUICK HACK TO TEST RCIR-CV INSTEAD OF RCIR WITH SOME d:
    # NOTE THAT RCIR-CV GETS _MORE_ _DATA_ THAN
    # rcir_model = isotonic.train_rcir_cv(training_scores, training_class , d=.1)
    rcir_model = isotonic.train_rcir_cv(training_class, training_scores,
                                        validation_class, validation_scores)

    # Create bootstrap IR model
    bir_model = isotonic.bootstrap_isotonic_regression(training_class,
                                                       training_scores,
                                                       sampling_rate=.95,
                                                       n_models=1000)
Example #15
from oct2py import octave
octave.addpath('/home/iso/PycharmProjects/git_dcstest/matVM')
octave.addpath('/home/iso/PycharmProjects/git_dcstest/matVM/matpower6.0')
octave.addpath('/home/iso/PycharmProjects/git_dcstest/matVM/matpower6.0/t')

octave.addpath('/home/iso/dcstest/matVM')
octave.addpath('/home/iso/dcstest/matVM/matpower6.0')
octave.addpath('/home/iso/dcstest/matVM/matpower6.0/t')

import numpy as np
from pprint import pprint as ppr
import sys

octave.eval("y=1")
#print(octave.eval("y"))
print(octave.pull("y"))

mpc = octave.case33bw_dcs2()
r = octave.runopf(mpc)

ppr(r["gen"])
Example #16
def snmf_fhals(A, k, init='normal'):
    """
    Nonnegative Matrix Factorization.
    
    Hierarchical alternating least squares algorithm
    for computing the approximate low-rank nonnegative matrix factorization of 
    a square `(m, m)` matrix `A`. Given the target rank `k << m`, 
    the input matrix `A` is factored as `A = W Wt`. The nonnegative factor 
    matrices `W` and `Wt` are of dimension `(m, k)` and `(k, m)`, respectively.
           
    
    Parameters
    ----------
    A : array_like, shape `(m, m)`.
        Real nonnegative input matrix.
    
    k : integer, `k << m`.
        Target rank.
    
    init : str `{'normal'}`.
        'normal' : Factor matrices are initialized with scaled, nonnegative
                   uniform random numbers.

    Note: the stopping tolerance (`tol = 1e-3`) and the iteration limit
    (`maxiter = 10000`) are fixed inside this implementation rather than
    exposed as parameters.
    
    
    Returns
    -------
    H : array_like, `(m, k)`.
        Nonnegative factor matrix such that `A` is approximated by `H H^T`.
    """
    
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Error catching
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~     
    m, n = A.shape
    assert m == n
    
    if (A < 0).any():
        raise ValueError("Input matrix with nonnegative elements is required.")    
    
    if A.dtype == np.float32:
        data_type = np.float32

    elif A.dtype == np.float64:
        data_type = np.float64

    else:
        raise ValueError("A.dtype is not supported.")    
    

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~                            
    # Initialization methods for factor matrices W and H
    # 'normal': nonnegative standard normal random init
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 
    
    if init == 'normal':
        n, _ = A.shape
        H = 2 * np.sqrt(np.mean(np.mean(A)) / k) * np.random.rand(n, k)
        maxiter = 10000
        tol = 1e-3
        alpha = np.max(H)**2
        W = H.copy()
        I_k = alpha * np.identity(k)
    else:
        raise ValueError('Initialization method is not supported.')
    #End if

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Iterate the ANLS updates (Octave routine symnmf_anls_iter) until
    # convergence or maxiter is reached:
    # i)  Update W, H and the cached products 'left' and 'right'
    # ii) Break once the projected-gradient violation drops below tol * initgrad
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    projnorm = float('inf')
    left = H.T.dot(H)
    right = A.dot(H)

    octave.push('A', A)
    octave.push('k', k)

    for niter in range(maxiter):
        octave.push('W', W)
        octave.push('H', H)
        octave.push('left', left)
        octave.push('right', right)

        octave.eval("[W, H, left, right, violation] = symnmf_anls_iter(A, W, H, left, right, k)", verbose=False)

        W = octave.pull("W")
        H = octave.pull("H")
        left = octave.pull("left")
        right = octave.pull("right")
        violation = octave.pull("violation")

        if niter == 0:
            initgrad = violation
        else:
            projnorm = violation

        if projnorm < tol * initgrad:
            break

    return H
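A hypothetical usage of snmf_fhals, assuming symnmf_anls_iter.m is already on the Octave path; the random symmetric nonnegative test matrix is illustrative only:

import numpy as np

rng = np.random.default_rng(0)
B = rng.random((50, 5))
A = B @ B.T                       # symmetric, nonnegative, approximately rank 5
H = snmf_fhals(A, k=5)
print(np.linalg.norm(A - H @ H.T) / np.linalg.norm(A))   # relative reconstruction error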