Example #1
    def fit(self, S, solver='L-BFGS-B', tol=1e-5, Kx=None, Ks=None):
        """
        Fit the regressor.

        Parameters
        ----------
        S : spatiotempovk.spatiotempdata.SpatioTempData
            The data to fit the regressor on
        solver : str
            Optimization method passed to scipy.optimize.minimize
        tol : float
            Tolerance passed to scipy.optimize.minimize
        Kx : matrix-like, optional
            Precomputed location kernel matrix; computed from S when None
        Ks : matrix-like, optional
            Precomputed sequence kernel matrix; computed from S when None
        """
        self.S = S
        # Inherit the sameloc flag from the data the regressor is fitted on;
        # when sameloc is True it is exploited to speed up computations
        self.sameloc = self.S.sameloc
        if Kx is None:
            # Exploit sameloc=True by using the RepSymMatrix container to avoid storing a huge redundant matrix
            if self.sameloc:
                Kx = repmat.RepSymMatrix(self.kernelx.compute_K(S["x"][0]), rep=(S.get_T(), S.get_T()))
            else:
                Kx = self.kernelx.compute_K(S["x_flat"])
        if Ks is None:
            Ks = self.kernels.compute_K(S["xy_tuple"])
        # alpha0 = np.zeros(S.get_T() * S.get_barM())
        alpha0 = np.random.normal(0, 1, S.get_T() * S.get_barM())
        obj = self.objective_func(S.get_Ms(), S["y_flat"], Kx, Ks)
        grad = self.objective_grad_func(S.get_Ms(), S["y_flat"], Kx, Ks)
        sol = optimize.minimize(fun=obj, x0=alpha0, jac=grad, tol=tol, method=solver)
        self.alpha = sol["x"].reshape((S.get_T(), S.get_barM()))
        print(sol["success"])
Example #2
    def fit(self, S, V, solver='L-BFGS-B', tol=1e-5, Kx=None, Ks=None):
        # Memorize training input and output sets
        # (raw inputs could alternatively be wrapped in spatiotempdata.LocObsSet here)
        self.training_input = S
        self.training_output = V
        # Check whether identical locations across time can be exploited
        if self.training_output.sameloc:
            self.sameloc = True
        if Kx is None:
            # Exploit sameloc=True by using the RepSymMatrix container to avoid storing a huge redundant matrix
            if self.sameloc:
                Kx = repmat.RepSymMatrix(self.kernelx.compute_K(V["x"][0]), rep=(V.get_T(), V.get_T()))
            else:
                Kx = self.kernelx.compute_K(V["x_flat"])
        if Ks is None:
            Ks = self.kernels.compute_K(S["xy_tuple"])
        # alpha0 = np.zeros(V.get_T() * V.get_barM())
        alpha0 = np.random.normal(0, 1, V.get_T() * V.get_barM())
        obj = self.objective_func(V.get_Ms(), V["y_flat"], Kx, Ks)
        grad = self.objective_grad_func(V.get_Ms(), V["y_flat"], Kx, Ks)
        record = []
        sol = optimize.minimize(fun=obj, x0=alpha0, jac=grad, tol=tol,
                                method=solver, callback=lambda X: record.append(obj(X)))
        self.alpha = sol["x"].reshape((V.get_T(), V.get_barM()))
        sol["record"] = record
        return sol
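
This variant returns scipy's result dict augmented with a per-iteration record of objective values collected by the callback, so convergence can be inspected after training. A hypothetical usage, with reg, Strain_input and Strain_output standing in for a configured regressor and its data:

sol = reg.fit(Strain_input, Strain_output)
print(sol["success"], sol["nit"])  # scipy's convergence flag and iteration count
print(sol["record"][:5])           # first few objective values along the run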
Example #3
    def predict(self, Slast, Xnew):
        # Exploit sameloc=True by using the RepSymMatrix container to avoid storing a huge redundant matrix
        if self.sameloc:
            Kxnew = repmat.RepSymMatrix(self.kernelx.compute_K(self.S["x"][0]), rep=(self.S.get_T(), 1))
        else:
            Kxnew = self.kernelx.compute_Knew(self.S["x_flat"], Xnew)
        Ksnew = self.kernels.compute_Knew(self.S["xy_tuple"], Slast["xy_tuple"])
        # The product order is chosen for compatibility with algebra.repeated_matrix.RepSymMatrix;
        # the result equals Ksnew.T @ alpha @ Kxnew
        return (Kxnew.transpose().dot(self.alpha.T).dot(Ksnew)).T
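
The transposed product order is just the identity (Kxnew.T @ alpha.T @ Ksnew).T = Ksnew.T @ alpha @ Kxnew, arranged so that the RepSymMatrix appears as the left factor of each dot. A quick check with plain arrays (the shapes below are arbitrary placeholders):

import numpy as np

rng = np.random.default_rng(0)
Kxnew = rng.normal(size=(6, 4))   # stands in for the location kernel block
alpha = rng.normal(size=(3, 6))   # stands in for the fitted coefficients
Ksnew = rng.normal(size=(3, 2))   # stands in for the sequence kernel block
assert np.allclose((Kxnew.T @ alpha.T @ Ksnew).T, Ksnew.T @ alpha @ Kxnew)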
Example #4

# Script excerpt; assumes earlier lines defined datain, dataout, dataintest,
# locs and Ntrain, plus imports along the lines of (module paths inferred,
# not confirmed):
#   import os, pickle
#   from spatiotempovk import kernels, losses, regularizers, regressors
#   import algebra.repeated_matrix as repmat

dataout = dataout.extract_subseq(0, Ntrain)

# Kernels
kernelx = kernels.GaussianKernel(sigma=0.3)
Kxin = kernelx.compute_K(locs)
kernely = kernels.GaussianKernel(sigma=0.5)
Kyin = kernely.compute_K(datain["y_flat"])
kers = kernels.ConvKernel(kernelx, kernely, Kxin, Kyin, sameloc=True)
Ks = kers.compute_K_from_mat(datain.Ms)

# Build regressor
l2 = losses.L2Loss()
lamb = 0
mu = 0
smoothreg = regularizers.TikhonovSpace()
globalreg = regularizers.TikhonovTime()
regressor = regressors.DiffLocObsOnFuncReg(l2, smoothreg, globalreg, mu, lamb,
                                           kernelx, kers)

# Fit regressor
Kxout = repmat.RepSymMatrix(Kxin, (Ntrain, Ntrain))
solu = regressor.fit(datain, dataout, Kx=Kxout, Ks=Ks)

# Predictions
predtrain = regressor.predict(datain.extract_subseq(0, 10), datain["x"][0])
predtest = regressor.predict(dataintest, datain["x"][0])

# Dump results
with open(os.getcwd() + "/output.pkl", "wb") as outfile:
    pickle.dump((predtrain, predtest, solu, Ks, Kxout), outfile)
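
The dumped tuple can later be restored in the same order; a minimal round trip, assuming the same working directory:

with open(os.getcwd() + "/output.pkl", "rb") as infile:
    predtrain, predtest, solu, Ks, Kxout = pickle.load(infile)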
Example #5

# Script excerpt. The indented lines below are the surviving body of a
# monitoring loop that records the objective value and gradient norm at each
# iteration i; the enclosing loop itself is missing from the excerpt.
    objs.append(obj_func(alpha))
    gradnorms.append(np.linalg.norm(grad))
    print(i)

test_reg = regressors.DiffLocObsOnFuncReg(loss, spacereg, timereg, mu, lamb,
                                          gausskerx, convkers)

datafitting = functools.partial(test_reg.data_fitting, Strain_output.Ms,
                                Strain_output["y_flat"], Kx, Ks)
datafitting_prime = functools.partial(test_reg.data_fitting_prime,
                                      Strain_output.Ms,
                                      Strain_output["y_flat"], Kx, Ks)

# Redefinition: switch to Strain_input.Ms and wrap Kx in the RepSymMatrix
# container (these override the two partials defined just above)
datafitting = functools.partial(
    test_reg.data_fitting, Strain_input.Ms, Strain_output["y_flat"],
    repmat.RepSymMatrix(Kx, (ntrain - 1, ntrain - 1)), Ks)
datafitting_prime = functools.partial(
    test_reg.data_fitting_prime, Strain_input.Ms, Strain_output["y_flat"],
    repmat.RepSymMatrix(Kx, (ntrain - 1, ntrain - 1)), Ks)
regspace = functools.partial(test_reg.smoothreg,
                             repmat.RepSymMatrix(Kx, (ntrain - 1, ntrain - 1)),
                             Ks)
regspace_prime = functools.partial(
    test_reg.smoothreg.prime,
    repmat.RepSymMatrix(Kx, (ntrain - 1, ntrain - 1)), Ks)
# Time regularizer and its derivative, mirroring the regspace pair above
regtime = functools.partial(test_reg.globalreg,
                            repmat.RepSymMatrix(Kx, (ntrain - 1, ntrain - 1)),
                            Ks)
regtime_prime = functools.partial(
    test_reg.globalreg.prime,
    repmat.RepSymMatrix(Kx, (ntrain - 1, ntrain - 1)), Ks)
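
These partials freeze everything except the coefficient vector, which makes a finite-difference check of the analytic gradients straightforward. The sketch below assumes the full objective weights the two penalties by mu and lamb (as the DiffLocObsOnFuncReg constructor arguments suggest) and that Strain_output.get_barM() gives the flattened coefficient size per time step; both are assumptions, not confirmed API.

import numpy as np
from scipy import optimize

# Assumed composition of the full objective and its gradient
full_obj = lambda a: datafitting(a) + mu * regspace(a) + lamb * regtime(a)
full_grad = lambda a: (datafitting_prime(a) + mu * regspace_prime(a)
                       + lamb * regtime_prime(a))

a0 = np.random.normal(0, 1, (ntrain - 1) * Strain_output.get_barM())
# check_grad returns the l2 distance between analytic and finite-difference gradients
print(optimize.check_grad(full_obj, full_grad, a0))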
Example #6
# Compute convolution kernel matrix
# Ks = convkers.compute_K_from_mat(Ms)
Ks = convkers.compute_K(Strain["xy_tuple"])

# Define loss
loss = losses.L2Loss()

# Define regularizers and regularization params
spacereg = regularizers.TikhonovSpace()
timereg = regularizers.TikhonovTime()
mu = 0.00001
lamb = 0.00001

# Initialize and train regressor
reg = regressors.DiffSpatioTempRegressor(loss, spacereg, timereg, mu, lamb, gausskerx, convkers)
reg.fit(Strain, Ks=Ks, Kx=repmat.RepSymMatrix(Kx, (ntrain, ntrain)))

# Predict at new locations
# Xnew = np.array(list(itertools.product(range(nx), range(ny))))
Xnew = Strain["x_flat"][:125, :]
Ypred = reg.predict(Slast, Xnew)
Ytrainpred = reg.predict(Strain_input, Xnew)

end = time.time()
print(end - start)

# ################## TESTS ##################

# Time plots
inp0 = np.array([Strain_input["y"][i][0, 0] for i in range(ntrain - 1)])