def runtest(self):
     """Fit a linear regression on the test RDD and save the stats
     (downcast to float16) into a temporary MATLAB .mat file."""
     model = RegressionModel.load(os.path.join(self.modelfile, "linear"),
                                  "linear")
     betas, stats, resid = model.fit(self.rdd)
     # index the (key, value) pair explicitly: lambda tuple unpacking
     # (`lambda (_, v):`) was removed in Python 3 (PEP 3113)
     result = stats.map(lambda kv: float16(kv[1])).collect()
     savemat(self.savefile + "tmp.mat",
             mdict={"tmp": result},
             oned_as='column')
Esempio n. 2
0
 def test_linear_regress(self):
     """Linear regression on a single record should recover the known
     betas, stats, and residuals."""
     data = self.sc.parallelize([(1, array([1.5, 2.3, 6.2, 5.1, 3.4, 2.1]))])
     x = array([
         array([1, 0, 0, 0, 0, 0]),
         array([0, 1, 0, 0, 0, 0])
     ])
     model = RegressionModel.load(x, "linear")
     betas, stats, resid = model.fit(data)
     # index the (key, value) pairs explicitly: lambda tuple unpacking
     # is a syntax error in Python 3 (PEP 3113)
     assert(allclose(betas.map(lambda kv: kv[1]).collect()[0], array([-2.7, -1.9])))
     assert(allclose(stats.map(lambda kv: kv[1]).collect()[0], array([0.42785299])))
     assert(allclose(resid.map(lambda kv: kv[1]).collect()[0], array([0, 0, 2, 0.9, -0.8, -2.1])))
Esempio n. 3
0
 def test_linear_regress(self):
     """Linear regression on a single record should recover the known
     betas, stats, and residuals."""
     data = self.sc.parallelize([(1, array([1.5, 2.3, 6.2, 5.1, 3.4,
                                            2.1]))])
     x = array([array([1, 0, 0, 0, 0, 0]), array([0, 1, 0, 0, 0, 0])])
     model = RegressionModel.load(x, "linear")
     betas, stats, resid = model.fit(data)
     # index the (key, value) pairs explicitly: lambda tuple unpacking
     # is a syntax error in Python 3 (PEP 3113)
     assert (allclose(
         betas.map(lambda kv: kv[1]).collect()[0], array([-2.7, -1.9])))
     assert (allclose(
         stats.map(lambda kv: kv[1]).collect()[0], array([0.42785299])))
     assert (allclose(
         resid.map(lambda kv: kv[1]).collect()[0],
         array([0, 0, 2, 0.9, -0.8, -2.1])))
Esempio n. 4
0
 def test_blinear_regress(self):
     """Bilinear regression on a single record should recover the known
     betas (within a small tolerance)."""
     data = self.sc.parallelize([(1, array([1.5, 2.3, 6.2, 5.1, 3.4, 2.1]))])
     x1 = array([
         array([1, 0, 1, 0, 1, 0]),
         array([0, 1, 0, 1, 0, 1])
     ])
     x2 = array([
         array([1, 1, 0, 0, 0, 0]),
         array([0, 0, 1, 1, 0, 0]),
         array([0, 0, 0, 0, 1, 1])
     ])
     model = RegressionModel.load((x1, x2), "bilinear")
     betas, stats, resid = model.fit(data)
     tol = 1E-4  # to handle rounding errors
     # index the (key, value) pair explicitly: lambda tuple unpacking
     # is a syntax error in Python 3 (PEP 3113)
     assert(allclose(betas.map(lambda kv: kv[1]).collect()[0], array([-3.1249, 5.6875, 0.4375]), atol=tol))
Esempio n. 5
0
 def test_blinear_regress(self):
     """Bilinear regression on a single record should recover the known
     betas (within a small tolerance)."""
     data = self.sc.parallelize([(1, array([1.5, 2.3, 6.2, 5.1, 3.4,
                                            2.1]))])
     x1 = array([array([1, 0, 1, 0, 1, 0]), array([0, 1, 0, 1, 0, 1])])
     x2 = array([
         array([1, 1, 0, 0, 0, 0]),
         array([0, 0, 1, 1, 0, 0]),
         array([0, 0, 0, 0, 1, 1])
     ])
     model = RegressionModel.load((x1, x2), "bilinear")
     betas, stats, resid = model.fit(data)
     tol = 1E-4  # to handle rounding errors
     # index the (key, value) pair explicitly: lambda tuple unpacking
     # is a syntax error in Python 3 (PEP 3113)
     assert (allclose(betas.map(lambda kv: kv[1]).collect()[0],
                      array([-3.1249, 5.6875, 0.4375]),
                      atol=tol))
Esempio n. 6
0
from thunder.regression import RegressionModel
from thunder.factorization import PCA
from thunder.utils import load
from thunder.utils import save


if __name__ == "__main__":
    # argparse and SparkContext are used below but not imported in this
    # snippet's import block; keep them local so the script is runnable
    import argparse

    from pyspark import SparkContext

    # Fit a regression model, then run PCA on the betas and recover
    # trajectories by projecting onto the principal components.
    parser = argparse.ArgumentParser(description="fit a regression model")
    parser.add_argument("datafile", type=str)
    parser.add_argument("modelfile", type=str)
    parser.add_argument("outputdir", type=str)
    parser.add_argument("regressmode", choices=("mean", "linear", "bilinear"), help="form of regression")
    parser.add_argument("--k", type=int, default=2)
    # BUG FIX: a comma was missing after "dff-percentile", so Python's
    # implicit string concatenation fused it with "dff-detrendnonlin"
    # into one bogus choice ("dff-percentiledff-detrendnonlin"),
    # making both intended options unusable
    parser.add_argument("--preprocess", choices=("raw", "dff", "sub", "dff-highpass", "dff-percentile",
                        "dff-detrendnonlin", "dff-detrend-percentile"), default="raw", required=False)

    args = parser.parse_args()

    sc = SparkContext(appName="regresswithpca")

    data = load(sc, args.datafile, args.preprocess)
    model = RegressionModel.load(args.modelfile, args.regressmode)  # do regression
    betas, stats, resid = model.fit(data)
    pca = PCA(args.k).fit(betas)  # do PCA
    traj = model.fit(data, pca.comps)  # get trajectories

    outputdir = args.outputdir + "-regress"
    save(pca.comps, outputdir, "comps", "matlab")
    save(pca.latent, outputdir, "latent", "matlab")
    save(pca.scores, outputdir, "scores", "matlab")
    save(traj, outputdir, "traj", "matlab")
Esempio n. 7
0
                        choices=("raw", "dff", "dff-highpass", "sub"),
                        default="raw",
                        required=False)
    # NOTE(review): this excerpt starts mid-call -- the argparse setup
    # (and the argument the three lines above belong to, presumably
    # "--preprocess") is defined before this fragment; confirm upstream.
    parser.add_argument("--regressmodelfile", type=str)
    parser.add_argument("--regressmode",
                        choices=("linear", "bilinear"),
                        help="form of regression")

    args = parser.parse_args()

    sc = SparkContext(args.master, "tuning")

    # on a non-local master, ship the packaged thunder egg to the workers
    if args.master != "local":
        egg = glob.glob(os.path.join(os.environ['THUNDER_EGG'], "*.egg"))
        sc.addPyFile(egg[0])

    data = load(sc, args.datafile, args.preprocess)
    tuningmodel = TuningModel.load(args.tuningmodelfile, args.tuningmode)
    if args.regressmodelfile is not None:
        # use regression results: fit the tuning model to the betas
        # produced by a preliminary regression
        regressmodel = RegressionModel.load(args.regressmodelfile,
                                            args.regressmode)
        betas, stats, resid = regressmodel.fit(data)
        params = tuningmodel.fit(betas)
    else:
        # use data: fit the tuning model to the raw data directly
        params = tuningmodel.fit(data)

    outputdir = args.outputdir + "-tuning"
    save(params, outputdir, "params", "matlab")
Esempio n. 8
0
import glob
from thunder.regression import RegressionModel
from thunder.io import load
from thunder.io import save
from pyspark import SparkContext


if __name__ == "__main__":
    # argparse and os are used below but not imported in this snippet's
    # import block; keep them local so the script is runnable
    import argparse
    import os

    # Fit a regression model on a Spark cluster and save the resulting
    # stats and betas as MATLAB files.
    parser = argparse.ArgumentParser(description="fit a regression model")
    parser.add_argument("master", type=str)
    parser.add_argument("datafile", type=str)
    parser.add_argument("modelfile", type=str)
    parser.add_argument("outputdir", type=str)
    parser.add_argument("regressmode", choices=("mean", "linear", "bilinear"), help="form of regression")
    parser.add_argument("--preprocess", choices=("raw", "dff", "dff-highpass", "sub"), default="raw", required=False)

    args = parser.parse_args()

    sc = SparkContext(args.master, "regress")

    # on a non-local master, ship the packaged thunder egg to the workers
    if args.master != "local":
        egg = glob.glob(os.path.join(os.environ['THUNDER_EGG'], "*.egg"))
        sc.addPyFile(egg[0])

    data = load(sc, args.datafile, args.preprocess)
    # BUG FIX: fit() returns (betas, stats, resid) -- every other call
    # site in this file unpacks it in that order -- but the original
    # unpacked (stats, betas, resid), so betas were saved under the
    # "stats" key and stats under "betas"
    betas, stats, resid = RegressionModel.load(args.modelfile, args.regressmode).fit(data)

    outputdir = args.outputdir + "-regress"
    save(stats, outputdir, "stats", "matlab")
    save(betas, outputdir, "betas", "matlab")
Esempio n. 9
0
    # compute mean map
    vals = Stats("mean").calc(data)
    save(vals,outputdir,"mean_vals","matlab")

    # compute local cor
    # NOTE(review): `data`, `outputdir`, and `args` are defined before
    # this excerpt begins
    if args.neighbourhood != 0:
        cor = LocalCorr(neighborhood=args.neighbourhood).calc(data)
        save(cor,outputdir,"local_corr","matlab")

    # if stim argument is not default
    if args.stim != '-':
        # parse into different stim names
        # NOTE(review): args.stim is presumably a '-'-separated list of
        # stimulus names -- confirm against the argparse setup, which is
        # outside this excerpt
        p = re.compile('-')
        stims = p.split(args.stim)

        # compute regression
        # for each stimulus: regress, then fit a tuning model to the
        # betas, saving the stats and tuning maps per stimulus
        for i in range(len(stims)):
            modelfile = os.path.join(args.datafolder, args.basename + stims[i])
            m = RegressionModel.load(modelfile, args.regressmode)
            betas, stats, resid = m.fit(data)
            t = TuningModel.load(modelfile, args.tuningmode)
            tune = t.fit(betas)
            out_name = "stats_" + stims[i]
            save(stats, outputdir, out_name, "matlab")
            out_name = "tune_" + stims[i]
            save(tune, outputdir, out_name, "matlab")



Esempio n. 10
0
import glob
from thunder.regression import RegressionModel
from thunder.utils import load
from thunder.utils import save
from pyspark import SparkContext


if __name__ == "__main__":
    # argparse and os are used below but not imported in this snippet's
    # import block; keep them local so the script is runnable
    import argparse
    import os

    # Fit a regression model on a Spark cluster and save the resulting
    # stats and betas as MATLAB files.
    parser = argparse.ArgumentParser(description="fit a regression model")
    parser.add_argument("master", type=str)
    parser.add_argument("datafile", type=str)
    parser.add_argument("modelfile", type=str)
    parser.add_argument("outputdir", type=str)
    parser.add_argument("regressmode", choices=("mean", "linear", "bilinear"), help="form of regression")
    parser.add_argument("--preprocess", choices=("raw", "dff", "dff-highpass", "sub"), default="raw", required=False)

    args = parser.parse_args()

    sc = SparkContext(args.master, "regress")

    # on a non-local master, ship the packaged thunder egg to the workers
    if args.master != "local":
        egg = glob.glob(os.path.join(os.environ['THUNDER_EGG'], "*.egg"))
        sc.addPyFile(egg[0])

    data = load(sc, args.datafile, args.preprocess)
    # BUG FIX: fit() returns (betas, stats, resid) -- every other call
    # site in this file unpacks it in that order -- but the original
    # unpacked (stats, betas, resid), so betas were saved under the
    # "stats" key and stats under "betas"
    betas, stats, resid = RegressionModel.load(args.modelfile, args.regressmode).fit(data)

    outputdir = args.outputdir + "-regress"
    save(stats, outputdir, "stats", "matlab")
    save(betas, outputdir, "betas", "matlab")
Esempio n. 11
0
 def runtest(self):
     """Load the saved linear model, fit it to the test RDD, and force
     evaluation of the resulting stats."""
     path = os.path.join(self.modelfile, "linear")
     model = RegressionModel.load(path, "linear")
     betas, stats, resid = model.fit(self.rdd)
     stats.count()
Esempio n. 12
0
 def runtest(self):
     """Fit a linear regression on the test RDD and save the stats
     (downcast to float16) into a temporary MATLAB .mat file."""
     model = RegressionModel.load(os.path.join(self.modelfile, "linear"), "linear")
     betas, stats, resid = model.fit(self.rdd)
     # index the (key, value) pair explicitly: lambda tuple unpacking
     # (`lambda (_, v):`) was removed in Python 3 (PEP 3113)
     result = stats.map(lambda kv: float16(kv[1])).collect()
     savemat(self.savefile + "tmp.mat", mdict={"tmp": result}, oned_as='column')
Esempio n. 13
0
 def runtest(self):
     """Fit a linear regression on the test RDD and trigger computation
     of the stats by counting them."""
     modelpath = os.path.join(self.modelfile, "linear")
     betas, stats, resid = RegressionModel.load(modelpath, "linear").fit(self.rdd)
     stats.count()