Example #1
    def _ensure_eigen_vectors_number(self, eigen_values, e_values, x0_values,
                                     disease_indices):
        if self.eigen_vectors_number is None:
            if self.eigen_vectors_number_selection == "auto_eigenvals":
                self.eigen_vectors_number = self.get_curve_elbow_point(
                    numpy.abs(eigen_values)) + 1

            elif self.eigen_vectors_number_selection == "auto_disease":
                self.eigen_vectors_number = len(disease_indices)

            elif self.eigen_vectors_number_selection == "auto_epileptogenicity":
                self.eigen_vectors_number = self.get_curve_elbow_point(
                    e_values) + 1

            elif self.eigen_vectors_number_selection == "auto_excitability":
                self.eigen_vectors_number = self.get_curve_elbow_point(
                    x0_values) + 1

            else:
                raise_value_error(
                    "\n" + self.eigen_vectors_number_selection +
                    " is not a valid option for the automatic computation of self.eigen_vectors_number"
                )
        else:
            self.eigen_vectors_number_selection = "user_defined"
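The get_curve_elbow_point helper used above is project-specific and not shown on this page; a minimal sketch of a common elbow heuristic (the point of the descending curve farthest from the chord joining its first and last points), which may differ from the project's own implementation:

import numpy

def curve_elbow_point(values):
    # sort descending, as for eigenvalue/epileptogenicity curves
    values = numpy.sort(numpy.asarray(values, dtype=float).flatten())[::-1]
    n = values.size
    x = numpy.arange(n, dtype=float)
    # unit vector along the chord from (0, values[0]) to (n - 1, values[-1])
    chord = numpy.array([n - 1.0, values[-1] - values[0]])
    chord = chord / numpy.linalg.norm(chord)
    # perpendicular distance of every point from that chord
    dx, dy = x, values - values[0]
    dist = numpy.abs(dx * chord[1] - dy * chord[0])
    return int(numpy.argmax(dist))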
Example #2
def eq_x1_hypo_x0_optimize(ix0, iE, x1EQ, zEQ, x0, K, w, yc, Iext1, a=A_DEF, b=B_DEF, d=D_DEF, slope=SLOPE_DEF):

    x1EQ, zEQ, yc, Iext1, K, a, b, d, slope = assert_arrays([x1EQ, zEQ, yc, Iext1, K, a, b, d, slope], (x1EQ.size, ))

    x0 = assert_arrays([x0], (len(ix0),))

    w = assert_arrays([w], (x1EQ.size, x1EQ.size))

    xinit = numpy.zeros(x1EQ.shape, dtype=x1EQ.dtype)

    # Set initial conditions for the optimization algorithm, by ignoring coupling (=0):
    # fz = 4 * (x1 - x0) - z - coupling = 0
    # x0init = x1 - z/4
    xinit[iE] = calc_x0(x1EQ[iE], zEQ[iE], K=0.0, w=0.0, zmode=numpy.array("lin"), z_pos=True, shape=None)
    #x1eqinit = x0 + z / 4
    xinit[ix0] = x0 + zEQ[ix0] / 4.0

    #Solve:
    sol = root(eq_x1_hypo_x0_optimize_fun, xinit,
               args=(ix0, iE, x1EQ, zEQ, x0, K, w, yc, Iext1, a, b, d, slope),
               method='lm', jac=eq_x1_hypo_x0_optimize_jac, tol=10**(-12), callback=None, options=None) #method='hybr'

    if sol.success:
        x1EQ[ix0] = sol.x[ix0]
        x0sol = sol.x[iE]
        if numpy.any([numpy.any(numpy.isnan(sol.x)), numpy.any(numpy.isinf(sol.x))]):
            raise_value_error("nan or inf values in solution x\n" + sol.message)
        else:
            return x1EQ, x0sol
    else:
        raise_value_error(sol.message)
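For reference, a minimal self-contained sketch of the scipy.optimize.root pattern used above (Levenberg-Marquardt with an analytic Jacobian); the residual and Jacobian here are toy stand-ins, not the Epileptor equations:

import numpy
from scipy.optimize import root

def fun(x):
    # toy residual f(x) = x**2 - [1, 4], with roots [1, 2]
    return x ** 2 - numpy.array([1.0, 4.0])

def jac(x):
    # analytic Jacobian df_i/dx_j of the residual
    return numpy.diag(2.0 * x)

sol = root(fun, numpy.array([0.5, 0.5]), jac=jac, method='lm', tol=10 ** (-12))
if sol.success:
    print(sol.x)  # approximately [1.0, 2.0]
else:
    raise ValueError(sol.message)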
Example #3
def mean_std_to_distribution_params(distribution, mu, std=1.0):

    if np.any(std <= 0.0):
        raise_value_error("Standard deviation std = " + str(std) + " <= 0!")

    std_check = {
        "exponential": lambda mu: mu,
        "poisson": lambda mu: np.sqrt(mu),
        "chisquare": lambda mu: np.sqrt(2.0 * mu),
        "bernoulli": lambda mu: np.sqrt(mu * (1.0 - mu))
    }
    if np.in1d(distribution,
               ["exponential", "poisson", "chisquare", "bernoulli"]):
        std_expected = std_check[distribution](mu)
        if std != std_expected:
            msg = "\nmu = " + str(mu) + "\nstd = " + str(
                std) + "\nstd should be = " + str(std_expected)
            warning(
                msg +
                "\nStandard deviation constraint not satisfied for distribution "
                + distribution + "!")

    p = distrib_dict[distribution]["from_mu_std"](mu, std)

    if distrib_dict[distribution]["constraint"](p):
        return p

    else:
        for key, val in p.iteritems():
            logger.info("\n" + str(key) + ": " + str(val))
        raise_value_error("\nDistribution parameters'constraints " +
                          distrib_dict[distribution]["constraint_str"] +
                          " is not met!")
Example #4
def generate_connectivity_variant(uq_name,
                                  new_weights,
                                  new_tracts,
                                  description,
                                  new_w=None,
                                  folder=os.path.join(PATIENT_VIRTUAL_HEAD),
                                  filename="Connectivity.h5",
                                  logger=logger):
    """
    In an existing Connectivity H5 file, define weights and tract-length variants
    """
    path = os.path.join(folder, filename)
    logger.info("Writing a Connectivity Variant at:\n" + path)
    h5_file = h5py.File(path, 'a', libver='latest')

    try:
        group = h5_file.create_group('/' + uq_name)
        # Array doesn't seem to work in Python 3
        # group.attrs["Operations"] = [description]
        group.attrs["Operations"] = description
        h5_file.create_dataset("/" + uq_name + "/weights", data=new_weights)
        if new_w is not None:
            h5_file.create_dataset("/" + uq_name + "/w", data=new_w)
        h5_file.create_dataset("/" + uq_name + "/tract_lengths",
                               data=new_tracts)
        h5_file.close()
    except Exception, e:
        raise_value_error(
            str(e) + "\nYou should specify a unique group name " + uq_name, logger)
Example #5
    def _set_conf_level(self, conf_level):
        if isinstance(conf_level, float) and 0.0 < conf_level < 1.0:
            self.conf_level = conf_level
        else:
            raise_value_error("conf_level = " + str(conf_level) +
                              " is not a float in the (0.0, 1.0) interval as it should be!")
Example #6
    def _set_method(self, method):
        method = method.lower()
        if np.in1d(method, METHODS):
            self.method = method
        else:
            raise_value_error("Method " + str(method) +
                              " is not one of the available methods " +
                              str(METHODS) + " !")
Example #7
def gamma_to_mu_std(p):
    if p.get("alpha", False) and p.get("beta", False):
        return p["alpha"] / p["beta"], np.sqrt(p["alpha"]) / p["beta"]

    elif p.get("k", False) and p.get("theta", False):
        return p["k"] * p["theta"], np.sqrt(p["k"]) * p["theta"]

    else:
        raise_value_error(
            "The input gamma distribution parameters are neither of the a, beta system, nor of the "
            "k, theta one!")
Example #8
def beta_from_mu_std(mu, std):
    var = std**2
    mu1 = 1.0 - mu

    if var < mu * mu1:
        vmu = mu * mu1 / var - 1.0
        return {"alpha": mu * vmu, "beta": mu1 * vmu}

    else:
        raise_value_error("Variance = " + str(var) +
                          " has to be smaller than the quantity mu*(1-mu) = " +
                          str(mu1) + " !")
Example #9
    def sort_disease_indices_values(self, disease_dict):
        indices = []
        values = []
        for key, value in disease_dict.iteritems():
            key = ensure_list(key)
            value = ensure_list(value)
            n = len(key)
            indices += key
            if len(value) == n:
                values += value
            elif len(value) == 1 and n > 1:
                values += value * n
            else:
                raise_value_error("Length of disease indices " +
                                  str(len(key)) + " and values " +
                                  str(len(value)) + " do not match!")
        arg_sort = np.argsort(indices)
        return np.array(indices)[arg_sort].tolist(), np.array(values)[arg_sort]
Example #10
def distribution_params_to_mean_std(distribution, **p):

    if distrib_dict[distribution]["constraint"](p):

        mu, std = distrib_dict[distribution]["to_mu_std"](p)

        if np.any(std <= 0.0):
            raise_value_error("\nStandard deviation std = " + str(std) +
                              " <= 0!")

        return mu, std

    else:
        for key, val in p.iteritems():
            logger.info("\n" + str(key) + ": " + str(val))
        raise_value_error("\nDistribution parameters'constraints " +
                          distrib_dict[distribution]["constraint_str"] +
                          " is not met!")
Example #11
    def __init__(self,
                 n_samples=10,
                 n_outputs=1,
                 sampler="uniform",
                 trunc_limits={},
                 sampling_module="numpy",
                 random_seed=None,
                 **kwargs):

        super(StochasticSamplingService, self).__init__(n_samples, n_outputs)

        self.random_seed = random_seed
        self.params = kwargs
        self._list_params()
        self.trunc_limits = trunc_limits
        sampling_module = sampling_module.lower()

        self.sampler = sampler

        if len(self.trunc_limits) > 0:

            self.trunc_limits = dicts_of_lists(self.trunc_limits,
                                               self.n_outputs)

            # We use inverse transform sampling for truncated distributions...

            if sampling_module is not "scipy":
                warning("\nSelecting scipy module for truncated distributions")

            self.sampling_module = "scipy.stats." + sampler + " inverse transform sampling"

        elif sampling_module == "scipy":
            self.sampling_module = "scipy.stats." + self.sampler + ".rvs"

        elif sampling_module == "numpy":
            self.sampling_module = "numpy.random." + self.sampler

        elif sampling_module == "salib":
            self.sampling_module = "SALib.sample." + self.sampler + ".sample"

        else:
            raise_value_error("Sampler module " + str(sampling_module) +
                              " is not recognized!")
Example #12
    def _compute_jacobian(self, model_configuration):

        # Check if any of the equilibria are in the supercritical regime (beyond the separatrix) and, if so, reset
        # them right before the bifurcation.
        zEQ = model_configuration.zEQ
        temp = model_configuration.x1EQ > X1_EQ_CR_DEF - 10**(-3)
        if temp.any():
            correction_value = X1_EQ_CR_DEF - 10**(-3)
            warning(
                "Equilibria x1EQ[" + str(numpy.where(temp)[0]) + "]  = " +
                str(model_configuration.x1EQ[temp]) +
                "\nwere corrected for LSA to the value: X1_EQ_CR_DEF - 10 ** (-3) = "
                + str(correction_value) + " to be sub-critical!")
            model_configuration.x1EQ[temp] = correction_value
            i_temp = numpy.ones(model_configuration.x1EQ.shape)
            zEQ[temp] = calc_eq_z(model_configuration.x1EQ[temp],
                                  model_configuration.yc * i_temp[temp],
                                  model_configuration.Iext1 * i_temp[temp],
                                  "2d", 0.0,
                                  model_configuration.slope * i_temp[temp],
                                  model_configuration.a * i_temp[temp],
                                  model_configuration.b * i_temp[temp],
                                  model_configuration.d * i_temp[temp])

        # use the (possibly corrected) equilibria computed above
        fz_jacobian = calc_fz_jac_square_taylor(
            zEQ, model_configuration.yc,
            model_configuration.Iext1, model_configuration.K,
            model_configuration.connectivity_matrix, model_configuration.a,
            model_configuration.b, model_configuration.d)

        if numpy.any([
                numpy.any(numpy.isnan(fz_jacobian.flatten())),
                numpy.any(numpy.isinf(fz_jacobian.flatten()))
        ]):
            raise_value_error("nan or inf values in dfz")

        return fz_jacobian
Example #13
    def compute_nearest_regions_to_sensors(self, s_type, target_contacts=None, id_sensor=0, n_regions=None, th=0.95):
        if s_type is "EEG":
            sensors_dict = self.sensorsEEG
        elif s_type is "MEG":
            sensors_dict = self.sensorsMEG
        else:
            sensors_dict = self.sensorsSEEG
        sensors = sensors_dict.keys()[id_sensor]
        n_contacts = sensors.labels.shape[0]

        if isinstance(target_contacts, (list, tuple, np.ndarray)):
            target_contacts = ensure_list(target_contacts)
            for itc, tc in enumerate(target_contacts):
                if isinstance(tc, int):
                    continue
                elif isinstance(tc, basestring):
                    target_contacts[itc] = sensors.contact_label_to_index([tc])
                else:
                    raise_value_error("target_contacts[" + str(itc) + "] = " + str(tc) +
                                      "is neither an integer nor a string!")
        else:
            target_contacts = range(n_contacts)
        auto_flag = False
        if n_regions is "all":
            n_regions = self.connectivity.number_of_regions
        elif n_regions is "auto" or not(isinstance(n_regions, int)):
            auto_flag = True
        nearest_regions = []
        for tc in target_contacts:
            projs = sensors_dict[sensors][tc]
            inds = np.argsort(projs)[::-1]
            if auto_flag:
                n_regions = curve_elbow_point(projs[inds])
            nearest_regions.append((inds[:n_regions],
                                    self.connectivity.region_labels[inds[:n_regions]],
                                    projs[inds[:n_regions]]))
        return nearest_regions
Example #14
    def __init__(self,
                 n_samples=10,
                 n_outputs=1,
                 low=0.0,
                 high=1.0,
                 grid_mode=True):

        super(DeterministicSamplingService,
              self).__init__(n_samples, n_outputs)

        self.sampling_module = "numpy.linspace"
        self.sampler = np.linspace
        self.grid_mode = grid_mode
        if self.grid_mode:
            self.shape = (self.n_outputs,
                          np.power(self.n_samples, self.n_outputs))

        if np.any(high <= low):
            raise_value_error("\nHigh limit of linear space " + str(high) +
                              " is not greater than the lower one " +
                              str(low) + "!")
        else:
            self.params = {"low": low, "high": high}
            self._list_params()
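A sketch of what the grid_mode shape above corresponds to, assuming grid sampling means n_samples linspace points per output dimension, fully crossed:

import numpy as np

n_samples, n_outputs, low, high = 3, 2, 0.0, 1.0
axes = [np.linspace(low, high, n_samples)] * n_outputs
grid = np.array(np.meshgrid(*axes)).reshape(n_outputs, -1)
print(grid.shape)  # (2, 9) == (n_outputs, n_samples ** n_outputs)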
Example #15
def calc_eq_x1(yc, Iext1, x0, K, w, a=A_DEF, b=B_DEF, d=D_DEF, zmode=numpy.array("lin"), model="6d"):

    x0, K, yc, Iext1, a, b, d = assert_arrays([x0, K, yc, Iext1, a, b, d])

    n = x0.size
    shape = x0.shape

    x0, K, yc, Iext1, a, b, d = assert_arrays([x0, K, yc, Iext1, a, b, d], (n,))
    w = assert_arrays([w], (n, n))

    # if SYMBOLIC_CALCULATIONS_FLAG:
    #
    #     fx1z, v = symbol_eqtn_fx1z(n, model, zmode)[1:]  # , x1_neg=True, z_pos=True
    #     fx1z = fx1z.tolist()
    #
    #     for iv in range(n):
    #         fx1z[iv] = fx1z[iv].subs([(v["x0_values"][iv], x0_values[iv]), (v["K"][iv], K[iv]), (v["y1"][iv], yc[iv]),
    #                                       (v["Iext1"][iv], Iext1[iv]), (v["a"][iv], a[iv]), (v["b"][iv], b[iv]),
    #                                       (v["d"][iv], d[iv]), (v["tau1"][iv], 1.0), (v["tau0"][iv], 1.0)])
    #         for jv in range(n):
    #             fx1z[iv] = fx1z[iv].subs(v["w"][iv, jv], w[iv, jv])
    #
    #     # TODO: solve symbolically if possible...
    #     # xeq = list(solve(fx1z, v["x1"].tolist()))
    #
    # else:

    fx1z = lambda x1: calc_fx1z(x1, x0, K, w, yc, Iext1, a=a, b=b, d=d, tau1=1.0, tau0=1.0, model=model, zmode=zmode,
                                shape=(Iext1.size, ))

    jac = lambda x1: calc_fx1z_diff(x1, K, w, a, b, d, tau1=1.0, tau0=1.0, model=model, zmode=zmode)

    sol = root(fx1z, -1.5*numpy.ones((Iext1.size, )), jac=jac, method='lm', tol=10 ** (-12), callback=None, options=None)
    #args=(y2eq[ii], zeq[ii], g_eq[ii], Iext2[ii], s, tau1, tau2, x2_neg)  method='hybr'

    if sol.success:

        if numpy.any([numpy.any(numpy.isnan(sol.x)), numpy.any(numpy.isinf(sol.x))]):
            raise_value_error("nan or inf values in solution x\n" + sol.message)

        x1eq = sol.x

    else:
        raise_value_error(sol.message)

    x1eq = numpy.reshape(x1eq, shape)

    if numpy.any(x1eq > 0.0):
        raise_value_error("At least one x1eq is > 0.0!")

    return x1eq
Example #16
def eq_x1_hypo_x0_linTaylor(ix0, iE, x1EQ, zEQ, x0, K, w, yc, Iext1, a=A_DEF, b=B_DEF, d=D_DEF):

    x1EQ, zEQ, yc, Iext1, K, a, b, d = assert_arrays([x1EQ, zEQ, yc, Iext1, K, a, b, d], (1, x1EQ.size))

    x0 = assert_arrays([x0], (1, len(ix0)))

    w = assert_arrays([w], (x1EQ.size, x1EQ.size))

    no_x0 = len(ix0)
    no_e = len(iE)

    n_regions = no_e + no_x0

    # The equilibria of the nodes of fixed epileptogenicity
    x1_eq = x1EQ[:, iE]
    z_eq = zEQ[:, iE]

    #Prepare linear system to solve:

    x1_type = x1EQ.dtype

    #The point of the linear Taylor expansion
    x1LIN = def_x1lin(X1_DEF, X1_EQ_CR_DEF, n_regions).astype(x1_type)

    # For regions of fixed equilibria:
    ii_e = numpy.ones((1, no_e), dtype=x1_type)
    we_to_e = numpy.expand_dims(numpy.sum(w[iE][:, iE] * (numpy.dot(ii_e.T, x1_eq) -
                                                          numpy.dot(x1_eq.T, ii_e)), axis=1), 1).T.astype(x1_type)
    wx0_to_e = x1_eq * numpy.expand_dims(numpy.sum(w[ix0][:, iE], axis=0), 0).astype(x1_type)
    be = 4.0 * x1_eq - z_eq - K[:, iE] * (we_to_e - wx0_to_e)

    # For regions of fixed x0_values:
    ii_x0 = numpy.ones((1, no_x0), dtype=x1_type)
    we_to_x0 = numpy.expand_dims(numpy.sum(w[ix0][:, iE] * numpy.dot(ii_x0.T, x1_eq), axis=1), 1).T.astype(x1_type)
    bx0 = - 4.0 * x0 - yc[:, ix0] - Iext1[:, ix0] - 2.0 * x1LIN[:, ix0] ** 3 - 2.0 * x1LIN[:, ix0] ** 2 \
          - K[:, ix0] * we_to_x0

    # Concatenate B vector:
    b = -numpy.concatenate((be, bx0), axis=1).T.astype(x1_type)

    # From-to Epileptogenicity-fixed regions
    # ae_to_e = -4 * numpy.eye( no_e, dtype=numpy.float32 )
    ae_to_e = -4 * numpy.diag(numpy.ones((no_e,))).astype(x1_type)

    # From x0_values-fixed regions to Epileptogenicity-fixed regions
    ax0_to_e = -numpy.dot(K[:, iE].T, ii_x0) * w[iE][:, ix0]

    # From Epileptogenicity-fixed regions to x0_values-fixed regions
    ae_to_x0 = numpy.zeros((no_x0, no_e), dtype=x1_type)

    # From-to x0_values-fixed regions
    ax0_to_x0 = numpy.diag( (4.0 + 3.0 * x1LIN[:, ix0] ** 2 + 4.0 * x1LIN[:, ix0] +
                K[0, ix0] * numpy.expand_dims(numpy.sum(w[ix0][:, ix0], axis=0), 0)).T[:, 0]) - \
                numpy.dot(K[:, ix0].T, ii_x0) * w[ix0][:, ix0]

    # Concatenate A matrix
    a = numpy.concatenate((numpy.concatenate((ae_to_e, ax0_to_e), axis=1),
                           numpy.concatenate((ae_to_x0, ax0_to_x0), axis=1)), axis=0).astype(x1_type)

    # Solve the system
    x = numpy.linalg.solve(a, b).T
    if numpy.any([numpy.any(numpy.isnan(x)), numpy.any(numpy.isinf(x))]):
        raise_value_error("nan or inf values in solution x")

    # Unpack solution:
    # The equilibria of the regions with fixed e_values have not changed:
    # The equilibria of the regions with fixed x0_values:
    x1EQ[0, ix0] = x[0, no_e:]

    #Return also the solution of x0s for the regions of fixed e_values (equilibria):
    return x1EQ.flatten(), x[0, :no_e].flatten()
Example #17
    def __init__(self, name, low=None, high=None, loc=None, scale=None, shape=(1,), distribution="uniform"):

        # TODO: better controls for the inputs given!

        if isinstance(name, basestring):
            self.name = name
        else:
            raise_value_error("Parameter name " + str(name) + " is not a string!")

        if isinstance(shape, tuple):
            self.shape = shape
        else:
            raise_value_error("Parameter's " + str(self.name) + " shape="
                              + str(shape) + " is not a shape tuple!")

        if isinstance(distribution, basestring):
            self.distribution = distribution
        else:
            raise_value_error("Parameter's " + str(self.name) + " distribution="
                              + str(distribution) + " is not a string!")

        if low is None:
            warning("Lowest value for parameter " + self.name + " is -inf!")
            self.low = -np.inf
        else:
            self.low = low

        if high is None:
            warning("Highest value for parameter " + self.name + " is inf!")
            self.high = np.inf
        else:
            self.high = high

        if np.all(self.low >= self.high):
            raise_value_error("Lowest value low=" + str(self.low) + " of parameter " + self.name +
                              " is not smaller than the highest one high=" + str(self.high) + "!")

        low_not_inf = np.all(np.abs(self.low) < np.inf)
        high_not_inf = np.all(np.abs(self.high) < np.inf)

        if low_not_inf and high_not_inf:
            half = (self.low + self.high) / 2.0
        elif not(low_not_inf) and not(high_not_inf):
            half = 0.0
        elif not(low_not_inf):
            half = -1.0
        else:
            half = 1.0

        if loc is None:
            self.loc = half
            warning("Location of parameter " + self.name + " is set as location=" + str(self.loc) + "!")
        else:
            if self.low <= loc <= self.high:
                self.loc = loc
            else:
                raise_value_error("Parameter's " + str(self.name) + " location=" + str(loc)
                                  + " is not in the interval defined by the lowest and highest values "
                                  + str([self.low, self.high]) + "!")

        if scale is None:
            if self.loc == 0.0:
                if half == 0.0:
                    warning("Scale of parameter " + self.name + " is set as scale=1.0!")
                    self.scale = 1.0
                else:
                    self.scale = np.abs(half)
                    warning("Scale of parameter " + self.name + " is set as scale=" + str(self.scale) + "!")
            else:
                self.scale = np.abs(self.loc)
                warning("Scale of parameter " + self.name + " is set as scale=abs(location)=" + str(self.scale) + "!")

        else:
            if scale >= 0.0:
                self.scale = scale
                if self.scale == 0.0:
                    warning("Scale of parameter " + self.name + " is 0.0!")
            else:
                raise_value_error("Parameter's " + str(self.name) + " scale=" + str(scale) + " < 0.0!")
Example #18
def calc_eq_x2(Iext2, y2eq=None, zeq=None, geq=None, x1eq=None, y1eq=None, Iext1=None, x2=0.0,
               slope=SLOPE_DEF, a=A_DEF, b=B_DEF, d=D_DEF, x1_neg=True, s=S_DEF, x2_neg=True):

    if geq is None:
        geq = calc_eq_g(x1eq)

    if zeq is None:
        zeq = calc_eq_z(x1eq, y1eq, Iext1, "6d", x2, slope, a, b, d, x1_neg)

    zeq, geq, Iext2, s = assert_arrays([zeq, geq, Iext2, s])

    shape = zeq.shape
    n = zeq.size

    zeq, geq, Iext2, s = assert_arrays([zeq, geq, Iext2, s], (n,))

    if SYMBOLIC_CALCULATIONS_FLAG:

        fx2y2, v = symbol_eqtn_fx2y2(n, x2_neg)[1:]
        fx2y2 = fx2y2.tolist()

        x2eq = []
        for iv in range(n):
            fx2y2[iv] = fx2y2[iv].subs([(v["z"][iv], zeq[iv]), (v["g"][iv], geq[iv]), (v["Iext2"][iv], Iext2[iv]),
                                      (v["s"][iv], s[iv]), (v["tau1"][iv], 1.0)])
            fx2y2[iv] = list(solveset(fx2y2[iv], v["x2"][iv], S.Reals))
            x2eq.append(numpy.min(numpy.array(fx2y2[iv], dtype=zeq.dtype)))

    else:

        # fx2 = tau1 * (-y2 + Iext2 + 2 * g - x2 ** 3 + x2 - 0.3 * z + 1.05)
        # if x2_neg = True, so that y2eq = 0.0:
        #   fx2 = tau1 * (Iext2 + 2 * g - x2 ** 3 + x2 - 0.3 * z + 1.05) =>
        #     0 = x2eq ** 3 - x2eq - (Iext2 + 2 * geq -0.3 * zeq + 1.05)
        # if x2_neg = False , so that y2eq = s*(x2+0.25):
        #   fx2 = tau1 * (-s * (x2 + 0.25) + Iext2 + 2 * g - x2 ** 3 + x2 - 0.3 * z + 1.05) =>
        #   fx2 = tau1 * (-0.25 * s  + Iext2 + 2 * g - x2 ** 3 + (1 - s) * x2 - 0.3 * z + 1.05 =>
        #     0 = x2eq ** 3 + (s - 1) * x2eq - (Iext2 + 2 * geq -0.3 * zeq - 0.25 * s + 1.05)

        # According to http://mathworld.wolfram.com/CubicFormula.html
        # and given that there is no square term (x2eq^2; "depressed cubic"), we write the equation in the form:
        # x^3 + 3 * Q * x -2 * R = 0
        Q = (-numpy.ones((n, ))/3.0)
        R = ((Iext2 + 2.0 * geq - 0.3 * zeq + 1.05) / 2)
        if y2eq is None:
            ss = numpy.where(x2_neg, 0.0, s)
            Q += ss / 3
            R -= 0.25 * ss / 2
        else:
            y2eq = (assert_arrays([y2eq], (n, )))
            R += y2eq / 2

        # Then the discriminant is:
        # delta = Q^3 + R^2 =>
        delta = Q ** 3 + R ** 2
        # and S = cubic_root(R + sqrt(delta)), T = cubic_root(R - sqrt(delta))
        delta_sq = numpy.sqrt(delta.astype("complex")).astype("complex")
        ST = [R + delta_sq, R - delta_sq]
        for ii in range(2):
            for iv in range(n):
                if numpy.imag(ST[ii][iv]) == 0.0:
                    ST[ii][iv] = numpy.sign(ST[ii][iv]) * numpy.power(numpy.abs(ST[ii][iv]), 1.0/3)
                else:
                    ST[ii][iv] = numpy.power(ST[ii][iv], 1.0 / 3)
        # and B = S+T, A = S-T
        B = ST[0]+ST[1]
        A = ST[0]-ST[1]
        # The roots then are:
        # x1 = -1/3 * a2 + B
        # x21 = -1/3 * a2 - 1/2 * B + 1/2 * sqrt(3) * A * j
        # x22 = -1/3 * a2 - 1/2 * B - 1/2 * sqrt(3) * A * j
        # where j = sqrt(-1)
        # But, in our case a2 = 0.0, so that:
        B2 = - 0.5 *B
        AA = (0.5 * numpy.sqrt(3.0) * A * 1j)
        sol = numpy.concatenate([[B.flatten()], [B2 + AA], [B2 - AA]]).T
        x2eq = []
        for ii in range(delta.size):
            temp = sol[ii, numpy.abs(numpy.imag(sol[ii])) < 10 ** (-6)]
            if temp.size == 0:
                raise_value_error("No real roots for x2eq_" + str(ii))
            else:
                x2eq.append(numpy.min(numpy.real(temp)))

        # zeq = zeq.flatten()
        # geq = geq.flatten()
        # Iext2 = Iext2.flatten()
        # x2eq = []
        # for ii in range(n):
        #
        #     if y2eq is None:
        #
        #         fx2 = lambda x2: calc_fx2(x2, y2=calc_eq_y2(x2, x2_neg=x2_neg), z=zeq[ii], g=geq[ii],
        #                                   Iext2=Iext2[ii], tau1=1.0)
        #
        #         jac = lambda x2: -3 * x2 ** 2 + 1.0 - numpy.where(x2_neg, 0.0, -s)
        #
        #     else:
        #
        #         fx2 = lambda x2: calc_fx2(x2, y2=0.0, z=zeq[ii], g=geq[ii], Iext2=Iext2[ii], tau1=1.0)
        #         jac = lambda x2: -3 * x2 ** 2 + 1.0
        #
        #     sol = root(fx2, -0.5, method='lm', jac=jac, tol=10 ** (-6), callback=None, options=None)
        #
        #     if sol.success:
        #
        #         if numpy.any([numpy.any(numpy.isnan(sol.x)), numpy.any(numpy.isinf(sol.x))]):
        #             raise_value_error("nan or inf values in solution x\n" + sol.message)
        #
        #         x2eq.append(numpy.min(numpy.real(numpy.array(sol.x))))
        #
        #     else:
        #         raise_value_error(sol.message)

    if numpy.array(x2_neg).size == 1:
        x2_neg = numpy.tile(x2_neg, (n, ))

    for iv in range(n):

        if not x2_neg[iv] and x2eq[iv] < -0.25:
            warning("\nx2eq["+str(iv)+"] = " + str(x2eq[iv]) + " < -0.25, although x2_neg[" + str(iv)+"] = False!" +
                    "\n" + "Rerunning with x2_neg[" + str(iv)+"] = True...")
            temp, _ = calc_eq_x2(Iext2[iv], zeq=zeq[iv], geq=geq[iv], s=s[iv], x2_neg=True)
            if temp < -0.25:
                x2eq[iv] = temp
                x2_neg[iv] = True
            else:
                warning("\nThe value of x2eq returned after rerunning with x2_neg[" + str(iv)+"] = True, " +
                        "is " + str(temp) + ">= -0.25!" +
                        "\n" + "We will use the original x2eq!")

        if x2_neg[iv] and x2eq[iv] > -0.25:
            warning("\nx2eq["+str(iv)+"] = " + str(x2eq[iv]) + " > -0.25, although x2_neg[" + str(iv)+"] = True!" +
                    "\n" + "Rerunning with x2_neg[" + str(iv)+"] = False...")
            temp, _ = calc_eq_x2(Iext2[iv], zeq=zeq[iv], geq=geq[iv], s=s[iv], x2_neg=False)
            if temp > -0.25:
                x2eq[iv] = temp
                x2_neg[iv] = False
            else:
                warning("\nThe value of x2eq returned after rerunning with x2_neg[" + str(iv)+"] = False, " +
                        "is " + str(temp) + " <= -0.25!" +
                        "\n" + "We will use the original x2eq!")

    x2eq = numpy.reshape(x2eq, shape)

    return x2eq, x2_neg
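A quick numerical check of the Cardano construction used above for the depressed cubic x**3 + 3*Q*x - 2*R = 0, comparing the S/T roots against numpy.roots:

import numpy

Q, R = -1.0 / 3.0, 0.8                 # arbitrary test coefficients, delta > 0
sq = numpy.sqrt(Q ** 3 + R ** 2)
# real cube roots, handling the sign as in the loop above
S = numpy.sign(R + sq) * numpy.abs(R + sq) ** (1.0 / 3)
T = numpy.sign(R - sq) * numpy.abs(R - sq) ** (1.0 / 3)
B, A = S + T, S - T
roots = numpy.array([B,
                     -0.5 * B + 0.5 * numpy.sqrt(3.0) * A * 1j,
                     -0.5 * B - 0.5 * numpy.sqrt(3.0) * A * 1j])
ref = numpy.roots([1.0, 0.0, 3.0 * Q, -2.0 * R])
assert numpy.allclose(numpy.sort_complex(roots), numpy.sort_complex(ref))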
Example #19
def write_ts(raw_data,
             sampling_period,
             folder=os.path.join(PATIENT_VIRTUAL_HEAD, "ep"),
             filename="ts_from_python.h5",
             logger=logger):

    path, overwrite = change_filename_or_overwrite(
        os.path.join(folder, filename))
    # if os.path.exists(path):
    #     print "TS file %s already exists. Use a different name!" % path
    #     return

    logger.info("Writing a TS at:\n" + path)

    if overwrite:
        try:
            os.remove(path)
        except OSError:
            warning("\nFile to overwrite not found!")

    h5_file = h5py.File(path, 'a', libver='latest')
    write_metadata({KEY_TYPE: "TimeSeries"}, h5_file, KEY_DATE, KEY_VERSION)

    if isinstance(raw_data, dict):
        for data in raw_data:
            if len(raw_data[data].shape) == 2 and str(
                    raw_data[data].dtype)[0] == "f":
                h5_file.create_dataset("/" + data, data=raw_data[data])
                write_metadata(
                    {
                        KEY_MAX: raw_data[data].max(),
                        KEY_MIN: raw_data[data].min(),
                        KEY_STEPS: raw_data[data].shape[0],
                        KEY_CHANNELS: raw_data[data].shape[1],
                        KEY_SV: 1,
                        KEY_SAMPLING: sampling_period,
                        KEY_START: 0.0
                    }, h5_file, KEY_DATE, KEY_VERSION, "/" + data)
            else:
                raise_value_error(
                    "Invalid TS data. 2D (time, nodes) numpy.ndarray of floats expected"
                )

    elif isinstance(raw_data, numpy.ndarray):
        if len(raw_data.shape) == 2 and str(raw_data.dtype)[0] == "f":
            h5_file.create_dataset("/data", data=raw_data)
            write_metadata(
                {
                    KEY_MAX: raw_data.max(),
                    KEY_MIN: raw_data.min(),
                    KEY_STEPS: raw_data.shape[0],
                    KEY_CHANNELS: raw_data.shape[1],
                    KEY_SV: 1,
                    KEY_SAMPLING: sampling_period,
                    KEY_START: 0.0
                }, h5_file, KEY_DATE, KEY_VERSION, "/data")
        else:
            raise_value_error(
                "Invalid TS data. 2D (time, nodes) numpy.ndarray of floats expected"
            )

    else:
        raise_value_error(
            "Invalid TS data. Dictionary or 2D (time, nodes) numpy.ndarray of floats expected"
        )

    h5_file.close()
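A minimal sketch of the h5py pattern write_ts relies on (a dataset plus attributes as metadata); the attribute names here are illustrative, not the project's KEY_* constants:

import h5py
import numpy

data = numpy.random.randn(100, 4).astype("float32")  # (time, nodes)
with h5py.File("ts_example.h5", "w") as f:
    dset = f.create_dataset("/data", data=data)
    dset.attrs["Sampling_period"] = 1.0
    dset.attrs["Number_of_steps"] = data.shape[0]
    dset.attrs["Number_of_channels"] = data.shape[1]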
Example #20
    def dfun(self,
             state_variables,
             coupling,
             local_coupling=0.0,
             array=numpy.array,
             where=numpy.where,
             concat=numpy.concatenate):
        r"""
        Computes the derivatives of the state variables of the Epileptor
        with respect to time.

        Implementation note: we expect this version of the Epileptor to be used
        in a vectorized manner. Concretely, y has a shape of (6, n) where n is
        the number of nodes in the network. A consequence is that
        the original use of if/else is translated by calculating both the true
        and false forms and mixing them using a boolean mask.

        Variables of interest to be used by monitors: -y[0] + y[3]

            .. math::
            \dot{y_{0}} &=& y_{1} - f_{1}(y_{0}, y_{3}) - y_{2} + I_{ext1} \\
            \dot{y_{1}} &=& yc - d (y_{0} -5/3)^{2} - y_{1} \\
            \dot{y_{2}} &=&
            \begin{cases}
            (f_z(y_{0}) - y_{2}-0.1 y_{2}^{7})/\tau_{0} & \text{if } y_{0}<5/3 \\
            (f_z(y_{0}) - y_{2})/\tau_{0}           & \text{if } y_{0} \geq 5/3
            \end{cases} \\
            \dot{y_{3}} &=& -y_{4} + y_{3} - y_{3}^{3} + I_{ext2} + 0.002 y_{5} - 0.3 (y_{2}-3.5) \\
            \dot{y_{4}} &=& 1 / \tau_{2} (-y_{4} + f_{2}(y_{3}))\\
            \dot{y_{5}} &=& -0.01 (y_{5} - 0.1 ( y_{0} -5/3 ) )

        where:
            .. math::
                f_{1}(y_{0}, y_{3}) =
                \begin{cases}
                a ( y_{0} -5/3 )^{3} - b ( y_{0} -5/3 )^2 & \text{if } y_{0} <5/3\\
                ( y_{3} - 0.6(y_{2}-4)^2 -slope ) ( y_{0} - 5/3 ) &\text{if }y_{0} \geq 5/3
                \end{cases}

            .. math::
                f_z(y_{0})  =
                \begin{cases}
                4 (y_{0} - x_{0}) & \text{linear} \\
                \frac{3}{1+e^{-10(y_{0}-7/6)}} - x_{0} & \text{sigmoidal} \\
                \end{cases}
        and:

            .. math::
                f_{2}(y_{3}) =
                \begin{cases}
                0 & \text{if } y_{3} <-0.25\\
                s*(y_{3} + 0.25) & \text{if } y_{3} \geq -0.25
                \end{cases}

        """

        y = state_variables
        ydot = numpy.empty_like(state_variables)

        # To use later:
        x0 = y[6]
        slope = y[7]
        Iext1 = y[8]
        Iext2 = y[9]
        K = y[10]

        Iext1 = Iext1 + local_coupling * y[0]  # Iext1 is the slowly varying state variable y[8] read above
        c_pop1 = coupling[0, :]
        c_pop2 = coupling[1, :]

        # population 1
        if_ydot0 = -self.a * y[0]**2 + self.b * y[0]  # self.a=1.0, self.b=3.0
        else_ydot0 = slope - y[3] + 0.6 * (y[2] - 4.0)**2
        ydot[0] = self.tau1 * (y[1] - y[2] + Iext1 + self.Kvf * c_pop1 +
                               where(y[0] < 0.0, if_ydot0, else_ydot0) * y[0])
        ydot[1] = self.tau1 * (self.yc - self.d * y[0]**2 - y[1])  # self.d=5

        # energy
        if_ydot2 = -0.1 * y[2]**7
        else_ydot2 = 0

        if self.zmode == 'lin':
            fz = 4 * (y[0] - x0) + where(y[2] < 0., if_ydot2, else_ydot2)

        elif self.zmode == 'sig':
            fz = 3.0 / (1.0 + numpy.exp(-10 * (y[0] + 0.5))) - x0

        else:
            raise_value_error("zmode has to be either "
                              "lin"
                              " or "
                              "sig"
                              " for linear and sigmoidal fz(), respectively")
        ydot[2] = self.tau1 * ((fz - y[2] + K * c_pop1) / self.tau0)

        # population 2
        ydot[3] = self.tau1 * (-y[4] + y[3] - y[3]**3 + Iext2 + 2 * y[5] -
                               0.3 * (y[2] - 3.5) + self.Kf * c_pop2)
        if_ydot4 = 0
        else_ydot4 = self.s * (y[3] + 0.25)  # self.s = 6.0
        ydot[4] = self.tau1 * (
            (-y[4] + where(y[3] < -0.25, if_ydot4, else_ydot4)) / self.tau2)

        # filter
        ydot[5] = self.tau1 * (-0.01 * (y[5] - self.gamma * y[0]))

        slope_eq, Iext2_eq = self.fun_slope_Iext2(y[2], y[5], self.pmode,
                                                  self.slope, self.Iext2)

        # x0_values
        ydot[6] = self.tau1 * (-y[6] + self.x0)
        # slope
        ydot[7] = 10 * self.tau1 * (-y[7] + slope_eq)  # 5*
        # Iext1
        ydot[8] = self.tau1 * (-y[8] + self.Iext1) / self.tau0
        # Iext2
        ydot[9] = 5 * self.tau1 * (-y[9] + Iext2_eq)
        # K
        ydot[10] = self.tau1 * (-y[10] + self.K) / self.tau0

        return ydot
Example #21
    def __init__(self, model_configuration, hypothesis, target_data, time=None, fs=None, dynamical_model=None,
                 active_regions=None, region_labels=None,
                 active_regions_th=0.1, euler_method=-1, observation_model=1, observation_expression=1,
                 sensors=None, channel_inds=None, **kwargs):

        # model configuration
        if isinstance(model_configuration, ModelConfiguration):
            self.model_configuration = model_configuration
        else:
            raise_value_error("Input model configuration is not a ModelConfiguration object:\n"
                              + str(model_configuration))

        # hypothesis
        if isinstance(hypothesis, DiseaseHypothesis):
            self.hypothesis = hypothesis
        else:
            raise_value_error("Input hypothesis is not a DiseaseHypothesis object:\n" + str(hypothesis))

        # dynamical model and defaults for time scales and noise
        if isinstance(dynamical_model, AVAILABLE_DYNAMICAL_MODELS):
            self.dynamical_model = dynamical_model
            noise_intensity = kwargs.get("noise_intensity", model_noise_intensity_dict[self.dynamical_model._ui_name])
            self.sig_def = np.mean(noise_intensity)
            if isinstance(self.dynamical_model, (Epileptor, EpileptorModel)):
                self.tau1_def = kwargs.get("tau1", np.mean(1.0 / self.dynamical_model.r))
                self.tau0_def = kwargs.get("tau0", np.mean(self.dynamical_model.tt))

            elif isinstance(self.dynamical_model, (EpileptorDP, EpileptorDP2D, EpileptorDPrealistic)):
                self.tau1_def = kwargs.get("tau1", np.mean(self.dynamical_model.tau1))
                self.tau0_def = kwargs.get("tau0", np.mean(self.dynamical_model.tau0))

        else:
            self.tau1_def = kwargs.get("tau1", 0.5)
            self.tau0_def = kwargs.get("tau0", 30)
            self.sig_def = kwargs.get("noise_intensity", 10 ** -4)
            noise_intensity = None

        # active regions
        self.n_regions = self.hypothesis.number_of_regions
        self.active_regions_flag = np.zeros((self.n_regions,), dtype="i")
        self.active_regions_th = active_regions_th
        if active_regions is None:
            # Initialize as all those regions whose equilibria lie further away from the healthy equilibrium:
            self.active_regions = np.where(model_configuration.e_values > self.active_regions_th)[0]
            # If LSA has been run, add all regions with a propagation strength greater than the minimal one:
            if len(hypothesis.propagation_strengths) > 0:
                self.active_regions = np.unique(self.active_regions.tolist() +
                                                np.where(hypothesis.propagation_strengths /
                                                         np.max(hypothesis.propagation_strengths)
                                                         > active_regions_th)[0].tolist())
        else:
            self.active_regions = active_regions

        self.active_regions_flag[self.active_regions] = 1
        self.n_active_regions = len(self.active_regions)
        self.nonactive_regions = np.where(1-self.active_regions_flag)[0]
        self.n_nonactive_regions = len(self.nonactive_regions)

        if isinstance(target_data, np.ndarray):
            self.signals = target_data
            self.data_type = "empirical"

        elif isinstance(target_data, dict):
            self.signals = target_data.get("signals", None)
            if self.signals is None:
                if observation_expression == 1:
                    self.signals = (target_data["x1"][:, self.active_regions].T -
                                    np.expand_dims(self.model_configuration.x1EQ[self.active_regions], 1)).T + \
                                   (target_data["z"][:, self.active_regions].T -
                                    np.expand_dims(model_configuration.zEQ[self.active_regions], 1)).T
                    # TODO: a better normalization
                    self.signals = self.signals / 2.75
                elif observation_expression == 2:
                    # TODO: a better normalization
                    self.signals = (target_data["x1"][:, self.active_regions].T -
                                    np.expand_dims(model_configuration.x1EQ[self.active_regions], 1)).T / 2.0
                else:
                    self.signals = target_data["x1"][:, self.active_regions]

        else:
            raise_value_error("Input target data is neither a ndarray of empirical data nor a dictionary of "
                              "simulated data:\n" + str(target_data))

        (self.n_times, self.n_signals) = self.signals.shape


        logger.info("Constructing data dictionary...")


        # Gamma distributions' parameters
        # visualize gamma distributions here: http://homepage.divms.uiowa.edu/~mbognar/applets/gamma.html
        tau1_mu = self.tau1_def
        tau1 = gamma_from_mu_std(kwargs.get("tau1_mu", tau1_mu), kwargs.get("tau1_std", 3 * tau1_mu))
        tau0_mu = self.tau0_def
        tau0 = gamma_from_mu_std(kwargs.get("tau0_mu", tau0_mu), kwargs.get("tau0_std", 3 * 10000.0))
        K_def = np.mean(model_configuration.K)
        K = gamma_from_mu_std(kwargs.get("K_mu", K_def),
                              kwargs.get("K_std", 10 * K_def))
        # zero effective connectivity:
        conn0 = gamma_from_mu_std(kwargs.get("conn0_mu", 0.001), kwargs.get("conn0_std", 0.001))
        if noise_intensity is None:
            sig_mu = np.mean(model_noise_intensity_dict["EpileptorDP2D"])
        else:
            sig_mu = noise_intensity
        sig = gamma_from_mu_std(kwargs.get("sig_mu", sig_mu), kwargs.get("sig_std", 3 * sig_mu))
        sig_eq_mu = (X1_EQ_CR_DEF - X1_DEF) / 3.0
        sig_eq_std = 3 * sig_eq_mu
        sig_eq = gamma_from_mu_std(kwargs.get("sig_eq_mu", sig_eq_mu), kwargs.get("sig_eq_std", sig_eq_std))
        sig_init_mu = sig_eq_mu
        sig_init_std = sig_init_mu
        sig_init = gamma_from_mu_std(kwargs.get("sig_init_mu", sig_init_mu), kwargs.get("sig_init_std", sig_init_std))

        # NOTE: the source of the gain/mixing matrix is not shown in this snippet;
        # assuming here that it may be passed through kwargs:
        mixing = kwargs.get("mixing", None)
        if mixing is None or channel_inds is None or len(channel_inds) < 1:
            if observation_model == 2:
                mixing = np.random.rand(self.n_active_regions, self.n_active_regions)
                for ii in range(self.n_active_regions):
                    mixing[ii, :] = mixing[ii, :] / np.sum(mixing[ii, :])
            else:
                observation_model = 3
                mixing = np.eye(self.n_active_regions)

        else:
            mixing = mixing[channel_inds][:, self.active_regions]
            for ii in range(len(channel_inds)):
                mixing[ii, :] = mixing[ii, :] / np.sum(mixing[ii, :])

            self.signals = (np.dot(mixing, self.signals.T)).T

        # from matplotlib import pyplot
        # pyplot.plot(signals)
        # pyplot.show()

        data = {"n_regions": hypothesis.number_of_regions,
                "n_active_regions": n_active_regions,
                "n_nonactive_regions": hypothesis.number_of_regions - n_active_regions,
                "active_regions_flag": active_regions_flag,
                "n_time": signals.shape[0],
                "n_signals": signals.shape[1],
                "x0_nonactive": model_configuration.x0[~active_regions_flag.astype("bool")],
                "x1eq0": model_configuration.x1EQ,
                "zeq0": model_configuration.zEQ,
                "x1eq_lo": kwargs.get("x1eq_lo", -2.0),
                "x1eq_hi": kwargs.get("x1eq_hi", X1_EQ_CR_DEF),
                "x1init_lo": kwargs.get("x1init_lo", -2.0),
                "x1init_hi": kwargs.get("x1init_hi", -1.0),
                "x1_lo": kwargs.get("x1_lo", -2.5),
                "x1_hi": kwargs.get("x1_hi", 1.5),
                "z_lo": kwargs.get("z_lo", 2.0),
                "z_hi": kwargs.get("z_hi", 5.0),
                "tau1_lo": kwargs.get("tau1_lo", tau1_mu / 2),
                "tau1_hi": kwargs.get("tau1_hi", np.min([3 * tau1_mu / 2, 1.0])),
                "tau0_lo": kwargs.get("tau0_lo", np.min([tau0_mu / 2, 10])),
                "tau0_hi": kwargs.get("tau0_hi", np.max([3 * tau1_mu / 2, 30.0])),
                "tau1_a": kwargs.get("tau1_a", tau1["alpha"]),
                "tau1_b": kwargs.get("tau1_b", tau1["beta"]),
                "tau0_a": kwargs.get("tau0_a", tau0["alpha"]),
                "tau0_b": kwargs.get("tau0_b", tau0["beta"]),
                "SC": model_configuration.connectivity_matrix,
                "SC_sig": kwargs.get("SC_sig", 0.1),
                "K_lo": kwargs.get("K_lo", K_def / 10.0),
                "K_hi": kwargs.get("K_hi", 30.0 * K_def),
                "K_a": kwargs.get("K_a", K["alpha"]),
                "K_b": kwargs.get("K_b", K["beta"]),
                "gamma0": kwargs.get("gamma0", np.array([conn0["alpha"], conn0["beta"]])),
                "dt": 1000.0 / fs,
                "sig_hi": kwargs.get("sig_hi", 3 * sig_mu),
                "sig_a": kwargs.get("sig_a", sig["alpha"]),
                "sig_b": kwargs.get("sig_b", sig["beta"]),
                "sig_eq_hi": kwargs.get("sig_eq_hi", sig_eq_std),
                "sig_eq_a": kwargs.get("sig_eq_a", sig_eq["alpha"]),
                "sig_eq_b": kwargs.get("sig_eq_b", sig_eq["beta"]),
                "sig_init_mu": kwargs.get("sig_init_mu", sig_init_mu),
                "sig_init_hi": kwargs.get("sig_init_hi", sig_init_std),
                "sig_init_a": kwargs.get("sig_init_a", sig_init["alpha"]),
                "sig_init_b": kwargs.get("sig_init_b", sig_init["beta"]),
                "observation_model": observation_model,
                "signals": signals,
                "mixing": mixing,
                "eps_hi": kwargs.get("eps_hi", (np.max(signals.flatten()) - np.min(signals.flatten()) / 100.0)),
                "eps_x0": kwargs.get("eps_x0", 0.1),
                }

        for p in ["a", "b", "d", "yc", "Iext1", "slope"]:

            temp = getattr(model_configuration, p)
            if isinstance(temp, (np.ndarray, list)):
                if np.all(temp[0] == np.array(temp)):
                    temp = temp[0]
                else:
                    raise_not_implemented_error("Statistical models where not all regions have the same value " +
                                                "for parameter " + p + " are not implemented yet!")
            data.update({p: temp})

        zeq_lo = calc_eq_z(data["x1eq_hi"], data["yc"], data["Iext1"], "2d", x2=0.0, slope=data["slope"], a=data["a"],
                           b=data["b"], d=data["d"])
        zeq_hi = calc_eq_z(data["x1eq_lo"], data["yc"], data["Iext1"], "2d", x2=0.0, slope=data["slope"], a=data["a"],
                           b=data["b"], d=data["d"])
        data.update({"zeq_lo": kwargs.get("zeq_lo", zeq_lo),
                     "zeq_hi": kwargs.get("zeq_hi", zeq_hi)})
        data.update({"zinit_lo": kwargs.get("zinit_lo", zeq_lo - sig_init_std),
                     "zinit_hi": kwargs.get("zinit_hi", zeq_hi + sig_init_std)})

        x0cr, rx0 = calc_x0cr_r(data["yc"], data["Iext1"], data["a"], data["b"], data["d"], zmode=np.array("lin"),
                                x1_rest=X1_DEF, x1_cr=X1_EQ_CR_DEF, x0def=X0_DEF, x0cr_def=X0_CR_DEF, test=False,
                                shape=None, calc_mode="non_symbol")

        data.update({"x0cr": x0cr, "rx0": rx0})
        logger.info("data dictionary completed with " + str(len(data)) + " fields:\n" + str(data.keys()))
Example #22
    def dfun(self,
             state_variables,
             coupling,
             local_coupling=0.0,
             array=numpy.array,
             where=numpy.where,
             concat=numpy.concatenate):
        r"""
        Computes the derivatives of the state variables of the Epileptor
        with respect to time.

        Implementation note: we expect this version of the Epileptor to be used
        in a vectorized manner. Concretely, y has a shape of (2, n) where n is
        the number of nodes in the network. A consequence is that
        the original use of if/else is translated by calculating both the true
        and false forms and mixing them using a boolean mask.

        Variables of interest to be used by monitors: -y[0] + y[3]

            .. math::
            \dot{y_{0}} &=& yc - f_{1}(y_{0}, y_{1}) - y_{1} + I_{ext1} \\
            \dot{y_{1}} &=&
            \begin{cases}
            (f_z(y_{0}) - y_{1}-0.1 y_{1}^{7})/\tau_{0} & \text{if } y_{0}<5/3 \\
            (f_z(y_{0}) - y_{1})/\tau_{0}           & \text{if } y_{0} \geq 5/3
            \end{cases} \\

        where:
            .. math::
                f_{1}(y_{0}, y_{1}) =
                \begin{cases}
                a ( y_{0} -5/3 )^{3} - b ( y_{0} -5/3 )^2 & \text{if } y_{0} <5/3\\
                ( 5*( y_{0} -5/3 ) - 0.6(y_{1}-4)^2 -slope) ( y_{0} - 5/3 ) &\text{if }y_{0} \geq 5/3
                \end{cases}
        and:

            .. math::
                f_z(y_{0})  =
                \begin{cases}
                4 (y_{0} - r x_{0} + x0_{cr}) & \text{linear} \\
                \frac{3}{1+e^{-10(y_{0}-7/6)}} - r x_{0} + x0_{cr} & \text{sigmoidal} \\
                \end{cases}


        """

        y = state_variables
        ydot = numpy.empty_like(state_variables)

        Iext1 = self.Iext1 + local_coupling * y[0]
        c_pop1 = coupling[0, :]

        # population 1
        if_ydot0 = self.a * y[0]**2 + (
            self.d - self.b) * y[0]  # self.a=1.0, self.b=3.0, self.d=5.0
        else_ydot0 = self.d * y[0] - 0.6 * (y[1] - 4.0)**2 - self.slope
        ydot[0] = self.tau1 * (self.yc - y[1] + Iext1 + self.Kvf * c_pop1 -
                               where(y[0] < 0.0, if_ydot0, else_ydot0) * y[0])

        # energy
        if_ydot1 = -0.1 * y[1]**7
        else_ydot1 = 0

        if self.zmode == 'lin':
            fz = 4 * (y[0] - self.x0) + where(y[1] < 0.0, if_ydot1, else_ydot1)

        elif self.zmode == 'sig':
            fz = 3.0 / (1.0 + numpy.exp(-10 * (y[0] + 0.5))) - self.x0

        else:
            raise_value_error("zmode has to be either 'lin' or 'sig' for "
                              "linear and sigmoidal fz(), respectively")

        ydot[1] = self.tau1 * (fz - y[1] + self.K * c_pop1) / self.tau0

        return ydot
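The docstring's note about replacing if/else with a boolean mask is the standard numpy.where idiom: both branches are evaluated for every node and then mixed elementwise. A toy illustration:

import numpy

y0 = numpy.array([-1.2, 0.3, -0.1, 0.8])    # one value per node
if_branch = -y0 ** 2 + 3.0 * y0              # value where the condition holds
else_branch = 5.0 * y0 - 0.6                 # value elsewhere
mixed = numpy.where(y0 < 0.0, if_branch, else_branch)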
Example #23
    def dfun(self,
             state_variables,
             coupling,
             local_coupling=0.0,
             array=numpy.array,
             where=numpy.where,
             concat=numpy.concatenate):
        r"""
        Computes the derivatives of the state variables of the Epileptor
        with respect to time.

        Implementation note: we expect this version of the Epileptor to be used
        in a vectorized manner. Concretely, y has a shape of (6, n) where n is
        the number of nodes in the network. A consequence is that
        the original use of if/else is translated by calculating both the true
        and false forms and mixing them using a boolean mask.

        Variables of interest to be used by monitors: -y[0] + y[3]

            .. math::
            \dot{y_{0}} &=& y_{1} - f_{1}(y_{0}, y_{3}) - y_{2} + I_{ext1} \\
            \dot{y_{1}} &=& yc - d y_{0}^{2} - y_{1} \\
            \dot{y_{2}} &=&
            \begin{cases}
            (f_z(y_{0}) - y_{2}-0.1 y_{2}^{7})/\tau_{0} & \text{if } y_{0}<0 \\
            (f_z(y_{0}) - y_{2})/\tau_{0}           & \text{if } y_{0} \geq 0
            \end{cases} \\
            \dot{y_{3}} &=& -y_{4} + y_{3} - y_{3}^{3} + I_{ext2} + 0.002 y_{5} - 0.3 (y_{2}-3.5) \\
            \dot{y_{4}} &=& 1 / \tau_{2} (-y_{4} + f_{2}(y_{3}))\\
            \dot{y_{5}} &=& -0.01 (y_{5} - 0.1 y_{0} )

        where:
            .. math::
                f_{1}(y_{0}, y_{3}) =
                \begin{cases}
                a y_{0}^{3} - by_{0}^2 & \text{if } y_{0} <0\\
                (y_{3} - 0.6(y_{2}-4)^2 - slope) y_{0} &\text{if }y_{0} \geq 0
                \end{cases}

            .. math::
                f_z(y_{0})  =
                \begin{cases}
                4 (y_{0} - x_{0}) & \text{linear} \\
                \frac{3}{1+e^{-10(y_{0}+0.5)}} - x_{0} & \text{sigmoidal} \\
                \end{cases}
        and:

            .. math::
                f_{2}(y_{3}) =
                \begin{cases}
                0 & \text{if } y_{3} <-0.25\\
                s*(y_{3} + 0.25) & \text{if } y_{3} \geq -0.25
                \end{cases}

        """

        y = state_variables
        ydot = numpy.empty_like(state_variables)

        Iext1 = self.Iext1 + local_coupling * y[0]
        c_pop1 = coupling[0, :]
        c_pop2 = coupling[1, :]

        # TVB Epileptor in commented lines below

        # population 1
        # if_ydot0 = - self.a * y[0] ** 2 + self.b * y[0]
        if_ydot0 = -self.a * y[0]**2 + self.b * y[0]  # self.a=1.0, self.b=3.0
        # else_ydot0 = self.slope - y[3] + 0.6 * (y[2] - 4.0) ** 2
        else_ydot0 = self.slope - y[3] + 0.6 * (y[2] - 4.0)**2
        # ydot[0] = self.tt * (y[1] - y[2] + Iext + self.Kvf * c_pop1 + where(y[0] < 0., if_ydot0, else_ydot0) * y[0])
        ydot[0] = self.tau1 * (y[1] - y[2] + Iext1 + self.Kvf * c_pop1 +
                               where(y[0] < 0.0, if_ydot0, else_ydot0) * y[0])
        # ydot[1] = self.tt * (self.c - self.d * y[0] ** 2 - y[1])
        ydot[1] = self.tau1 * (self.yc - self.d * y[0]**2 - y[1])  # self.d=5

        # energy
        # if_ydot2 = - 0.1 * y[2] ** 7
        if_ydot2 = -0.1 * y[2]**7
        # else_ydot2 = 0
        else_ydot2 = 0

        if self.zmode == 'lin':
            # self.r * (4 * (y[0] - self.x0_values) - y[2]      + where(y[2] < 0., if_ydot2, else_ydot2)
            fz = 4 * (y[0] - self.x0) + where(y[2] < 0., if_ydot2, else_ydot2)

        elif self.zmode == 'sig':
            fz = 3.0 / (1.0 + numpy.exp(-10 * (y[0] + 0.5))) - self.x0

        else:
            raise_value_error("zmode has to be either "
                              "lin"
                              " or "
                              "sig"
                              " for linear and sigmoidal fz(), " +
                              "respectively")

        # ydot[2] = self.tt * (        ...+ self.Ks * c_pop1))
        ydot[2] = self.tau1 * ((fz - y[2] + self.K * c_pop1) / self.tau0)

        # population 2
        # ydot[3] = self.tt * (-y[4] + y[3] - y[3] ** 3 + self.Iext2 + 2 * y[5] - 0.3 * (y[2] - 3.5) + self.Kf * c_pop2)
        ydot[3] = self.tau1 * (-y[4] + y[3] - y[3]**3 + self.Iext2 + 2 * y[5] -
                               0.3 * (y[2] - 3.5) + self.Kf * c_pop2)
        # if_ydot4 = 0
        if_ydot4 = 0
        # else_ydot4 = self.aa * (y[3] + 0.25)
        else_ydot4 = self.s * (y[3] + 0.25)  # self.s = 6.0
        # ydot[4] = self.tt * ((-y[4] + where(y[3] < -0.25, if_ydot4, else_ydot4)) / self.tau)
        ydot[4] = self.tau1 * (
            (-y[4] + where(y[3] < -0.25, if_ydot4, else_ydot4)) / self.tau2)

        # filter
        # ydot[5] = self.tt * (-0.01 * (y[5] - 0.1 * y[0]))
        ydot[5] = self.tau1 * (-0.01 * (y[5] - self.gamma * y[0]))

        return ydot
Example #24
def sensitivity_analysis_pse_from_lsa_hypothesis(
        lsa_hypothesis,
        connectivity_matrix,
        region_labels,
        n_samples,
        method="sobol",
        half_range=0.1,
        global_coupling=[],
        healthy_regions_parameters=[],
        model_configuration_service=None,
        lsa_service=None,
        save_services=False,
        logger=None,
        **kwargs):

    if logger is None:
        logger = initialize_logger(__name__)

    method = method.lower()
    if np.in1d(method, METHODS):
        if np.in1d(method, ["delta", "dgsm"]):
            sampler = "latin"
        elif method == "sobol":
            sampler = "saltelli"
        elif method == "fast":
            sampler = "fast_sampler"
        else:
            sampler = method
    else:
        raise_value_error("Method " + str(method) +
                          " is not one of the available methods " +
                          str(METHODS) + " !")

    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.get_regions_disease_indices()
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()

    pse_params = {"path": [], "indices": [], "name": [], "bounds": []}
    n_inputs = 0

    # First build from the hypothesis the input parameters of the sensitivity analysis.
    # They can originate from excitability, epileptogenicity or connectivity hypotheses,
    # or relate to the global coupling scaling (parameter K of the model configuration).
    for ii in range(len(lsa_hypothesis.x0_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.x0_indices[ii]]) +
            " Excitability")
        pse_params["bounds"].append([
            lsa_hypothesis.x0_values[ii] - half_range,
            np.min(
                [MAX_DISEASE_VALUE, lsa_hypothesis.x0_values[ii] + half_range])
        ])

    for ii in range(len(lsa_hypothesis.e_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(
            str(region_labels[lsa_hypothesis.e_indices[ii]]) +
            " Epileptogenicity")
        pse_params["bounds"].append([
            lsa_hypothesis.e_values[ii] - half_range,
            np.min(
                [MAX_DISEASE_VALUE, lsa_hypothesis.e_values[ii] + half_range])
        ])

    for ii in range(len(lsa_hypothesis.w_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii],
                                                 connectivity_matrix.shape)
        if len(inds) == 1:
            pse_params["name"].append(
                str(region_labels[inds[0][0]]) + "-" +
                str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        pse_params["bounds"].append([
            np.max([lsa_hypothesis.w_values[ii] - half_range, 0.0]),
            lsa_hypothesis.w_values[ii] + half_range
        ])

    for val in global_coupling:
        n_inputs += 1
        pse_params["path"].append("model.configuration.service.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)
        pse_params["bounds"].append(val["bounds"])

    # Now generate samples suitable for sensitivity analysis
    sampler = StochasticSamplingService(n_samples=n_samples,
                                        n_outputs=n_inputs,
                                        sampler=sampler,
                                        trunc_limits={},
                                        sampling_module="salib",
                                        random_seed=kwargs.get(
                                            "random_seed", None),
                                        bounds=pse_params["bounds"])

    input_samples = sampler.generate_samples(**kwargs)
    n_samples = input_samples.shape[1]
    pse_params.update(
        {"samples": [np.array(value) for value in input_samples.tolist()]})

    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
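    # For illustration, the assumed behavior of this helper (not shown here):
    #   dicts_of_lists_to_lists_of_dicts({"a": [1, 2], "b": [3, 4]})
    #   -> [{"a": 1, "b": 3}, {"a": 2, "b": 4}]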

    # Add a random jitter to the healthy regions, if required:
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        sampler = StochasticSamplingService(
            n_samples=n_samples,
            n_outputs=n_params,
            sampler="uniform",
            trunc_limits={"low": 0.0},
            sampling_module="scipy",
            random_seed=kwargs.get("random_seed", None),
            loc=kwargs.get("loc", 0.0),
            scale=kwargs.get("scale", 2 * half_range))

        samples = sampler.generate_samples(**kwargs)
        for ii in range(n_params):
            pse_params_list.append({
                "path": "model_configuration_service." + name,
                "samples": samples[ii],
                "indices": [inds[ii]],
                "name": name
            })

    # Now run pse service to generate output samples:
    pse = PSEService("LSA",
                     hypothesis=lsa_hypothesis,
                     params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(
        connectivity_matrix,
        grid_mode=False,
        lsa_service_input=lsa_service,
        model_configuration_service_input=model_configuration_service)

    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)

    # Now prepare inputs and outputs and run the sensitivity analysis:
    # NOTE: without the jittered healthy regions, which we don't want to include in the sensitivity analysis!
    inputs = dicts_of_lists_to_lists_of_dicts(pse_params)

    outputs = [{
        "names": ["LSA Propagation Strength"],
        "values": pse_results["propagation_strengths"]
    }]
    sensitivity_analysis_service = SensitivityAnalysisService(
        inputs,
        outputs,
        method=method,
        calc_second_order=kwargs.get("calc_second_order", True),
        conf_level=kwargs.get("conf_level", 0.95))

    results = sensitivity_analysis_service.run(**kwargs)

    if save_services:
        logger.info(pse.__repr__())
        pse.write_to_h5(FOLDER_RES, method + "_test_pse_service.h5")

        logger.info(sensitivity_analysis_service.__repr__())
        sensitivity_analysis_service.write_to_h5(
            FOLDER_RES, method + "_test_sa_service.h5")

    return results, pse_results
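A hedged usage sketch of the whole pipeline above; my_hypothesis, conn and labels stand for an LSA hypothesis, a connectivity matrix and a region-labels array prepared elsewhere (all placeholder names):

# Placeholder inputs prepared elsewhere; "sobol" selects the "saltelli" sampler.
sa_results, pse_results = sensitivity_analysis_pse_from_lsa_hypothesis(
    my_hypothesis, conn, labels,
    n_samples=100, method="sobol", half_range=0.1)
print(sa_results["output_names"])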
Example #25
0
def symbol_vars(n_regions, vars_str, dims=1, ind_str="_", shape=None, output_flag="numpy_array"):

    vars_out = list()
    vars_dict = {}

    if dims == 1:

        if shape is None:
            if output_flag == "sympy_array":
                shape = (n_regions, 1)
            else:
                shape = (n_regions, )

        for vs in vars_str:
            temp = [Symbol(vs+ind_str+'%d' % i_n, real=True) for i_n in range(n_regions)]
            if output_flag == "numpy_array":
                temp = reshape(temp, shape)
            elif output_flag == "sympy_array":
                temp = Array(temp).reshape(shape[0], shape[1])
            vars_out.append(temp)
            vars_dict[vs] = vars_out[-1]

    elif dims == 0:

        if shape is None:
            if output_flag == "sympy_array":
                shape = (1, 1)
            else:
                shape = (1, )

        for vs in vars_str:
            temp = Symbol(vs, real=True)
            if output_flag == "numpy_array":
                temp = reshape(temp, shape)
            elif output_flag == "sympy_array":
                temp = Array(temp).reshape(shape[0], shape[1])
            vars_out.append(temp)
            vars_dict[vs] = vars_out[-1]

    elif dims == 2:

        if shape is None:
            shape = (n_regions, n_regions)

        for vs in vars_str:
            temp = []
            for i_n in range(n_regions):
                temp.append([Symbol(vs + ind_str + '%d' % i_n + ind_str + '%d' % j_n, real=True)
                             for j_n in range(n_regions)])
            if output_flag == "numpy_array":
                temp = reshape(temp, shape)
            elif output_flag == "sympy_array":
                temp = Array(temp).reshape(shape[0], shape[1])
            vars_out.append(temp)
            vars_dict[vs] = vars_out[-1]
    else:
        raise_value_error("The dimensionality of the variables is neither 1 nor 2: " + str(dims))

    vars_out.append(vars_dict)

    return tuple(vars_out)
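For instance, under the branches above, two per-region 1D symbol vectors unpack as follows (a sketch of the default numpy_array output; the trailing element of the returned tuple is always the dict of all created variables):

x1, z, svars = symbol_vars(3, ["x1", "z"])  # dims=1, shape (3,) each
# x1 -> array([x1_0, x1_1, x1_2]); z -> array([z_0, z_1, z_2])
# svars -> {"x1": x1, "z": z}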
Example #26
0
def write_ts_epi(raw_data,
                 sampling_period,
                 lfp_data=None,
                 folder=os.path.join(PATIENT_VIRTUAL_HEAD, "ep"),
                 filename="ts_from_python.h5",
                 logger=logger):

    path, overwrite = change_filename_or_overwrite(folder, filename)
    # if os.path.exists(path):
    #     print "TS file %s already exists. Use a different name!" % path
    #     return

    if raw_data is None or len(raw_data.shape) != 3:
        raise_value_error("Invalid TS data: a 3D (time, regions, sv) array is expected",
                          logger)

    logger.info("Writing a TS at:\n" + path)

    if isinstance(lfp_data, int):
        # A single state-variable index: take it as the LFP and drop it from the raw data
        sv_ind = lfp_data
        lfp_data = raw_data[:, :, sv_ind]
        raw_data = numpy.delete(raw_data, sv_ind, axis=2)
    elif isinstance(lfp_data, list):
        # Two state-variable indices: the LFP is their difference
        lfp_data = raw_data[:, :, lfp_data[1]] - raw_data[:, :, lfp_data[0]]
    elif isinstance(lfp_data, numpy.ndarray):
        lfp_data = lfp_data.reshape((lfp_data.shape[0], lfp_data.shape[1], 1))
    else:
        raise_value_error("Invalid lfp_data: an int, a list of two sv indices, "
                          "or a numpy.ndarray is expected", logger)

    if overwrite:
        try:
            os.remove(path)
        except OSError:
            warning("\nFile to overwrite not found!")

    h5_file = h5py.File(path, 'a', libver='latest')
    h5_file.create_dataset("/data", data=raw_data)
    h5_file.create_dataset("/lfpdata", data=lfp_data)

    write_metadata({KEY_TYPE: "TimeSeries"}, h5_file, KEY_DATE, KEY_VERSION)
    write_metadata(
        {
            KEY_MAX: raw_data.max(),
            KEY_MIN: raw_data.min(),
            KEY_STEPS: raw_data.shape[0],
            KEY_CHANNELS: raw_data.shape[1],
            KEY_SV: raw_data.shape[2],
            KEY_SAMPLING: sampling_period,
            KEY_START: 0.0
        }, h5_file, KEY_DATE, KEY_VERSION, "/data")
    write_metadata(
        {
            KEY_MAX: lfp_data.max(),
            KEY_MIN: lfp_data.min(),
            KEY_STEPS: lfp_data.shape[0],
            KEY_CHANNELS: lfp_data.shape[1],
            KEY_SV: 1,
            KEY_SAMPLING: sampling_period,
            KEY_START: 0.0
        }, h5_file, KEY_DATE, KEY_VERSION, "/lfpdata")
    h5_file.close()
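A hedged call sketch with synthetic data; the folder default depends on PATIENT_VIRTUAL_HEAD from the module configuration, so an explicit path is passed here (sizes and paths are illustrative):

data = numpy.random.randn(1000, 88, 2)      # (time, regions, sv)
write_ts_epi(data, sampling_period=0.5,
             lfp_data=[0, 1],                # LFP = sv 1 - sv 0
             folder="/tmp/ep", filename="ts_from_python.h5")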
Example #27
0
    def _set_calc_second_order(self, calc_second_order):
        if isinstance(calc_second_order, bool):
            self.calc_second_order = calc_second_order
        else:
            raise_value_error("calc_second_order = " + str(calc_second_order) +
                              " is not a boolean, as it should be!")
Example #28
0
    def run(self,
            input_ids=None,
            output_ids=None,
            method=None,
            calc_second_order=None,
            conf_level=None,
            **kwargs):

        self._update_parameters(method, calc_second_order, conf_level)

        self.other_parameters = kwargs

        if input_ids is None:
            input_ids = range(self.n_inputs)

        self.problem = {
            "num_vars": len(input_ids),
            "names": np.array(self.input_names)[input_ids].tolist(),
            "bounds": np.array(self.input_bounds)[input_ids].tolist()
        }

        if output_ids is None:
            output_ids = range(self.n_outputs)

        n_outputs = len(output_ids)

        if self.method.lower() == "sobol":
            warning("'sobol' method requires 'saltelli' sampling scheme!")
            # Additional keyword parameters and their defaults:
            # calc_second_order (bool): Calculate second-order sensitivities (default True)
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            # parallel: False,
            # n_processors: None
            self.analyzer = lambda output: sobol.analyze(
                self.problem,
                output,
                calc_second_order=self.calc_second_order,
                conf_level=self.conf_level,
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                parallel=self.other_parameters.get("parallel", False),
                n_processors=self.other_parameters.get("n_processors", None),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif np.in1d(self.method.lower(), ["latin", "delta"]):
            warning(
                "'latin' sampling scheme is recommended for 'delta' method!")
            # Additional keyword parameters and their defaults:
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            self.analyzer = lambda output: delta.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                conf_level=self.conf_level,
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif np.in1d(self.method.lower(), ["fast", "fast_sampler"]):
            warning("'fast' method requires 'fast_sampler' sampling scheme!")
            # Additional keyword parameters and their defaults:
            # M (int): The interference parameter,
            #           i.e., the number of harmonics to sum in the Fourier series decomposition (default 4)
            # print_to_console (bool): Print results directly to console (default False)
            self.analyzer = lambda output: fast.analyze(
                self.problem,
                output,
                M=self.other_parameters.get("M", 4),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif np.in1d(self.method.lower(), ["ff", "fractional_factorial"]):
            # Additional keyword parameters and their defaults:
            # second_order (bool, default=False): Include interaction effects
            # print_to_console (bool, default=False): Print results directly to console
            warning(
                "'fractional_factorial' method requires 'fractional_factorial' sampling scheme!"
            )
            self.analyzer = lambda output: ff.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                second_order=self.calc_second_order,
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif self.method.lower().lower() == "morris":
            warning("'morris' method requires 'morris' sampling scheme!")
            # Additional keyword parameters and their defaults:
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            # grid_jump (int): The grid jump size, must be identical to the value passed to
            #                   SALib.sample.morris.sample() (default 2)
            # num_levels (int): The number of grid levels, must be identical to the value passed to
            #                   SALib.sample.morris (default 4)
            self.analyzer = lambda output: morris.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                conf_level=self.conf_level,
                grid_jump=self.other_parameters.get("grid_jump", 2),
                num_levels=self.other_parameters.get("num_levels", 4),
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        elif self.method.lower() == "dgsm":
            # num_resamples (int): The number of resamples used to compute the confidence intervals (default 1000)
            # conf_level (float): The confidence interval level (default 0.95)
            # print_to_console (bool): Print results directly to console (default False)
            self.analyzer = lambda output: dgsm.analyze(
                self.problem,
                self.input_samples[:, input_ids],
                output,
                conf_level=self.conf_level,
                num_resamples=self.other_parameters.get("num_resamples", 1000),
                print_to_console=self.other_parameters.get(
                    "print_to_console", False))

        else:
            raise_value_error("Method " + str(self.method) +
                              " is not one of the available methods " +
                              str(METHODS) + " !")

        output_names = []
        results = []
        for io in output_ids:
            output_names.append(self.output_names[io])
            results.append(self.analyzer(self.output_values[:, io]))

        # TODO: Adjust list_of_dicts_to_dicts_of_ndarrays to handle ndarray concatenation
        results = list_of_dicts_to_dicts_of_ndarrays(results)

        results.update({"output_names": output_names})

        return results
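Tying this run() method to the class constructor shown further below, a minimal hedged sketch with toy samples; the "delta" method is chosen here because it analyzes plain input/output samples directly (names and sizes are illustrative):

import numpy as np
xs = np.random.uniform(0.0, 1.0, (100, 2))                   # 100 samples, 2 inputs
ys = xs[:, 0] + 0.1 * xs[:, 1] + 0.01 * np.random.randn(100)
service = SensitivityAnalysisService(
    inputs=[{"name": "x%d" % i, "samples": xs[:, i]} for i in range(2)],
    outputs=[{"names": ["y"], "values": ys}],
    method="delta")
results = service.run()   # dict of SALib delta indices plus "output_names"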
Example #29
0
    def __init__(self, name, type, parameters, n_regions=0, n_active_regions=0, n_signals=0, n_times=0,
                 euler_method="backward", observation_model="logpower", observation_expression="x1z_offset"):

        self.n_regions = n_regions
        self.n_active_regions = n_active_regions
        self.n_nonactive_regions = self.n_regions - self.n_active_regions
        self.n_signals = n_signals
        self.n_times = n_times

        if isinstance(name, basestring):
            self.name = name
        else:
            raise_value_error("Statistical model's name " + str(name) + " is not a string!")

        self.parameters = {}
        try:
            for p in ensure_list(parameters):
                if isinstance(p, Parameter):
                    self.parameters.update({p.name: p})
                else:
                    raise_value_error("Not valid Parameter object detected!")
        except:
            raise_value_error("Failed to set StatisticalModel parameters=\n" + str(parameters))
        self.n_parameters = len(self.parameters)

        if np.in1d(type, STATISTICAL_MODEL_TYPES):
            self.type = type
        else:
            raise_value_error("Statistical model's tupe " + str(type) + " is not one of the valid ones: "
                              + str(STATISTICAL_MODEL_TYPES) + "!")

        if np.in1d(euler_method, ["backward", "forward"]):
            self.euler_method = euler_method
        else:
            raise_value_error("Statistical model's euler_method " + str(euler_method) + " is not one of the valid ones: "
                              + str(["backward", "forward"]) + "!")

        if np.in1d(observation_expression, OBSERVATION_MODEL_EXPRESSIONS):
            self.observation_expression = observation_expression
        else:
            raise_value_error("Statistical model's observation expression " + str(observation_expression) +
                              " is not one of the valid ones: "
                              + str(OBSERVATION_MODEL_EXPRESSIONS) + "!")

        if np.in1d(observation_model, OBSERVATION_MODELS):
            self.observation_model = observation_model
        else:
            raise_value_error("Statistical model's observation expression " + str(observation_model) +
                              " is not one of the valid ones: "
                              + str(OBSERVATION_MODELS) + "!")
Example #30
0
    def __init__(self,
                 inputs,
                 outputs,
                 method="delta",
                 calc_second_order=True,
                 conf_level=0.95):

        self._set_method(method)
        self._set_calc_second_order(calc_second_order)
        self._set_conf_level(conf_level)

        self.n_samples = []
        self.input_names = []
        self.input_bounds = []
        self.input_samples = []
        self.n_inputs = len(inputs)

        for input in inputs:

            self.input_names.append(input["name"])

            samples = np.array(input["samples"]).flatten()
            self.n_samples.append(samples.size)
            self.input_samples.append(samples)

            self.input_bounds.append(
                input.get("bounds",
                          [samples.min(), samples.max()]))

        if len(self.n_samples) > 0:
            if np.all(np.array(self.n_samples) == self.n_samples[0]):
                self.n_samples = self.n_samples[0]
            else:
                raise_value_error(
                    "Not all input parameters have an equal number of samples!: "
                    + str(self.n_samples))

        self.input_samples = np.array(self.input_samples).T

        self.n_outputs = 0
        self.output_values = []
        self.output_names = []

        for output in outputs:

            if output["values"].size == self.n_samples:
                n_outputs = 1
                self.output_values.append(output["values"].flatten())
            else:
                if output["values"].shape[0] == self.n_samples:
                    self.output_values.append(output["values"])
                    n_outputs = output["values"].shape[1]
                elif output["values"].shape[1] == self.n_samples:
                    self.output_values.append(output["values"].T)
                    n_outputs = output["values"].shape[0]
                else:
                    raise_value_error(
                        "None of the dimensions of the output samples: " +
                        str(output["values"].shape) + " matches n_samples = " +
                        str(self.n_samples) + " !")
            self.n_outputs += n_outputs

            if n_outputs > 1 and len(output["names"]) == 1:
                self.output_names += np.array([
                    "%s[%d]" % l
                    for l in zip(np.repeat(output["names"][0], n_outputs),
                                 range(n_outputs))
                ]).tolist()
            else:
                self.output_names += output["names"]

        if len(self.output_values) > 0:
            # Stack outputs as columns -> (n_samples, n_outputs),
            # since run() indexes them as self.output_values[:, io]:
            self.output_values = np.hstack([np.reshape(vals, (self.n_samples, -1))
                                            for vals in self.output_values])

        self.problem = {}
        self.other_parameters = {}