Example #1
    def strategy(self, opponent: Player) -> Action:
        if self.history:
            # Update internal state from the last play
            last_round = (self.history[-1], opponent.history[-1])
            self.s += self.delta[last_round]

        # Compute probability of Cooperation
        p = self.perr + (1.0 - 2 * self.perr) * (
            heaviside(self.s + 1, 1) - heaviside(self.s - 1, 1))
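        # heaviside(s + 1, 1) - heaviside(s - 1, 1) equals 1 exactly when -1 <= s < 1,
        # so p is 1 - perr inside that window and perr outside it.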
        # Draw action
        action = random_choice(p)
        return action
Example #2
def lbm_grid():
    _xi, _w, order = lattices[args.lattice].xi, lattices[args.lattice].w, lattices[args.lattice].order
    a, D = fixed.sqr_a, fixed.D
    xi_v = lambda v: np.einsum('il,al', _xi, v)/a
    sqr_xi = np.einsum('il,il->i', _xi, _xi)/a
    sqr = lambda v: np.einsum('al,al,i->ai', v, v, np.ones_like(_w))/a
    weighted = lambda f: np.einsum('i,ai->ai', _w, f)
    weighted_rho = lambda m, f: np.einsum('a,i,ai->ai', m.rho, _w, f)
    xi = lambda v: np.einsum('il,a', _xi, np.ones(v.shape[0])) - np.einsum('i,al', np.ones_like(_w), v)
    Tn = lambda n: 1./np.math.factorial(n) * np.heaviside(order-2*n, 1)
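    # np.heaviside(order - 2*n, 1) truncates the expansion: the n-th Hermite term survives only when the lattice order is at least 2n.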

    T1 = lambda m: xi_v(m.vel) * Tn(1)
    T2 = lambda m: ( (xi_v(m.vel))**2 - sqr(m.vel) + np.einsum('a,i', m.temp-1, sqr_xi-D) ) * Tn(2)
    T3 = lambda m: xi_v(m.vel)*( xi_v(m.vel)**2 - 3*sqr(m.vel) + 3*np.einsum('a,i', m.temp-1, sqr_xi-D-2) ) * Tn(3)
    Maxw = lambda m: weighted_rho(m, 1 + T1(m) + T2(m) + T3(m))
    G2 = lambda m: np.einsum('il,im,an,lmn->ai', _xi, _xi, m.tau, hodge)/a * Tn(2)
    G3 = lambda m: xi_v(m.qflow) * (sqr_xi/(fixed.D+2)-1) + G2(m)*xi_v(m.vel) \
        - np.einsum('il,am,an,lmn->ai', _xi, m.vel, m.tau, hodge)/a
    Grad13 = lambda m: Maxw(m) + weighted(G2(m) + G3(m))
    adapter = lambda func, macro: func(to_arr(macro))[from_arr(macro)]

    return Model(
        info = 'LBM: %s' % args.lattice,
        weights = _w,
        xi = lambda vel=zeros: xi(_to_arr(vel))[_from_arr(vel)],
        Maxw = lambda macro: adapter(Maxw, macro),
        Grad13 = lambda macro: adapter(Grad13, macro)
    )
Example #3
def prob_bh_mass(mass, alpha, m_gap, m_cap):
    part1 = mass**(-alpha)
    part2 = np.heaviside(mass - m_gap, 1.0)  # step enforcing the lower mass gap
    part3 = np.exp(-mass / m_cap)

    prob = part1 * part2 * part3

    return prob
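A quick sanity check of the step above (the parameter values are illustrative only, not from any fitted model):

import numpy as np

masses = np.array([2.0, 5.0, 10.0, 50.0])
# Below m_gap the step zeroes the probability; above it the power law
# decays under the exponential cap at m_cap.
print(prob_bh_mass(masses, alpha=2.35, m_gap=5.0, m_cap=40.0))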
Example #4
 def convert(self, config, tag="?", sigma=0.5, weight=None, typemap=None, laplace_cutoff=0):
     R = config.get_positions()
     T = config.get_chemical_symbols()
     N = R.shape[0]
     if weight is None: weight = np.ones((N,))
     if type(sigma) in [float, np.float64, np.float32]:
         sigma = sigma*np.ones((N,))
     if config.pbc.all(): 
         box = np.array([config.cell[0], config.cell[1], config.cell[2]])
     elif not config.pbc.any(): 
         box = np.zeros((3,3))
     else: 
         raise NotImplementedError("<IO::convert> Partial periodicity not implemented.")
     struct = soap.Structure(tag)
     struct.box = box
     segment = struct.addSegment()
     for i in range(R.shape[0]):
         r = R[i]
         t = T[i]
         particle = struct.addParticle(segment)
         particle.pos = r
         particle.weight = weight[i]
         particle.sigma = sigma[i]
         if typemap is None: particle.type = T[i]
         else:
             colour = typemap[T[i]]
             for channel_idx, c in enumerate(colour):
                 particle.addType(typemap["channels"][channel_idx], c)
     if laplace_cutoff > 0:
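         # Breadth-first expansion of the bond graph: each pass, heaviside marks
         # pairs newly reachable via one more bond, so L_out accumulates hop
         # distances up to laplace_cutoff (unreached pairs get 2*laplace_cutoff).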
         D = partition.calculate_distance_mat(R, R)
         L = 1*partition.calculate_connectivity_mat(D, T)
         np.fill_diagonal(L, 0)
         L_out = np.copy(L)
         L_visited = np.copy(L)
         for lcut in range(2, laplace_cutoff+1):
             dL_visited = np.heaviside(np.heaviside(L_visited.dot(L),0) - L_visited, 0)
             np.fill_diagonal(dL_visited, 0)
             L_out = L_out + lcut*dL_visited
             L_visited = L_visited + dL_visited
         L_out = L_out + 2*laplace_cutoff*(1-L_visited)
         np.fill_diagonal(L_out, 0)
         struct.setLaplacian(L_out, str(L_out.dtype))
     return struct
Example #5
    def __call__(self, x):
        """
        Evaluates the spline function.
        :param x: one dimensional array with values where the spline will be evaluated
        :return: spline value at x
        """
        # \sum_{j=0}^n \beta_{oj} x^j
        x_powers = np.zeros((self.n + 1, len(x)))
        x_powers[0] = np.ones_like(x)  # row 0 holds x^0
        for i in range(1, self.n + 1):
            # row i holds x^i
            x_powers[i] = x_powers[i - 1] * x
        sx = np.matmul(self.beta_oj.reshape((1, len(self.beta_oj))), x_powers)

        # \sum_{i=1}^K \beta_{in}(x - t_i)^n_+
        knot_terms = np.zeros((self.k, len(x)))
        for i in range(self.k):
            knot_term = np.power(x - self.ti[i], self.n)
            # heaviside is the step function: it zeroes the term left of the knot
            knot_terms[i] = knot_term * np.heaviside(x - self.ti[i], 0)
        sx += np.matmul(self.beta_in, knot_terms)

        return sx.flatten()
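The heaviside factor above implements the truncated power basis (x - t)^n_+. A standalone check of that building block, independent of the class:

import numpy as np

x = np.linspace(-2.0, 2.0, 5)  # [-2, -1, 0, 1, 2]
t, n = 0.0, 3
# np.heaviside(x - t, 0) zeroes the power for x < t, giving (x - t)^n_+
print(np.power(x - t, n) * np.heaviside(x - t, 0))  # -> 0, 0, 0, 1, 8 (zero left of the knot)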
Example #6
 def relu_prime(self, Z):
     return np.heaviside(Z, 1)
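The second argument of np.heaviside is the value returned exactly at zero, so the line above picks 1 as the ReLU subgradient at Z == 0:

import numpy as np

Z = np.array([-2.0, 0.0, 3.0])
print(np.heaviside(Z, 1))  # [0. 1. 1.]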
Example #7
np.random.seed(123)

with open("results/nngp_depth" + str(depth) + ".json", "r") as fp:
    stride = json.load(fp)["stride"]

print("Loading predictive mean and variance")
with h5py.File("results/nngp_pred_depth" + str(depth) + ".h5", "r") as f:
    Y_mean = np.array(f["/mean"])
    Y_std = np.expand_dims(np.sqrt(np.array(f["/var"])), 1)

with h5py.File("/raid/ChestXRay14/chestxray14_1024.h5", "r") as hf:
    Y_test = np.array(hf["labels/test"][::stride]).astype(np.int32)

print("Computing metrics")
Y_pred_hard = np.heaviside(Y_mean, np.random.randint(0, 2, size=Y_mean.shape))
Y_pred_soft = uq.Phi(Y_mean / Y_std)
accs = np.mean(np.equal(Y_test, Y_pred_hard), axis=0)
aucs = np.array([
    roc_auc_score(Y_test[:, j], Y_pred_soft[:, j])
    for j in range(Y_test.shape[1])
])

print("Saving results")
with open("../headers.json", "r") as fp:
    headers = json.load(fp)
data = pd.DataFrame.from_dict({
    "condition": headers,
    "acc": accs,
    "auc": aucs
})[["condition", "acc", "auc"]]
Example #8
    def solve(self, max_iteration=50, tolerance=1e-5, message=False):
        """

        Args:
            max_iteration:
            tolerance:
            message:

        Returns:

        """
        convergence_flag = False

        for iteration_index in range(max_iteration):
            # variable 1 estimation
            h = self.r1 * self.q1_hat
            self.x1_hat = utils.update_dumping(
                old_x=self.x1_hat,
                new_x=np.heaviside(np.abs(h) - self.l, 0.5) *
                (h - self.l * np.sign(h)) / self.q1_hat,
                dumping_coefficient=self.dumping)
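            # np.heaviside(|h| - l, 0.5) * (h - l*sign(h)) is the soft-thresholding
            # (LASSO proximal) operator: zero for |h| <= l, shrunk toward zero by l otherwise.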

            # self.chi1 = self.clip(np.heaviside(np.abs(h) - self.l, 0.5) / self.q1_hat)
            self.chi1 = utils.update_dumping(
                old_x=self.chi1,
                new_x=self.clip(
                    np.heaviside(np.abs(h) - self.l, 0.5) / self.q1_hat),
                dumping_coefficient=self.dumping)

            self.eta1 = 1.0 / self.chi1

            # message from 1 to 2
            self.q2_hat = self.clip(self.eta1 - self.q1_hat)
            self.r2 = (self.eta1 * self.x1_hat -
                       self.q1_hat * self.r1) / self.q2_hat

            # variable 2 estimation
            temp = np.linalg.pinv(np.diag(self.q2_hat) + self.J)
            self.x2_hat = temp @ (self.y_tilde + self.q2_hat * self.r2)
            self.chi2 = self.clip(np.diag(temp))
            self.eta2 = 1.0 / self.chi2

            # message from 2 to 1
            # self.q1_hat = self.clip(self.eta2 - self.q2_hat)
            self.q1_hat = utils.update_dumping(
                old_x=self.q1_hat,
                new_x=self.clip(self.eta2 - self.q2_hat),
                dumping_coefficient=self.dumping)
            # self.r1 = (self.eta2 * self.x2_hat - self.q2_hat * self.r2) / self.q1_hat
            self.r1 = utils.update_dumping(
                old_x=self.r1,
                new_x=(self.eta2 * self.x2_hat - self.q2_hat * self.r2) /
                self.q1_hat,
                dumping_coefficient=self.dumping)

            # check convergence
            diff_x = np.linalg.norm(self.x1_hat - self.x2_hat) / np.sqrt(
                self.N)
            diff_chi = np.linalg.norm(self.chi1 - self.chi2) / np.sqrt(self.N)

            if max(diff_x, diff_chi) < tolerance and iteration_index > 1:
                convergence_flag = True
                break
Example #9
def OnevsOne(X_train, X_test, y, y_actual, learning_parameter, num_classes,
             split):

    labels = np.unique(y).astype('str')
    new_models = [[] for i in range(int(num_classes * (num_classes - 1) / 2))]
    binary_class_models = [
        [] for i in range(int(num_classes * (num_classes - 1) / 2))
    ]
    binary_class_labels = [
        [] for i in range(int(num_classes * (num_classes - 1) / 2))
    ]

    for i in range(len(new_models)):
        new_models[i] = np.where(y == i + 1, 1, 0)

    tempargs = [[] for i in range(int(num_classes * (num_classes - 1) / 2))]

    i = 0
    for p in range(1, num_classes):
        for q in range(p):
            binary_class_labels[i] = labels[q] + labels[p]
            binary_class_models[i] = np.vstack((new_models[q], new_models[p]))
            i += 1

    for m, model in enumerate(binary_class_models):
        for arg in range(model.shape[1]):
            if model[0][arg] == model[1][arg]:
                tempargs[m].append(arg)

    binary_class_models = [model[1] for model in binary_class_models]

    new_models = binary_class_models
    predictions = [[] for i in range(num_classes)]
    probabilities = [[] for i in range(num_classes)]
    class_predictions = [[] for i in range(num_classes)]

    y_pred = []

    for pred in range(len(predictions)):
        print("Binary Class: ", binary_class_labels[pred])
        weights = np.random.rand(7)

        y_train = new_models[pred][:split]
        y_test = new_models[pred][split:]

        for i in range(1000):
            weights = update(X_train, weights, y_train, learning_parameter)

        class_labels = list(map(int, binary_class_labels[pred]))
        probabilities[pred] = sigmoid(
            np.sum(np.multiply(X_test, weights), axis=1))
        predictions[pred] = np.heaviside((probabilities[pred] - 0.5),
                                         0).astype(int)
        print("Accuracy for Class", binary_class_labels[pred], ": ",
              accuracy_score(y_test, predictions[pred]), "\n")
        class_predictions[pred] = [
            class_labels[label] for label in predictions[pred]
        ]

    y_pred = stats.mode(class_predictions)[0][-1]
    print("Overall Accuracy: ", accuracy_score(y_actual, y_pred))
    print(confusion_matrix(y_actual, y_pred))
Example #10
def docalc(args, data, len_data, sims, len_sims, error):
    """
	# Fitness Calculation Template:
	if set(args.error).issuperset(set(['the-acronysm'])):
		1. func = 0

		2. func = an algebraic expression combining the data average (data_avrg), data standard deviation (data_stdv), simulation average (sims_stdv),
		simulation standard deviation (sims_stdv), single experimental files (data.loc[i]), and/or simulation files (sims.loc[j])
		Note1: Perform two for-loops if using data.loc[i] and sims.loc[j].
		Note2: Please consider these variables are DataFrames, meaning that multiplication and division are methods (e.g. df1.division(df2))

		3. Drop NaN values (from experimental time points without simulated values, or simulated values without experimental data)
		with dropna(axis = 0, how = 'all').dropna(axis = 1, how = 'all'). Also transform Inf values with replace([numpy.inf, -numpy.inf], numpy.nan)

		4. Sum the two dimensions, and return a 6 float points scientific notation number (0 float points for statistical tests):
		error['the-acronysm'] = '{:.6e}'.format(func.dropna(axis = 0, how = 'all').dropna(axis = 1, how = 'all').sum().sum())
	"""

    if args.do_all:
        args.error = [
            'SDA', 'ADA', 'SSQ', 'CHISQ', 'MNSE', 'PWSD', 'APWSD', 'NPWSD',
            'ANPWSD', 'MWUT', 'WMWET', 'TOST', 'DUT'
        ]
        """
		SDA    : Squared Difference of Averages
		ADA    : Absolute Difference of Averages
		SSQ    : Sum of SQuares
		CHISQ  : Chi-Square (Differences divided by data standard deviation)
		MNSE   : Mean Normalized Square Error (Differences divided by data average)
		PWSD   : Pair-Wise Square Deviation
		APWSD  : Absolute Pair-Wise Deviation
		NPWSD  : Normalized Pair-Wise Square Deviation
		ANPWSD : Absolute Normalized Pair-Wise Deviation
		MWUT   : Mann-Whitney U-test (Mann and Whitney, 1947, DOI 10.1214/aoms/1177730491)
		WMWET  : Wellek's Mann-Whitney Equivalence Test (Wellek 1996, DOI 10.1002/bimj.4710380608)
		TOST   : Two one-sided t-tests (Dunnett and Gent, 1977, DOI 10.2307/2529457, as well as other authors)
		DUT    : Double Mann-Whitney U-tests (Reviewed in Cornell, 1990, DOI 10.1080/03610929008830433)

		More information in https://pleione.readthedocs.io/en/latest/ObjectiveFunctions.html
		"""

        data_avrg = doavrg(data, len_data)
        data_stdv = dostdv(data, len_data)

        sims_avrg = doavrg(sims, len_sims)
        sims_stdv = dostdv(sims, len_sims)

    # former mean square error, now square difference of means
    if set(args.error).issuperset(set(['SDA'])) or set(args.error).issuperset(
            set(['MSE'])):
        func = 0

        if not args.do_all:
            data_avrg = doavrg(data, len_data)
            sims_avrg = doavrg(sims, len_sims)

        func = (data_avrg - sims_avrg)**2

        error['SDA'] = '{:.6e}'.format(
            func.dropna(axis=0, how='all').dropna(axis=1,
                                                  how='all').sum().sum())

    # former mean absolute error, now absolute value of the difference of means
    if set(args.error).issuperset(set(['ADA'])) or set(args.error).issuperset(
            set(['MAE'])):
        func = 0

        if not args.do_all:
            data_avrg = doavrg(data, len_data)
            sims_avrg = doavrg(sims, len_sims)

        func = abs(data_avrg - sims_avrg)

        error['ADA'] = '{:.6e}'.format(
            func.dropna(axis=0, how='all').dropna(axis=1,
                                                  how='all').sum().sum())

    # sum of squares (from BioNetFit paper)
    if set(args.error).issuperset(set(['SSQ'])):
        func = 0

        for i in range(len_data):
            for j in range(len_sims):
                func += (data.loc[i] - sims.loc[j])**2

        error['SSQ'] = '{:.6e}'.format(
            func.dropna(axis=0, how='all').dropna(axis=1,
                                                  how='all').sum().sum())

    # chi-square (from BioNetFit paper)
    if set(args.error).issuperset(set(['CHISQ'])):
        func = 0

        if not args.do_all:
            data_stdv = dostdv(data, len_data)

        for i in range(len_data):
            for j in range(len_sims):
                func += ((data.loc[i] - sims.loc[j]).divide(data_stdv))**2

        error['CHISQ'] = '{:.6e}'.format(
            func.dropna(axis=0, how='all').dropna(axis=1,
                                                  how='all').sum().sum())

    # mean normalized square error (from BioNetFit paper)
    if set(args.error).issuperset(set(['MNSE'])):
        func = 0

        if not args.do_all:
            data_avrg = doavrg(data, len_data)

        for i in range(len_data):
            for j in range(len_sims):
                func += ((data.loc[i] - sims.loc[j]).divide(data_avrg))**2

        error['MNSE'] = '{:.6e}'.format(
            func.replace([numpy.inf, -numpy.inf], numpy.nan).dropna(
                axis=0, how='all').dropna(axis=1, how='all').sum().sum())

    # pair-wise square deviation
    if set(args.error).issuperset(set(['PWSD'])):
        func = 0

        for i in range(len_data):
            for j in range(len_sims):
                func += ((data.loc[i] - sims.loc[j])**2).divide(len_data *
                                                                len_sims)

        error['PWSD'] = '{:.6e}'.format(
            func.dropna(axis=0, how='all').dropna(axis=1,
                                                  how='all').sum().sum())

    # pair-wise absolute deviation
    if set(args.error).issuperset(set(['APWSD'])):
        func = 0

        for i in range(len_data):
            for j in range(len_sims):
                func += (abs(data.loc[i] - sims.loc[j])).divide(len_data *
                                                                len_sims)

        error['APWSD'] = '{:.6e}'.format(
            func.dropna(axis=0, how='all').dropna(axis=1,
                                                  how='all').sum().sum())

    # normalized pair-wise square deviation (also implemented in BioNetFit as equation 3, but not normalized by the number of data * sims)
    if set(args.error).issuperset(set(['NPWSD'])):
        func = 0

        for i in range(len_data):
            for j in range(len_sims):
                func += (((data.loc[i] - sims.loc[j]).divide(
                    data.loc[i]))**2).divide(len_data * len_sims)

        error['NPWSD'] = '{:.6e}'.format(
            func.replace([numpy.inf, -numpy.inf], numpy.nan).dropna(
                axis=0, how='all').dropna(axis=1, how='all').sum().sum())

    # normalized pair-wise absolute deviation
    if set(args.error).issuperset(set(['ANPWSD'])):
        func = 0

        for i in range(len_data):
            for j in range(len_sims):
                func += (abs((data.loc[i] - sims.loc[j]).divide(
                    data.loc[i]))).divide(len_data * len_sims)

        error['ANPWSD'] = '{:.6e}'.format(
            func.replace([numpy.inf, -numpy.inf], numpy.nan).dropna(
                axis=0, how='all').dropna(axis=1, how='all').sum().sum())
    """
	Wellek's Mann-Whitney Equivalence Test.
	Based on mawi.R script from the EQUIVNONINF package
	modifications done to perform the test "vectorized"
	(it compares two matrices; the first has all exp data, the second all the simulations)
	"""
    if set(args.error).issuperset(set(['WMWET'])):
        from scipy.stats import ncx2
        # useful variables (namespace identical to mawi.R script)
        m = len_data  # x = data
        n = len_sims  # y = sims
        eps1_ = .3129  # Wellek's paper
        eps2_ = .2661  # Wellek's paper
        eqctr = 0.5 + (eps2_ - eps1_) / 2
        eqleng = eps1_ + eps2_

        # estimators needed for calculations
        wxy = pandas.DataFrame(index=sims.loc[0].index,
                               columns=sims.loc[0].columns).fillna(0)
        pihxxy = pandas.DataFrame(index=sims.loc[0].index,
                                  columns=sims.loc[0].columns).fillna(0)
        pihxyy = pandas.DataFrame(index=sims.loc[0].index,
                                  columns=sims.loc[0].columns).fillna(0)
        sigmah = pandas.DataFrame(index=sims.loc[0].index,
                                  columns=sims.loc[0].columns).fillna(0)

        # ŷ estimator (wxy in mawi.R)
        # equation 1.2 from Wellek 1996 paper
        # for (i in 1:m) for (j in 1:n) wxy <- wxy + trunc(0.5 * (sign(x[i] - y[j]) + 1))
        for i in range(m):
            for j in range(n):
                diff = (data.loc[i] - sims.loc[j])
                diff = diff.dropna(axis=0, how='all').dropna(axis=1, how='all')
                diff = diff.apply(numpy.sign)
                diff = diff + 1
                diff = diff.multiply(0.5)
                diff = diff.apply(numpy.trunc)
                # add to ŷ (wxy in mawi.R)
                wxy += diff

        # yFFG estimator (pihxxy in mawi.R)
        # equation 2.5a from Wellek 1996 paper
        #for (i1 in 1:(m - 1)) for (i2 in (i1 + 1):m) for (j in 1:n) pihxxy <- pihxxy + trunc(0.5 * (sign(min(x[i1], x[i2]) - y[j]) + 1))
        for xi1 in range(m - 1):
            for xi2 in range(xi1 + 1, m):
                for xj in range(n):
                    diff = data.loc[xi1].where(data.loc[xi1] < data.loc[xi2],
                                               data.loc[xi2]) - sims.loc[xj]
                    diff = diff.dropna(axis=0, how='all').dropna(axis=1,
                                                                 how='all')
                    diff = diff.apply(numpy.sign)
                    diff = diff + 1
                    diff = diff.multiply(0.5)
                    diff = diff.apply(numpy.trunc)
                    # add to yFFG (pihxxy in mawi.R)
                    pihxxy += diff

        # yFGG estimator (pihxyy in mawi.R)
        # equation 2.5b from Wellek 1996 paper
        # for (i in 1:m) for (j1 in 1:(n - 1)) for (j2 in (j1 + 1):n) pihxyy <- pihxyy + trunc(0.5 * (sign(x[i] - max(y[j1], y[j2])) + 1))
        for xi in range(m):
            for xj1 in range(n - 1):
                for xj2 in range(xj1 + 1, n):
                    diff = (data.loc[xi] - sims.loc[xj1].where(
                        sims.loc[xj1] > sims.loc[xj2], sims.loc[xj2]))
                    diff = diff.dropna(axis=0, how='all').dropna(axis=1,
                                                                 how='all')
                    diff = diff.apply(numpy.sign)
                    diff = diff + 1
                    diff = diff.multiply(0.5)
                    diff = diff.apply(numpy.trunc)
                    # add to yFGG (pihxyy in mawi.R)
                    pihxyy += diff

        # in equation 1.2
        wxy = wxy.divide(m * n)
        # in equation 2.5a, inverse of (m choose 2 = 0.5 * (m-1) * m), then divided by n
        pihxxy = pihxxy.multiply(2).divide(m * (m - 1) * n)
        # in equation 2.5b, inverse of (n choose 2 = 0.5 * (n-1) * n), then divided by m
        pihxyy = pihxyy.multiply(2).divide(n * (n - 1) * m)

        # variance estimator sigmah (same name as in mawi.R)
        # equation 2.6 from Wellek 1996 paper
        # sigmah <- sqrt((wxy - (m + n - 1) * wxy^2 + (m - 1) * pihxxy + (n - 1) * pihxyy)/(m * n))
        sigmah = wxy - (wxy**2).multiply(m + n - 1) + pihxxy.multiply(
            m - 1) + pihxyy.multiply(n - 1)
        sigmah = sigmah.divide(m * n)
        sigmah = sigmah**0.5

        # critical value
        # right hand of inequality 2.8 from Wellek 1996 paper
        phi = ((eqleng / 2) / sigmah)**2
        # crit <- sqrt(qchisq(alpha, 1, (eqleng/2/sigmah)^2))
        # Ca(phi) is the square root of the alpha-th quantile of the chi2-distribution with a single degree of freedom and non-centrality parameter phi square
        crit = pandas.DataFrame(data=ncx2.ppf(0.05, 1, phi),
                                index=sims.loc[0].index,
                                columns=sims.loc[0].columns)**.5

        # compare with Z
        # left hand side of the inequality 2.8 from Wellek 1996 paper
        Z = abs((wxy - eqctr).divide(sigmah))
        z = Z.copy(deep=True)
        """
		we want to maximize the amount of true alternative hypotheses, so
		we purposely changed the values to use the Wellek's test as an objective function to minimize
		"""
        # test the inequality 2.8 from Wellek 1996 paper
        # the test cannot reject the null hypothesis: P[X-Y] < .5 - e1 or P[X-Y] > .5 + e2
        Z[z >= crit] = +1.0
        # the null hypothesis is rejected, therefore .5 - e1 < P[X-Y] < .5 + e2
        Z[z < crit] = +0.0

        if args.report:
            print('wxy estimator:\n', wxy, '\n')
            print('pihxxy estimator:\n', pihxxy, '\n')
            print('pihxyy estimator:\n', pihxyy, '\n')
            print('sigmah estimator:\n', sigmah, '\n')
            print('phi matrix:\n', phi, '\n')
            print('critical values:\n', crit, '\n')
            print('Z estimator: \n', Z, '\n')
            print(
                'Wellek\'s test matrix: a zero means data and simulations are equivalent within the threshold\n',
                Z)

        error['WMWET'] = '{:.0f}'.format(Z.sum().sum())

    # the same as WMWET, but written to follow Wellek's paper more closely (note the heaviside function)
    if set(args.error).issuperset(set(['WMWET_paper'])):
        from scipy.stats import ncx2

        # namespace identical to the mawi.R script: x = data, y = sims
        m, n = len_data, len_sims
        x, y = data, sims

        eps1_ = .3129  # Wellek's paper
        eps2_ = .2661  # Wellek's paper
        eqctr = 0.5 + (eps2_ - eps1_) / 2
        eqleng = eps1_ + eps2_

        # estimators needed for calculations
        wxy = pandas.DataFrame(index=y.loc[0].index,
                               columns=y.loc[0].columns).fillna(0)
        pihxxy = pandas.DataFrame(index=y.loc[0].index,
                                  columns=y.loc[0].columns).fillna(0)
        pihxyy = pandas.DataFrame(index=y.loc[0].index,
                                  columns=y.loc[0].columns).fillna(0)
        sigmah = pandas.DataFrame(index=y.loc[0].index,
                                  columns=y.loc[0].columns).fillna(0)

        # ŷ estimator (wxy in mawi.R)
        # for (i in 1:m) for (j in 1:n) wxy <- wxy + trunc(0.5 * (sign(x[i] - y[j]) + 1))
        for i in range(m):
            for j in range(n):
                diff = (x.loc[i] - y.loc[j]).dropna(axis=0, how='all').dropna(
                    axis=1, how='all')
                wxy += numpy.heaviside(diff, 0)

        # yFFG estimator (pihxxy in mawi.R)
        #for (i1 in 1:(m - 1)) for (i2 in (i1 + 1):m) for (j in 1:n) pihxxy <- pihxxy + trunc(0.5 * (sign(min(x[i1], x[i2]) - y[j]) + 1))
        for xi1 in range(m - 1):
            for xi2 in range(xi1 + 1, m):
                for xj in range(n):
                    diff1 = (x.loc[xi1] - y.loc[xj]).dropna(
                        axis=0, how='all').dropna(axis=1, how='all')
                    diff2 = (x.loc[xi2] - y.loc[xj]).dropna(
                        axis=0, how='all').dropna(axis=1, how='all')
                    pihxxy += numpy.heaviside(diff1, 0) * numpy.heaviside(
                        diff2, 0)

        # yFGG estimator (pihxyy in mawi.R)
        # for (i in 1:m) for (j1 in 1:(n - 1)) for (j2 in (j1 + 1):n) pihxyy <- pihxyy + trunc(0.5 * (sign(x[i] - max(y[j1], y[j2])) + 1))
        for xi in range(m):
            for xj1 in range(n - 1):
                for xj2 in range(xj1 + 1, n):
                    diff1 = (x.loc[xi] - y.loc[xj1]).dropna(
                        axis=0, how='all').dropna(axis=1, how='all')
                    diff2 = (x.loc[xi] - y.loc[xj2]).dropna(
                        axis=0, how='all').dropna(axis=1, how='all')
                    pihxyy += numpy.heaviside(diff1, 0) * numpy.heaviside(
                        diff2, 0)

        #
        wxy = wxy.divide(m * n)
        pihxxy = pihxxy.multiply(2).divide(m * (m - 1) * n)
        pihxyy = pihxyy.multiply(2).divide(n * (n - 1) * m)

        # variance estimator sigmah (same name as in mawi.R)
        # sigmah <- sqrt((wxy - (m + n - 1) * wxy^2 + (m - 1) * pihxxy + (n - 1) * pihxyy)/(m * n))
        sigmah = wxy - (wxy**2).multiply(m + n - 1) + pihxxy.multiply(
            m - 1) + pihxyy.multiply(n - 1)
        sigmah = sigmah.divide(m * n)
        sigmah = sigmah**0.5

        # critical value
        # crit <- sqrt(qchisq(alpha, 1, (eqleng/2/sigmah)^2))
        phi = (eqleng / 2 / sigmah)**2
        crit = pandas.DataFrame(data=ncx2.ppf(0.05, 1, phi),
                                index=y.loc[0].index,
                                columns=y.loc[0].columns)**.5

        # compare with Z
        Z = abs((wxy - eqctr).divide(sigmah))
        z = Z.copy(deep=True)
        # the null hypothesis is rejected, therefore .5 - e1 < P[X-Y] < .5 + e2
        Z[z < crit] = +0.0
        # the test cannot reject the null hypothesis: P[X-Y] < .5 - e1 or P[X-Y] > .5 + e2
        Z[z >= crit] = +1.0

        if args.report:
            print('wxy estimator:\n', wxy, '\n')
            print('pihxxy estimator:\n', pihxxy, '\n')
            print('pihxyy estimator:\n', pihxyy, '\n')
            print('sigmah estimator:\n', sigmah, '\n')
            print('phi matrix:\n', phi, '\n')
            print('critical values:\n', crit, '\n')
            print('Z estimator: \n', Z, '\n')
            print(
                'Wellek\'s test matrix: a zero means data and simulations are equivalent within the threshold\n',
                Z)

        error['WMWET_paper'] = '{:.0f}'.format(Z.sum().sum())

    if set(args.error).issuperset(set(['TOST'])):
        print(
            "WARNING: data and/or simulations are not necessarily normally distributed."
        )
        print(
            "As a test-bed, we assume data and simulations have unequal standard deviations."
        )
        print(
            "See https://www.statsmodels.org/devel/generated/statsmodels.stats.weightstats.ttost_ind.html for more information"
        )
        from statsmodels.stats.weightstats import ttost_ind

        if not args.do_all:
            data_stdv = dostdv(data, len_data)

        # reshape data and sims to allow calculating the test in a for-loop
        tost_sims = numpy.dstack([sims.loc[x] for x in range(len_sims)])
        # since we operate on numpy arrays without labels, we must ensure sims and data indexes and columns have the same order
        index = data.loc[0].index
        columns = data.loc[0].columns
        tost_data = numpy.dstack([
            data.loc[x].reindex(columns=columns, index=index)
            for x in range(len_data)
        ])

        p = numpy.zeros((len(data_stdv.index), len(data_stdv.columns)))
        row = 0
        for x, y, lim in zip(tost_sims, tost_data, data_stdv.values):
            for col, _ in enumerate(data_stdv.columns):
                p[row, col] = ttost_ind(x[col], y[col], -lim[col],
                                        +lim[col])[0]
            row += 1

        # transform the matrix of p-values into a non-rejection DataFrame (a p-value below 5% rejects the null hypothesis and is set to zero)
        p = pandas.DataFrame(index=index, columns=columns, data=p)
        P = p.copy(deep=True)
        P[p >= .05] = +1.0
        P[p < .05] = +0.0

        if args.report:
            print(
                'Two one-sided t-tests matrix: a zero means data and simulations are equivalent within one standard deviation\n',
                P)

        error['TOST'] = '{:.0f}'.format(P.sum().sum())

    # Mann-Whitney U-test
    def mwut(data, sims, alternative):
        ucrit = pandas.read_csv(args.crit,
                                sep=None,
                                engine='python',
                                header=0,
                                index_col=0)
        udata = pandas.DataFrame(index=sims.loc[0].index,
                                 columns=sims.loc[0].columns).fillna(0)
        usims = pandas.DataFrame(index=sims.loc[0].index,
                                 columns=sims.loc[0].columns).fillna(0)

        for i in range(len_data):
            for j in range(len_sims):
                Diff = (data.loc[i] - sims.loc[j]).dropna(
                    axis=0, how='all').dropna(axis=1, how='all')
                diff = Diff.copy(deep=True)
                # transform data
                # if data < sims, count -1.0
                Diff[diff < 0] = -1.0
                # if data > sims, count +1.0
                Diff[diff > 0] = +1.0
                # if data = sims, count +0.5
                Diff[diff == 0] = +0.5
                # count how many times data < sims (udata and usims are complementary)
                diff = Diff.copy(deep=True)
                udata += Diff[diff == -1.0].fillna(0).divide(-1) + Diff[
                    diff == +0.5].fillna(0)
                usims += Diff[diff == +1.0].fillna(0).divide(+1) + Diff[
                    diff == +0.5].fillna(0)

        if alternative == 'two-sided':
            # bigU is max(udata, usims), where udata and usims are DataFrames
            bigU = udata.where(udata >= usims).fillna(
                usims.where(usims >= udata))
        if alternative == 'less':
            bigU = udata
        if alternative == 'greater':
            bigU = usims

        U = len_data * len_sims - bigU
        u = U.copy(deep=True)
        # U is significant if it is less than or equal to a critical value
        U[u <= ucrit.loc[len_sims, str(len_data)]] = +1.0
        U[u > ucrit.loc[len_sims, str(len_data)]] = +0.0

        if args.report:
            print('U-estimator for data\n', udata, '\n')
            print('U-estimator for sims\n', usims, '\n')
            if alternative == 'two-sided':
                print(
                    'U-test matrix: A one means data and sims are different\n',
                    U, '\n')
            if alternative == 'less':
                print(
                    'U-test matrix: A one means data is smaller than sims (shifted to the right)\n',
                    U, '\n')
            if alternative == 'greater':
                print(
                    'U-test matrix: A one means data is greater than sims (shifted to the left)\n',
                    U, '\n')

        return '{:.0f}'.format(U.sum().sum()), U

    if set(args.error).issuperset(set(['MWUT'])):
        if (len_data >= 3 and len_sims >= 3):
            error['MWUT'] = mwut(data, sims, 'two-sided')[0]
        else:
            error['MWUT'] = str(numpy.nan)

    if set(args.error).issuperset(set(['DUT'])):
        if (len_data >= 3 and len_sims >= 3):
            # set what the user wants
            if args.lower is not None and args.upper is None:
                args.upper = args.lower  # symmetric equivalence interval
            if args.lower is None and args.upper is not None:
                args.lower = args.upper  # symmetric equivalence interval

            if args.lower is None and args.upper is None:
                if not args.do_all:
                    if args.stdv == 'sims':
                        lower = upper = dostdv(sims, len_sims)
                    else:
                        lower = upper = dostdv(data, len_data)
                else:
                    if args.stdv == 'sims':
                        lower = upper = sims_stdv
                    else:
                        lower = upper = data_stdv
            else:
                lower = args.lower
                upper = args.upper

            # divide by factor
            lower = lower / float(args.factor)
            upper = upper / float(args.factor)

            # copy simulations to a temporary variable
            tmp = sims

            # test lower limit
            new_sims = []
            for i in range(len_sims):
                new_sims.append(tmp.loc[i] - lower)
            sims = pandas.concat(new_sims, keys=range(len_sims))

            # test data > sims - lower with one-tail U-test
            LB = mwut(data, sims, 'greater')[1]

            # test upper limit
            new_sims = []
            for i in range(len_sims):
                new_sims.append(tmp.loc[i] + upper)
            sims = pandas.concat(new_sims, keys=range(len_sims))

            # test data < sims + upper with a one-tail U-test
            UB = mwut(data, sims, 'less')[1]

            # rejection DataFrame (the U-test reports ones for true alternative hypotheses)
            # both one-sided tests should reject the null hypothesis
            U = LB * UB
            # However, we minimize the number of non-rejected null hypotheses,
            # so transform U into a non-rejection DataFrame.
            U = numpy.logical_xor(U.values, 1).astype(int)
            U = pandas.DataFrame(index=LB.index, columns=LB.columns, data=U)

            if args.report:
                print(
                    'Double U-test matrix: 1.0 means data and sims are not equivalent if sims are shifted:\n',
                    U, '\n')

            error['DUT'] = '{:.0f}'.format(U.sum().sum())

        else:
            error['DUT'] = str(numpy.nan)
Example #11
Forces = Forces_case2

for i in Forces:
    i[1] = i[1] * 10**(-3)

momentlist = []
shearlist = []

poslist = []
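# Macaulay's method: each point load contributes to the shear and bending moment
# only for positions past its point of application, which the step function enforces.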
for pos in np.linspace(0, L_HAMRAC, int(L_HAMRAC * step) + 1):
    shear = 0
    moment = 0
    for i in range(len(Forces)):
        shear += Forces[i][0] * np.heaviside(pos - Forces[i][1], 0.5)
        moment += Forces[i][0] * (pos - Forces[i][1]) * np.heaviside(
            pos - Forces[i][1], 0.5)

    momentlist.append(moment)
    shearlist.append(shear)
    poslist.append(pos)

maxshear = max(max(shearlist), abs(min(shearlist)))
maxmoment = max(max(momentlist), abs(min(momentlist)))

a = W_airframe / 2
b = H_airframe / 2 + 0.2
thetalist = np.linspace(90, 270, 180) / 180 * np.pi

## creating the profile, only the left side
Example #12
def theta(x):
    return np.heaviside(x, 0)
Example #13
 def s(mjtj):
     return k * mjtj[1] * np.heaviside(tn - mjtj[0], 0.5) * np.exp(
         k * (mjtj[0] - tn))
Example #14
N = 3

Ginkq = np.eye(N, N, k=1) * topkq + np.eye(
    N, N, k=-1) * botkq + innkq * np.eye(N, N) - d

Gink = np.eye(N, N, k=1) * topk + np.eye(N, N, k=-1) * botk + innk * np.eye(
    N, N) - d

Grkq = np.linalg.inv(Ginkq)
Gakq = np.transpose(np.conj(Grkq))

Grk = np.linalg.inv(Gink)
Gak = np.transpose(np.conj(Grk))

fer = np.heaviside(-(d + np.eye(N, N) * (om - mu)), 0)
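# heaviside acts here as a zero-temperature Fermi occupation factor:
# occupied (1) below the chemical potential mu, empty (0) above.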

in1 = np.matmul(Grkq, np.matmul(Grk, np.matmul(fer, Gak)))
in2 = np.matmul(Grkq, np.matmul(fer, np.matmul(Gakq, Gak)))


@numba.cuda.jit(device=True)
def ds(kx, ky, qx, qy, om, d):

    topkq = -complex(0, 1) * V0 * ((kx + qx) - complex(0, 1) * (ky + qy))
    botkq = complex(0, 1) * V0 * ((kx + qx) + complex(0, 1) * (ky + qy))
    innkq = om + complex(0, 1) * Gamm - A * ((kx + qx)**2 + (ky + qy)**2) - V2

    topk = -complex(0, 1) * V0 * (kx - complex(0, 1) * ky)
    botk = complex(0, 1) * V0 * (kx + complex(0, 1) * ky)
    innk = om + complex(0, 1) * Gamm - A * (kx**2 + ky**2) - V2
Example #15
def generate_artificial_data(nTrials=2,
                             nChannels=2,
                             equidistant=True,
                             seed=None,
                             overlapping=False,
                             inmemory=True,
                             dimord="default"):
    """
    Create :class:`~syncopy.AnalogData` object with synthetic harmonic signal(s)

    Parameters
    ----------
    nTrials : int
        Number of trials to populate synthetic data object with
    nChannels : int
        Number of channels to populate synthetic object with
    equidistant : bool
        If `True`, trials of equal length are defined
    seed : None or int
        If `None`, imposed noise is completely random. If `seed` is an integer,
        it is used to fix the (initial) state of NumPy's random number generator
        :func:`numpy.random.default_rng`, i.e., objects created with the same `seed`
        will be populated with identical artificial signals.
    overlapping : bool
        If `True`, constructed trials overlap
    inmemory : bool
        If `True`, the full `data` array (all channels across all trials) is allocated
        in memory (fast but dangerous for large arrays), otherwise the output data
        object's corresponding backing HDF5 file in `__storage__` is filled with
        synthetic data in a trial-by-trial manner (slow but safe even for very
        large datasets).
    dimord : str or list
        If `dimord` is "default", the constructed output object uses the default
        dimensional layout of a standard :class:`~syncopy.AnalogData` object.
        If `dimord` is a list (e.g., ``["channel", "time"]``) the provided sequence
        of dimensions is used.

    Returns
    -------
    out : :class:`~syncopy.AnalogData` object
        Syncopy :class:`~syncopy.AnalogData` object with specified properties
        populated with a synthetic multivariate trigonometric signal.

    Notes
    -----
    This is an auxiliary method that is intended purely for internal use. Thus,
    no error checking is performed.

    Examples
    --------
    Generate small artificial :class:`~syncopy.AnalogData` object in memory

    .. code-block:: python

        >>> iAmSmall = generate_artificial_data(nTrials=5, nChannels=10, inmemory=True)
        >>> iAmSmall
        Syncopy AnalogData object with fields

                    cfg : dictionary with keys ''
                channel : [10] element <class 'numpy.ndarray'>
              container : None
                   data : 5 trials of length 3000 defined on [15000 x 10] float32 Dataset of size 0.57 MB
                 dimord : 2 element list
               filename : /Users/pantaray/.spy/spy_158f_4d4153e3.analog
                   mode : r+
             sampleinfo : [5 x 2] element <class 'numpy.ndarray'>
             samplerate : 1000.0
                    tag : None
                   time : 5 element list
              trialinfo : [5 x 0] element <class 'numpy.ndarray'>
                 trials : 5 element iterable

        Use `.log` to see object history

    Generate artificial :class:`~syncopy.AnalogData` object of more substantial
    size on disk

    .. code-block:: python

        >>> iAmBig = generate_artificial_data(nTrials=50, nChannels=1024, inmemory=False)
        >>> iAmBig
        Syncopy AnalogData object with fields

                    cfg : dictionary with keys ''
                channel : [1024] element <class 'numpy.ndarray'>
              container : None
                   data : 200 trials of length 3000 defined on [600000 x 1024] float32 Dataset of size 2.29 GB
                 dimord : 2 element list
               filename : /Users/pantaray/.spy/spy_158f_b80715fe.analog
                   mode : r+
             sampleinfo : [200 x 2] element <class 'numpy.ndarray'>
             samplerate : 1000.0
                    tag : None
                   time : 200 element list
              trialinfo : [200 x 0] element <class 'numpy.ndarray'>
                 trials : 200 element iterable

        Use `.log` to see object history

    """

    # Create dummy 1d signal that will be blown up to fill channels later
    dt = 0.001
    t = np.arange(0, 3, dt, dtype="float32") - 1.0
    sig = np.cos(2 * np.pi * (7 * (np.heaviside(t, 1) * t - 1) + 10) * t)
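    # np.heaviside(t, 1) gates the frequency modulation: a plain 3 Hz tone for t < 0, a linear chirp for t >= 0.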

    # Depending on chosen `dimord` either get default position of time-axis
    # in `AnalogData` objects or use provided `dimord` and reshape signal accordingly
    if dimord == "default":
        dimord = AnalogData._defaultDimord
    timeAxis = dimord.index("time")
    idx = [1, 1]
    idx[timeAxis] = -1
    sig = np.repeat(sig.reshape(*idx), axis=idx.index(1), repeats=nChannels)

    # Initialize random number generator (with possibly user-provided seed-value)
    rng = np.random.default_rng(seed)

    # Either construct the full data array in memory using tiling or create
    # an HDF5 container in `__storage__` and fill it trial-by-trial
    # NOTE: use `swapaxes` here to ensure two objects created w/same seed really
    # are affected w/identical additive noise patterns, no matter their respective
    # `dimord`.
    out = AnalogData(samplerate=1 / dt, dimord=dimord)
    if inmemory:
        idx[timeAxis] = nTrials
        sig = np.tile(sig, idx)
        shp = [slice(None), slice(None)]
        for iTrial in range(nTrials):
            shp[timeAxis] = slice(iTrial * t.size, (iTrial + 1) * t.size)
            noise = rng.standard_normal(
                (t.size, nChannels)).astype(sig.dtype) * 0.5
            sig[tuple(shp)] += np.swapaxes(noise, timeAxis, 0)
        out.data = sig
    else:
        with h5py.File(out.filename, "w") as h5f:
            shp = list(sig.shape)
            shp[timeAxis] *= nTrials
            dset = h5f.create_dataset("data",
                                      shape=tuple(shp),
                                      dtype=sig.dtype)
            shp = [slice(None), slice(None)]
            for iTrial in range(nTrials):
                shp[timeAxis] = slice(iTrial * t.size, (iTrial + 1) * t.size)
                noise = rng.standard_normal(
                    (t.size, nChannels)).astype(sig.dtype) * 0.5
                dset[tuple(shp)] = sig + np.swapaxes(noise, timeAxis, 0)
                dset.flush()
        out.data = h5py.File(out.filename, "r+")["data"]

    # Define by-trial offsets to generate (non-)equidistant/(non-)overlapping trials
    trialdefinition = np.zeros((nTrials, 3), dtype='int')
    if equidistant:
        equiOffset = 0
        if overlapping:
            equiOffset = 100
        offsets = np.full((nTrials, ), equiOffset, dtype=sig.dtype)
    else:
        offsets = rng.integers(low=int(0.1 * t.size),
                               high=int(0.2 * t.size),
                               size=(nTrials, ))

    # Using generated offsets, construct trialdef array and make sure initial
    # and end-samples are within data bounds (only relevant if overlapping
    # trials are built)
    shift = (-1)**(not overlapping)
    for iTrial in range(nTrials):
        trialdefinition[iTrial, :] = np.array([
            iTrial * t.size - shift * offsets[iTrial],
            (iTrial + 1) * t.size + shift * offsets[iTrial], -1000
        ])
    if equidistant:
        trialdefinition[0, :2] += equiOffset
        trialdefinition[-1, :2] -= equiOffset
    else:
        trialdefinition[0, 0] = 0
        trialdefinition[-1, 1] = nTrials * t.size
    out.definetrial(trialdefinition)

    return out
Example #16
def test_ufunc_heaviside_uu(A: dace.uint32[10], B: dace.uint32[10]):
    return np.heaviside(A, B)
Example #17
def test_ufunc_heaviside_ff(A: dace.float32[10], B: dace.float32[10]):
    return np.heaviside(A, B)
Example #18
def test_ufunc_heaviside_cc(A: dace.complex64[10], B: dace.complex64[10]):
    return np.heaviside(A, B)
Example #19
def PFA(objf, lb, ub, dim, n, MaxGeneration):
    pop = n
    #General parameters

    #n=50 #number of fireflies
    dim = 30  #dim
    #lb=-50
    #ub=50
    #MaxGeneration=500

    #FFA parameters
    alpha = 0.50  # Randomness 0--1 (highly random)
    betamin = 0.50  # minimum value of beta
    gamma = 1  # Absorption coefficient
    delta = 0.01
    # delta2=(ub-lb)/MaxGeneration

    zn = numpy.ones(n)
    zn.fill(float("inf"))

    #ns(i,:)=Lb+(Ub-Lb).*rand(1,d);
    ns = numpy.random.uniform(0, 1, (n, dim)) * (ub - lb) + lb
    Lightn = numpy.ones(n)
    Lightn.fill(float("inf"))
    Lightnprev = numpy.ones(n)
    Lightnprev.fill(float("inf"))

    #[ns,Lightn]=init_ffa(n,d,Lb,Ub,u0)

    convergence = []
    s = solution()

    print("FFA is optimizing  \"" + objf.__name__ + "\"")

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    # Main loop
    for k in range(0, MaxGeneration):  # start iterations

        #% This line of reducing alpha is optional
        #alpha=alpha_new(alpha,MaxGeneration);
        Lightnprev = Lightn
        #% Evaluate new solutions (for all n fireflies)
        for i in range(0, n):
            zn[i] = objf(ns[i, :])
            Lightn[i] = zn[i]

        # Ranking fireflies by their light intensity/objectives

        Lightn = numpy.sort(zn)
        Index = numpy.argsort(zn)
        ns = ns[Index, :]

        #Find the current best
        nso = ns
        Lighto = Lightn
        nbest = ns[0, :]
        Lightbest = Lightn[0]

        #% For output only
        fbest = Lightbest

        #% Move all fireflies to the better locations
        #    [ns]=ffa_move(n,d,ns,Lightn,nso,Lighto,nbest,...
        #          Lightbest,alpha,betamin,gamma,Lb,Ub);
        scale = numpy.ones(dim) * abs(ub - lb)
        if (k % 10 != 0):
            for i in range(0, n):
                # The attractiveness parameter beta=exp(-gamma*r)
                for j in range(0, n):
                    # r=numpy.sqrt(numpy.sum((ns[i,:]-ns[j,:])**2));
                    # r2=numpy.sqrt(numpy.sum((ns[i,:]-ns[0,:])**2));
                    r = numpy.sum((ns[i, :] - ns[j, :]))
                    r2 = numpy.sum((ns[0, :] - ns[j, :]))
                    #r=1
                    # Update moves
                    if Lightn[i] > Lighto[j]:  # Brighter and more attractive
                        # PropFA parameters
                        per = ((k / MaxGeneration) * 100) / 50
                        per2 = numpy.heaviside(per - 1, 0.5)
                        ratA = (numpy.absolute(Lightn[i]) -
                                numpy.absolute(Lightnprev[i])) / max(
                                    numpy.absolute(Lightn[i]),
                                    numpy.absolute(Lightnprev[i]))
                        ratB = (numpy.absolute(Lightn[j]) - numpy.absolute(
                            Lightn[i])) / max(numpy.absolute(Lightn[j]),
                                              numpy.absolute(Lightn[i]))
                        ratC = (numpy.absolute(fbest) - numpy.absolute(
                            Lightn[i])) / max(numpy.absolute(fbest),
                                              numpy.absolute(Lightn[i]))
                        ratAvg = (ratA + ratB + ratC) / 3
                        scale2 = numpy.absolute(ub - lb)
                        delta = r2 / 10
                        if (Lightnprev[i] == Lightn[i]):
                            alpha = 1
                        else:
                            alpha = (delta) * ratAvg * numpy.exp(-k * per2)
                        #    alpha=1*ratAvg*1

                        if (Lightnprev[i] == Lightn[i]):
                            gamma = 1
                        else:
                            gamma = 1 * (ratB / ratC)

                        beta0 = 1
                        beta = (beta0 - betamin) * numpy.exp(
                            -gamma * r**2) + betamin
                        beta2 = (beta0 - betamin) * numpy.exp(
                            -gamma * r2**2) + betamin
                        tmpf = alpha * (numpy.random.rand(dim) - 0.5) * 1

                        #ns[i,:]=ns[i,:]*(1-beta)+nso[j,:]*beta+tmpf

                        ns[i, :] = ns[i, :] + (
                            beta * (nso[j, :] - ns[i, :])) + (
                                beta2 * (nso[0, :] - ns[i, :])) + tmpf
                    #    ns=numpy.clip(ns, lb, ub)
        else:
            bet = 3 / 2
            sigma = (math.gamma(1 + bet) * math.sin(math.pi * bet / 2) /
                     (math.gamma(
                         (1 + bet) / 2) * bet * 2**((bet - 1) / 2)))**(1 / bet)
            u = numpy.random.randn(dim) * sigma
            v = numpy.random.randn(dim)
            step = u / abs(v)**(1 / bet)
            stepsize = 0.001 * (step * (ns[i, :] - ns[0, :]))
            lastn = n - int(pop / 2)
            for t in range(lastn, n):
                ran2 = numpy.random.random_sample()
                for y in range(dim):
                    ns[t,
                       y] = ns[t,
                               y] + stepsize[y] * numpy.random.random_sample()
                    # delta2=(ns[0,y] +ns[1,y])*0.5
                    # # delta2=delta
                    # # print (ns[0,y],ns[lastn,y],delta2)
                    # ran=numpy.random.uniform(0, delta2)
                    # if (ran2<0.5):
                    #     ns[t,y]=ns[0,y]-ran
                    #     # ns[t,y]=numpy.random.uniform(lb,ub)
                    # else:
                    #     ns[t,y]=ns[0,y]+ran
                    #     # ns[t,y]=numpy.random.uniform(lb,ub)

        ns = numpy.clip(ns, lb, ub)
        IterationNumber = k
        BestQuality = fbest

        if (k % 1 == 0):
            print([
                'At iteration ' + str(k) + ' the best fitness is ' +
                str(BestQuality) + ": PFA" + " :" + str(objf)
            ])
        if (k % 100 == 0):
            convergence.append(fbest)
    #
    ####################### End main loop
    convergence.append(Lightn[0])
    convergence.append(Lightn[6])
    convergence.append(Lightn[12])
    convergence.append(Lightn[18])
    convergence.append(Lightn[24])
    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence = convergence
    s.optimizer = "PFA"
    s.objfname = objf.__name__

    return s
Example #20
    l_num, l_den = [sy.lambdify((), c)()
                    for c in c_num_den]  #convert to floats
    return signal.lti(l_num, l_den)


# ------------ Initiate variables  -------------------
g = 9.8  #[m/s^2]
gamma = 1  #[who knows?]
v_bar = 6  #[V]
r_bar = -np.sqrt((gamma * v_bar**2) / g)  #[length]
print('r_bar = ', r_bar)
#R_hat_0 = [-1, -0.5, -0.25, -0.1, -0.05, 0.05, 0.1, 0.25, 0.4, 0.5, 1, 2]
R_hat_0 = 1
print('R_hat_0 = ', R_hat_0)
t = np.arange(0.0, 1.0, 0.01)
step = 0 * np.heaviside(t, 1)

# ---------------- Initiate Plant ------------------
name = 'Test'
num = 2 * gamma * v_bar / (r_bar**2)
p1 = 2 * gamma * v_bar**2 / r_bar**3
den = [1, 0, p1]
G = signal.TransferFunction(num, den)
z, p, k = signal.tf2zpk(num, den)
#print(G)

#  ------------- Initiate Controller ---------------
Kd = 1
Kp = 1
Ki = 1
C = signal.TransferFunction([Kd, Kp, Ki], 1)
Example #21
 pandas_udf(lambda s1, s2: np.floor_divide(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "fmax":
 pandas_udf(lambda s1, s2: np.fmax(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "fmin":
 pandas_udf(lambda s1, s2: np.fmin(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "fmod":
 pandas_udf(lambda s1, s2: np.fmod(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "gcd":
 pandas_udf(lambda s1, s2: np.gcd(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "heaviside":
 pandas_udf(lambda s1, s2: np.heaviside(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "hypot":
 F.hypot,
 "lcm":
 pandas_udf(lambda s1, s2: np.lcm(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "ldexp":
 pandas_udf(lambda s1, s2: np.ldexp(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
 "left_shift":
 pandas_udf(lambda s1, s2: np.left_shift(s1, s2), LongType(),
            PandasUDFType.SCALAR),
 "logaddexp":
 pandas_udf(lambda s1, s2: np.logaddexp(s1, s2), DoubleType(),
            PandasUDFType.SCALAR),
Example #22
def square_window(x):
    '''returns 1 for -1 <= x <= 1 and 0 otherwise'''
    return np.heaviside(x + 1, 1) - np.heaviside(x - 1, 0)
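Both edges are inclusive because of the second arguments: heaviside(x + 1, 1) is already 1 at x == -1, and heaviside(x - 1, 0) is still 0 at x == 1. A quick check:

import numpy as np

x = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
print(square_window(x))  # [0. 1. 1. 1. 0.]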
Example #23
 def _Ns(self, M, a):
     # Number of satellites
     M0 = 10.**self._lM0(a)
     M1 = 10.**self._lM1(a)
     return np.heaviside(M - M0, 1) * ((M - M0) / M1)**self.alpha
Example #24

def prediction(attributes, weights):
    predicted = np.sum(weights.T * attributes, axis=1)
    return sigmoid(predicted)


def gradient(X_train, weights):
    #Calculates the gradient based on the newly predicted values
    difference = y_train - prediction(X_train, weights)
    del_E = -np.sum(np.multiply(X_train.T, difference), axis=1)
    return del_E


def update(weights):
    #updates the weights using the gradient descent algorithm
    updated_weights = weights - learning_parameter * gradient(X_train, weights)
    return updated_weights


def determine_class(weights, test):
    np.multiply(X_test.T, weights)


for i in range(10):
    #print(weights)
    weights = update(weights)
    test_prediction = sigmoid(np.sum(np.multiply(X_test, weights), axis=1))
    y_pred = np.heaviside((test_prediction - 0.5), 0)
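    # heaviside thresholds the sigmoid outputs at 0.5, turning probabilities into hard 0/1 labels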
    print(accuracy_score(y_test, y_pred))
Example #25
def boxpot(r0, V0):
    return lambda r: V0 * (1 - np.heaviside(r - r0, 0))
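A short usage sketch (values illustrative): the returned closure is V0 inside the well and 0 outside, with the edge r == r0 counted as inside because heaviside's value at zero is set to 0:

import numpy as np

V = boxpot(r0=1.0, V0=-10.0)
r = np.array([0.0, 0.5, 1.0, 1.5])
print(V(r))  # V0 inside the well (edge included), 0 outside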
Example #26
 def solution(x,y):
     sol = np.heaviside(-lvl_func(x,y),1)*(desired_func(x,y))
     return sol
Example #27
e_aberrated = np.zeros((len(x), len(y)), dtype=complex)
e_sum = np.zeros((len(x), len(y)), dtype=complex)
e_diff = np.zeros((len(x), len(y)), dtype=complex)
'''amplitude normalization'''
for counterx, elx in enumerate(x):
    for countery, ely in enumerate(y):

        # perform transformation to polar coordinates
        ra = np.sqrt(elx**2 + ely**2)
        the = np.arctan2(ely, elx)

        # specify wavefront error
        wfe_gen = float(wfe(list_wfe, ra, the, D1_2))

        # define aperture 1
        aperture1norm[counterx][countery] = 1 * np.heaviside(
            D1_2 - ra, 1) * np.heaviside(ra, 1) * np.exp(
                -2 * np.pi * 1j * wfe_gen / lam)

# normalize this amplitude to unit intensity
amplitude_temp = np.sum(aperture1norm)

# choose initial amplitude imaginary part because of degeneracy
a0_imag = amplitude_temp.imag


# define function to find roots for
def func(a0_real):

    func = 0

    for el1 in aperture1norm:
Example #28
 def surge_pwr(E, p, lmbd, E_th):
     x = np.heaviside(E - E_th, 1) * (E - E_th)
     return (lmbd**p) * (p + 1)**(p + 1) * x / (x + p * lmbd)**(p + 1)
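The pattern np.heaviside(E - E_th, 1) * (E - E_th) is a rectifier, numerically equal to max(E - E_th, 0), so the whole expression vanishes below threshold. Checking the building block on its own:

import numpy as np

E, E_th = np.linspace(0.0, 4.0, 5), 2.0
x = np.heaviside(E - E_th, 1) * (E - E_th)
print(x)                          # zero below E_th, then E - E_th: 0, 0, 0, 1, 2
print(np.maximum(E - E_th, 0.0))  # the same rectification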
Example #29
for i in range(0, nt):
    Amiddle[0, i] = A[i, int(A.shape[1] / 2), 0]
    Amiddle[1, i] = A[i, int(A.shape[1] / 2), 1]
    Amiddle[2, i] = A[i, int(A.shape[1] / 2), 2]

r0 = 0.5
E = 3e6
h0 = 0.05
T = 2 * 0.165
A0 = np.pi * r0**2
time = np.linspace(0, (T / 2 + (0.25 - 0.165)), int(nt))
# time = np.linspace(0,(T/2),int(nt))
dt = time[1] - time[0]

freq = 1
Pin = 2e4 * np.sin(2 * np.pi * time / T * freq) * np.heaviside(
    T / freq / 2 - time, 1)
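# heaviside cuts the sine off after half a period, leaving a single positive pressure pulse at the inlet.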
beta = E * h0 * np.sqrt(np.pi)
Ainlet = (Pin * A0 / beta + np.sqrt(A0))**2

A1 = beta * (np.sqrt(Amiddle[0, :]) - np.sqrt(A0)) / A0
A2 = beta * (np.sqrt(Amiddle[1, :]) - np.sqrt(A0)) / A0
A3 = beta * (np.sqrt(Amiddle[2, :]) - np.sqrt(A0)) / A0

plt.figure(figsize=[10, 6])
# plt.plot(time,A1,'r',label='$\\Omega_1$')
# plt.plot(time,A2,'b',label='$\\Omega_2$')
# plt.plot(time,A3,'g',label='$\\Omega_3$')
plt.plot(time, Pin)
plt.xlabel('Time (s)')
plt.ylabel('Pressure $dyn \\cdot cm^{-2}$')
# plt.legend()
Example #30
def energy(pos, param, write_energy=False):
    E_i = 0
    for ind, par in param.items():
        if ind == 0:  # harmonic bonds: E = k/2 * (|p1-p2| - d)**2
            i1, i2 = par[:,1].astype(int), par[:,2].astype(int)
            p1, p2 = pos[i1], pos[i2]
            k, d = par[:,3], par[:,4]
            n_p12 = norm(p1-p2, axis=1)
            E_i += .5 * np.sum(k * (n_p12 - d)**2)
        elif ind == 1:  # one-sided contact springs, active only when compressed (|p1-p2| < d)
            i1, i2 = par[:,1].astype(int), par[:,2].astype(int)
            p1, p2 = pos[i1], pos[i2]
            k, d = par[:,3], par[:,4]
            n_p12 = norm(p1-p2, axis=1)
            E_i += .5 * np.sum(np.heaviside(d-n_p12, 1) * k * (n_p12 - d)**2)
        elif ind == 2:  # harmonic penalty on the four interior angles of a quadrilateral i1..i4
            i1, i2, i3, i4 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int), par[:,4].astype(int)
            ii = np.array([i1, i2, i3, i4])
            p1, p2, p3, p4 = pos[i1], pos[i2], pos[i3], pos[i4]
            pp = np.array([p1, p2, p3, p4])
            k, a0 = par[:,5], par[:,6]
            sets = np.array([[0, 1, 3], [1, 3, 2],[ 3, 2, 0],[2, 0, 1]])
            v1 = pp[sets[:,1]]-pp[sets[:,0]]
            v2 = pp[sets[:,2]]-pp[sets[:,1]]
            angles = (np.arctan2(v1[:,:,0], v1[:,:,1])-np.arctan2(v2[:,:,0], v2[:,:,1])) % (2*np.pi) 
            angles[np.where(angles>np.pi)] -= 2*np.pi
            angles[np.where(angles<=-np.pi)] += 2*np.pi
            for i in range(len(par)):
                nn = len(np.where(angles[:,i]<0)[0])
                if nn == 1:
                    angles[np.where(angles[:,i]<0),i] += 2*np.pi
                elif nn>1:
                    angles[np.where(angles[:,i]>0),i] -= 2*np.pi

            angles = np.abs(angles)
            angles[np.where(angles>np.pi)] -= 2*np.pi
            E_i += 0.5 * np.sum(k * (angles - a0)**2)
        elif ind == 8:  # harmonic three-body bond angle at the middle particle i2
            i1, i2, i3 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int)
            p1, p2, p3 = pos[i1], pos[i2], pos[i3]
            k, a0 = par[:,4], par[:,5]
            n_p12, n_p23 = norm(p1-p2, axis=1), norm(p3-p2, axis=1)
            arg = np.multiply(p2-p1, p3-p2).sum(1)/n_p12/n_p23
            arg[np.where(arg >= 1.)] = 1.-1e-6 
            arg[np.where(arg <= -1.)] = -1.+1e-6 
            angle = np.arccos(arg)
            E_i += 0.5 * np.sum(k * (angle - a0)**2)
        elif ind == 4:  # angle against a horizontal reference point placed to the right of p2
            i1, i2 = par[:,1].astype(int), par[:,2].astype(int)
            p1, p2 = pos[i1], pos[i2]
            k = par[:,3]
            _p3 = [p2[:,0]+30, 0.5*(p1[:,1]+p2[:,1])]
            p3 = np.swapaxes(_p3,0,1)
            a0 = 0
            n_p12, n_p23 = norm(p1-p2, axis=1), norm(p3-p2, axis=1)
            arg = np.multiply(p2-p1, p3-p2).sum(1)/n_p12/n_p23
            arg[np.where(arg >= 1.)] = 1.-1e-6 
            arg[np.where(arg <= -1.)] = -1.+1e-6 
            angle = np.arccos(arg)
            E_i += 0.5 * np.sum(k * (angle - a0)**2)
        elif ind == 5:  # one-sided angle penalty, gated by the heaviside at a02 = pi/2
            i1, i2, i3 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int)
            p1, p2, p3 = pos[i1], pos[i2], pos[i3]
            k, a0 = par[:,4], par[:,5]
            a02 = np.full((k.size), np.pi*.5)
            n_p12, n_p23 = norm(p1-p2, axis=1), norm(p3-p2, axis=1)
            arg = np.multiply(p2-p1, p3-p2).sum(1)/n_p12/n_p23
            arg[np.where(arg >= 1.)] = 1.-1e-6 
            arg[np.where(arg <= -1.)] = -1.+1e-6 
            angle = np.arccos(arg)
            E_i += 0.5 * np.sum(np.heaviside(a02-angle, 1) * k * (angle - a0)**2)
        elif ind == 6:  # harmonic angle between the bond vectors p2-p1 and p4-p3
            i1, i2, i3, i4 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int), par[:,4].astype(int)
            p1, p2, p3, p4 = pos[i1], pos[i2], pos[i3], pos[i4]
            k, a0 = par[:,5], par[:,6]
            n_p12, n_p34 = norm(p1-p2, axis=1), norm(p4-p3, axis=1)
            arg = np.multiply(p2-p1, p4-p3).sum(1)/n_p12/n_p34
            arg[np.where(arg >= 1.)] = 1.-1e-6 
            arg[np.where(arg <= -1.)] = -1.+1e-6 
            angle = np.arccos(arg)
            E_i += 0.5 * np.sum(k * (angle - a0)**2)
        elif ind == 7:  # long-range 1/r repulsion: E = k / |p1-p2|
            i1, i2 = par[:,1].astype(int), par[:,2].astype(int)
            p1, p2 = pos[i1], pos[i2]
            k = par[:,3]
            n_p12 = norm(p1-p2, axis=1)
            E_i += np.sum(k / n_p12)

    return E_i
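
A minimal smoke test for the bond term (ind == 0), assuming norm is numpy.linalg.norm and the column layout [id, i1, i2, k, d] implied by the indexing above; the numbers are illustrative only:

import numpy as np
from numpy.linalg import norm

pos = np.array([[0.0, 0.0], [2.0, 0.0]])
param = {0: np.array([[0.0, 0.0, 1.0, 1.0, 1.5]])}  # one bond 0-1 with k=1, rest length d=1.5
print(energy(pos, param))  # 0.5 * 1.0 * (2.0 - 1.5)**2 = 0.125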
Example #31
 def _true_g_function(self, x):
     return np.heaviside(x, 1) * self._step_height
 def test_heaviside_scalar(self):
     assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
     assert np.heaviside(0. * u.s,
                         25 * u.percent) == 0.25 * u.dimensionless_unscaled
     assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
 def classify(self, sample):
     log_ratio = self.weight[1:] @ sample + self.weight[0]
     return np.heaviside(log_ratio, 0)
Example #34
 def surge_exp(E, p, lmbd, E_th):
     x = np.heaviside(E - E_th, 1) * (E - E_th)
     return ((e / lmbd)**p) * (x**p) * np.exp(-p * x / lmbd)
Example #35
def force(pos, param, __i1, __i2, __k_rep, __d_rep, __k_rep_lr, write_force):
    _shape = pos.shape
    n = _shape[0]
    F = np.zeros(_shape)
    for ind, par in param.items():
        if ind == 0:  # harmonic bonds
            i1, i2 = par[:,1].astype(int), par[:,2].astype(int)
            p1, p2 = pos[i1], pos[i2]
            p12 = p1-p2
            k, d = np.column_stack((par[:,3], par[:,3])), np.column_stack((par[:,4], par[:,4]))
            n_p12 = norm(p12, axis=1)
            n12 = np.column_stack((n_p12, n_p12))
            diff = n12 - d
            f = np.zeros((n, n, 2))
            f[i1, i2] = -  k * diff * p12 * np.power(n12, -1)
            F += np.sum(f, axis=1)
            F -= np.sum(f, axis=0)
        elif ind == 1:  # short-range contact repulsion, active only where the pair overlaps (|p1-p2| < d_rep)
            i1, i2 = __i1, __i2
            p1, p2 = np.take(pos, i1, axis=0), np.take(pos, i2, axis=0)
            p12 = p1-p2
            k = __k_rep
            d = __d_rep
            n_p12 = norm(p12, axis=1)
            diff = n_p12 - d[:,0]
            f = np.zeros((n, n, 2))
            _i  = np.where(diff < 0)
            i1, i2 = i1[_i], i2[_i]
            p12 = p12[_i]
            k = k[_i]
            n12 = np.column_stack((n_p12[_i], n_p12[_i]))
            diff = n12 - d[_i]
            f[i1, i2] = - k * diff * p12 * np.power(n12, -1)
            F += np.sum(f, axis=1)
            F -= np.sum(f, axis=0)
            ##
        elif ind == 2:  # quadrilateral interior-angle forces
            i1, i2, i3, i4 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int), par[:,4].astype(int)
            ii = np.array([i1, i2, i3, i4])
            p1, p2, p3, p4 = pos[i1], pos[i2], pos[i3], pos[i4]
            pp = np.array([p1, p2, p3, p4])
            k, a0 = par[:,5], par[:,6]
            sets = np.array([[0, 1, 3], [1, 3, 2],[ 3, 2, 0],[2, 0, 1]])
            v1 = pp[sets[:,1]]-pp[sets[:,0]]
            v2 = pp[sets[:,2]]-pp[sets[:,1]]
            angles = (np.arctan2(v1[:,:,0], v1[:,:,1])-np.arctan2(v2[:,:,0], v2[:,:,1]))
            angles[np.where(angles>np.pi)] -= 2*np.pi
            angles[np.where(angles<=-np.pi)] += 2*np.pi
            for i in range(len(par)):
                nn = len(np.where(angles[:,i]<0)[0])
                if nn == 1:
                    angles[np.where(angles[:,i]<0),i] += 2*np.pi
                elif nn>1:
                    angles[np.where(angles[:,i]>0),i] -= 2*np.pi

            angles = np.abs(angles)
            n1 = np.linalg.norm(v1, axis=2)
            n2 = np.linalg.norm(v2, axis=2)
            arg = np.cos(angles)
            arg[np.where(arg >= 1.)] = 1.-1e-7 
            arg[np.where(arg <= -1.)] = -1.+1e-7

            _f = - k *  (angles - a0) * (-1./np.sqrt(1.-(arg)**2))
            _f[np.where(np.abs(angles-a0)<1e-2)] = 0
            for comp in range(2):
                f1 = _f * ( (-v2[:,:,comp])/n1/n2 + (v1[:,:,comp])*arg/n1**2 )
                f2 = _f * ( (-v1[:,:,comp]+v2[:,:,comp])/n1/n2 + v2[:,:,comp]*arg/n2**2 - v1[:,:,comp]*arg/n1**2 ) 
                f3 = _f * ( (v1[:,:,comp])/n1/n2 - (v2[:,:,comp])*arg/n2**2 )
                ff = np.array([f1, f2, f3])
                for i in range(4):
                    for j in range(3):
                        F[ii[sets[i][j]],comp] += ff[j][i]
        elif ind == 8:  # three-body bond-angle forces
            i1, i2, i3 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int)
            ii = np.array([i1, i2, i3])
            p1, p2, p3 = pos[i1], pos[i2], pos[i3]
            k, a0 = par[:,4], par[:,5]
            n_p12, n_p23 = norm(p1-p2, axis=1), norm(p3-p2, axis=1)
            arg = np.multiply(p2-p1, p3-p2).sum(1)/n_p12/n_p23
            arg[np.where(arg >= 1.)] = 1.-1e-7 
            arg[np.where(arg <= -1.)] = -1.+1e-7 
            v12 = p2-p1
            v23 = p3-p2
            a1 = np.arctan2(v12[:,0], v12[:,1])
            a2 = np.arctan2(v23[:,0], v23[:,1])
            angle = (a2-a1) % (2*np.pi)
            angle[np.where(angle>np.pi)] -= 2*np.pi
            angle = np.abs(angle)
            _f = - k * (angle - a0) * (-1./np.sqrt(1.-(arg)**2))
            for comp in range(2):
                f1 = _f * ( (p2[:,comp]-p3[:,comp])/n_p12/n_p23 + (p2[:,comp]-p1[:,comp])*arg/n_p12**2 )
                f2 = _f * ( (p1[:,comp]-2*p2[:,comp]+p3[:,comp])/n_p12/n_p23 + (p3[:,comp]-p2[:,comp])*arg/n_p23**2 -
                        (p2[:,comp]-p1[:,comp])*arg/n_p12**2 ) 
                f3 = _f * ( (p2[:,comp]-p1[:,comp])/n_p12/n_p23 - (p3[:,comp]-p2[:,comp])*arg/n_p23**2 )
                ff = np.array([f1, f2, f3])
                for j in range(3):
                    if np.unique(ii[j]).size < ii[j].size:
                        for ik, _i in np.ndenumerate(ii[j]):
                            F[_i][comp] += ff[j][ik]
                    else:
                        F[ii[j], comp] += ff[j]
        elif ind == 4:  # angle against a horizontal reference point
            i1, i2 = par[:,1].astype(int), par[:,2].astype(int)
            ii = [i1, i2]
            p1, p2 = pos[i1], pos[i2]
            k = par[:,3]
            _p3 = [p2[:,0]+30, 0.5*(p1[:,1]+p2[:,1])]
            p3 = np.swapaxes(_p3,0,1)
            a0 = np.zeros((k.size))
            n_p12, n_p23 = norm(p1-p2, axis=1), norm(p3-p2, axis=1)
            arg = np.multiply(p2-p1, p3-p2).sum(1)/n_p12/n_p23
            arg[np.where(arg >= 1.)] = 1.-1e-6 
            arg[np.where(arg <= -1.)] = -1.+1e-6 
            angle = np.arccos(arg)
            _f = - k * (angle - a0) * (-1./np.sqrt(1.-(arg)**2))
            for comp in range(2):
                f1 = _f * ( (p2[:,comp]-p3[:,comp])/n_p12/n_p23 + (p2[:,comp]-p1[:,comp])*arg/n_p12**2 )
                f2 = _f * ( (p1[:,comp]-2*p2[:,comp]+p3[:,comp])/n_p12/n_p23 + (p3[:,comp]-p2[:,comp])*arg/n_p23**2 -
                        (p2[:,comp]-p1[:,comp])*arg/n_p12**2 ) 
                ff = [f1, f2]
                for j in range(2):
                    if len(set(ii[j])) < ii[j].size:
                        for ik, _i in enumerate(ii[j]):
                            F[_i][comp] += ff[j][ik]
                    else:
                        F[ii[j], comp] += ff[j]
        elif ind == 5:  # one-sided angle penalty gated at a02 = pi/2
            i1, i2, i3 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int)
            p1, p2, p3 = pos[i1], pos[i2], pos[i3]
            k, a0 = par[:,4], par[:,5]
            a02 = np.full((k.size), np.pi*.5)
            n_p12, n_p23 = norm(p1-p2, axis=1), norm(p3-p2, axis=1)
            arg = np.multiply(p2-p1, p3-p2).sum(1)/n_p12/n_p23
            arg[np.where(arg >= 1.)] = 1.-1e-6 
            arg[np.where(arg <= -1.)] = -1.+1e-6 
            angle = np.arccos(arg)
            ii = [i1, i2, i3]
            _f = - np.heaviside(angle-a02, 1) * k * (angle - a0) * (-1./np.sqrt(1.-(arg)**2))
            for comp in range(2):
                f1 = _f * ( (p2[:,comp]-p3[:,comp])/n_p12/n_p23 + (p2[:,comp]-p1[:,comp])*arg/n_p12**2 )
                f2 = _f * ( (p1[:,comp]-2*p2[:,comp]+p3[:,comp])/n_p12/n_p23 + (p3[:,comp]-p2[:,comp])*arg/n_p23**2 -
                        (p2[:,comp]-p1[:,comp])*arg/n_p12**2 ) 
                f3 = _f * ( (p2[:,comp]-p1[:,comp])/n_p12/n_p23 - (p3[:,comp]-p2[:,comp])*arg/n_p23**2 )
                ff = [f1, f2, f3]
                for j in range(3):
                    if len(set(ii[j])) < ii[j].size:
                        for ik, _i in enumerate(ii[j]):
                            F[_i][comp] += ff[j][ik]
                    else:
                        F[ii[j], comp] += ff[j]
        elif ind == 6:  # torque-like coupling between the bond vectors p2-p1 and p4-p3
            i1, i2, i3, i4 = par[:,1].astype(int), par[:,2].astype(int), par[:,3].astype(int), par[:,4].astype(int)
            p1, p2, p3, p4 = pos[i1], pos[i2], pos[i3], pos[i4]
            k, a0 = par[:,5], par[:,6]
            v12 = p2-p1
            v34 = p4-p3
            a1 = np.arctan2(v12[:,0], v12[:,1])
            a2 = np.arctan2(v34[:,0], v34[:,1])
            angle = (a2-a1) % (2*np.pi)
            angle[np.where(angle>np.pi)] -= 2*np.pi
            M = np.full((p1.shape[0],3), [0.,0.,1.])
            tf12 = np.cross(M, np.hstack((v12, np.zeros((p1.shape[0],1)))))
            tf34 = np.cross(M, np.hstack((v34, np.zeros((p1.shape[0],1)))))
            _f12 = - np.matmul(k * (angle - a0),  tf12[:,0:2])
            _f34 =  np.matmul(k * (angle - a0) , tf34[:,0:2])
            ii = [i1, i2, i3, i4]
            f1 = -_f12
            f2 = _f12
            f3 = -_f34
            f4 = _f34
            ff = [f1, f2, f3, f4]
            for j in range(4):
                if len(set(ii[j])) < ii[j].size:
                    for ik, _i in enumerate(ii[j]):
                        F[_i] += ff[j][ik]
                else:
                    F[ii[j]] += ff[j]
        elif ind == 7:  # long-range 1/r repulsion
            i1, i2 = __i1, __i2
            p1, p2 = pos[i1], pos[i2]
            p12 = p1-p2
            k = __k_rep_lr
            n_p12 = norm(p12, axis=1)
            f = np.zeros((n, n, 2))
            n12 = np.column_stack((n_p12, n_p12))
            f[i1, i2] = k * np.power(n12, -3) * p12
            F += np.sum(f, axis=1)
            F -= np.sum(f, axis=0)
        else:
            print("Don't have potential %d" % ind)
#        print("F 20 %d %.2e %.2e" % (ind, F[2][0], F[2][1] )    )
    return F
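
The matching force for the same single bond pulls the pair back toward the rest length; the unused repulsion arguments can be passed as empty arrays. A sketch under the same assumptions as the energy example above:

import numpy as np
from numpy.linalg import norm

pos = np.array([[0.0, 0.0], [2.0, 0.0]])
param = {0: np.array([[0.0, 0.0, 1.0, 1.0, 1.5]])}   # one bond 0-1 with k=1, d=1.5
empty_i = np.array([], dtype=int)
F = force(pos, param, empty_i, empty_i,
          np.zeros((0, 2)), np.zeros((0, 1)), np.zeros((0,)), False)
print(F)  # [[ 0.5  0. ] [-0.5  0. ]] -- equal and opposite, restoring the 1.5 rest length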
Example #36
def relu_derivative(g):
    derivatives = np.heaviside(g, 0)
    return derivatives
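
np.heaviside(g, 0) is exactly the ReLU derivative, with the subgradient at 0 taken to be 0:

import numpy as np
g = np.array([-1.5, 0.0, 2.0])
print(relu_derivative(g))  # [0. 0. 1.]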
Example #37
 def get_action(self, x):
     z = np.dot(self.W1, x)
     a = np.tanh(z)
     z = np.dot(self.W2, a)
     return int(np.heaviside(z, 0)[0])
def main():
    """
    Generate a new game
    The function below generates a new chess board with King, Queen and Enemy King pieces randomly assigned so that they
    do not cause any threats to each other.
    s: a size_board x size_board matrix filled with zeros and three numbers:
    1 = location of the King
    2 = location of the Queen
    3 = location of the Enemy King
    p_k2: 1x2 vector specifying the location of the Enemy King, the first number represents the row and the second
    number the column
    p_k1: same as p_k2 but for the King
    p_q1: same as p_k2 but for the Queen
    """
    s, p_k2, p_k1, p_q1 = generate_game(size_board)

    """
    Possible actions for the Queen are the eight directions (down, up, right, left, up-right, down-left, up-left, 
    down-right) multiplied by the number of squares that the Queen can cover in one movement which equals the size of 
    the board - 1
    """
    possible_queen_a = (s.shape[0] - 1) * 8
    """
    Possible actions for the King are the eight directions (down, up, right, left, up-right, down-left, up-left, 
    down-right)
    """
    possible_king_a = 8

    # Total number of actions for Player 1 = actions of King + actions of Queen
    N_a = possible_king_a + possible_queen_a

    """
    Possible actions of the King
    This functions returns the locations in the chessboard that the King can go
    dfK1: a size_board x size_board matrix filled with 0 and 1.
          1 = locations that the king can move to
    a_k1: a 8x1 vector specifying the allowed actions for the King (marked with 1): 
          down, up, right, left, down-right, down-left, up-right, up-left
    """
    dfK1, a_k1, _ = degree_freedom_king1(p_k1, p_k2, p_q1, s)
    """
    Possible actions of the Queen
    Same as the above function but for the Queen. Here we have 8*(size_board-1) possible actions as explained above
    """
    dfQ1, a_q1, dfQ1_ = degree_freedom_queen(p_k1, p_k2, p_q1, s)
    """
    Possible actions of the Enemy King
    Same as the above function but for the Enemy King. Here we have 8 possible actions as explained above
    """
    dfK2, a_k2, check = degree_freedom_king2(dfK1, p_k2, dfQ1_, s, p_k1)

    """
    Compute the features
    x is a Nx1 vector computing a number of input features based on which the network should adapt its weights  
    with board size of 4x4 this N=50
    """
    x = features(p_q1, p_k1, p_k2, dfK2, s, check)

    """
    Initialization
    Define the size of the layers and initialization
    FILL THE CODE
    Define the network, the number of the nodes of the hidden layer should be 200, you should know the rest. The weights 
    should be initialised according to a uniform distribution and rescaled by the total number of connections between 
    the considered two layers. For instance, if you are initializing the weights between the input layer and the hidden 
    layer each weight should be divided by (n_input_layer x n_hidden_layer), where n_input_layer and n_hidden_layer 
    refer to the number of nodes in the input layer and the number of nodes in the hidden layer respectively. The biases
     should be initialized with zeros.
    """
    n_input_layer = 50  # Number of neurons of the input layer. TODO: Change this value
    n_hidden_layer = 200  # Number of neurons of the hidden layer
    n_output_layer = 32  # Number of neurons of the output layer. TODO: Change this value accordingly

    """
    TODO: Define the w weights between the input and the hidden layer and the w weights between the hidden layer and the 
    output layer according to the instructions. Define also the biases.
    """

    w_input_hidden = np.random.rand(n_hidden_layer,n_input_layer)/(n_input_layer * n_hidden_layer)
    normW1 = np.sqrt(np.diag(w_input_hidden.dot(w_input_hidden.T)))
    normW1 = normW1.reshape(n_hidden_layer, -1)
    w_input_hidden = w_input_hidden/normW1
    
    w_hidden_output = np.random.rand(n_output_layer,n_hidden_layer)/(n_hidden_layer * n_output_layer)
    normW2 = np.sqrt(np.diag(w_hidden_output.dot(w_hidden_output.T)))
    normW2 = normW2.reshape(n_output_layer, -1)
    w_hidden_output = w_hidden_output/normW2
    
    bias_W1 = np.zeros((n_hidden_layer))
    bias_W2 = np.zeros((n_output_layer))


    # YOUR CODES ENDS HERE

    # Network Parameters
    epsilon_0 = 0.2    # epsilon for the e-greedy policy
    beta = 0.00005     # epsilon discount factor
    gamma = 0.85       # SARSA learning discount factor
    eta = 0.0035       # learning rate
    N_episodes = 40000 # number of games; each game ends with a checkmate or a draw
    alpha = 1/10000    # smoothing factor for the running reward/move-count averages
    ###  Training Loop  ###

    # Directions: down, up, right, left, down-right, down-left, up-right, up-left
    # Each row specifies a direction, 
    # e.g. for down we need to add +1 to the current row and +0 to current column
    move_map = np.array([[1, 0],
                         [-1, 0],
                         [0, 1],
                         [0, -1],
                         [1, 1],
                         [1, -1],
                         [-1, 1],
                         [-1, -1]])
    
    # THE FOLLOWING VARIABLES COULD CONTAIN THE REWARDS PER EPISODE AND THE
    # NUMBER OF MOVES PER EPISODE, FILL THEM IN THE CODE ABOVE FOR THE
    # LEARNING. OTHER WAYS TO DO THIS ARE POSSIBLE, THIS IS A SUGGESTION ONLY.    

    R_save = np.zeros([N_episodes+1, 1])
    N_moves_save = np.zeros([N_episodes+1, 1])
    
    # END OF SUGGESTIONS
    

    for n in tqdm(range(N_episodes)):
        epsilon_f = epsilon_0 / (1 + beta * n)  # epsilon decays per episode, lowering the probability of exploring
        checkmate = 0  # 0 = not a checkmate, 1 = checkmate
        draw = 0  # 0 = not a draw, 1 = draw
        i = 1  # counter for movements

        # Generate a new game
        s, p_k2, p_k1, p_q1 = generate_game(size_board)

        # Possible actions of the King
        dfK1, a_k1, _ = degree_freedom_king1(p_k1, p_k2, p_q1, s)
        # Possible actions of the Queen
        dfQ1, a_q1, dfQ1_ = degree_freedom_queen(p_k1, p_k2, p_q1, s)
        # Possible actions of the enemy king
        dfK2, a_k2, check = degree_freedom_king2(dfK1, p_k2, dfQ1_, s, p_k1)
        
        
        Start = np.array([np.random.randint(size_board),np.random.randint(size_board)])   #random start
        s_start = np.ravel_multi_index(Start,dims=(size_board,size_board),order='F')      #conversion in single index
        s_index = s_start

        while checkmate == 0 and draw == 0:
            R = 0  # Reward

            # Player 1

            # Actions & allowed_actions
            a = np.concatenate([np.array(a_q1), np.array(a_k1)])
            allowed_a = np.where(a > 0)[0]
            # Computing Features
            x = features(p_q1, p_k1, p_k2, dfK2, s, check)

            # FILL THE CODE 
            # Enter inside the Q_values function and fill it with your code.
            # You need to compute the Q values as output of your neural
            # network. You can change the input of the function by adding other
            # data, but the input of the function is suggested. 
            
            Q, out1 = Q_values(x, w_input_hidden, w_hidden_output, bias_W1, bias_W2)
            """
            YOUR CODE STARTS HERE
            
            FILL THE CODE
            Implement epsilon greedy policy by using the vector a and a_allowed vector: be careful that the action must
            be chosen from the a_allowed vector. The index of this action must be remapped to the index of the vector a,
            containing all the possible actions. Create a vector called a_agent that contains the index of the action 
            chosen. For instance, if a_allowed = [8, 16, 32] and you select the third action, a_agent=32 not 3.
            """
            
            greedy = (np.random.rand() > epsilon_f)
            
            if greedy:
                # Greedy: take the highest-Q action that is actually allowed.
                # Note the loop variable must not be i, which is the move counter.
                max_sort = np.argsort(-Q)
                for idx in max_sort:
                    if idx in allowed_a:
                        a_agent = idx
                        break
                else:
                    # no break: fall back to a random allowed action
                    a_agent = np.random.choice(allowed_a)
            else:
                a_agent = np.random.choice(allowed_a)

            # THE CODE ENDS HERE.


            # Player 1 makes the action
            if a_agent < possible_queen_a:
                direction = int(np.ceil((a_agent + 1) / (size_board - 1))) - 1  # which of the 8 queen directions
                steps = a_agent - direction * (size_board - 1) + 1              # how many squares along it

                s[p_q1[0], p_q1[1]] = 0
                mov = move_map[direction, :] * steps
                s[p_q1[0] + mov[0], p_q1[1] + mov[1]] = 2
                p_q1[0] = p_q1[0] + mov[0]
                p_q1[1] = p_q1[1] + mov[1]

            else:
                direction = a_agent - possible_queen_a
                steps = 1

                s[p_k1[0], p_k1[1]] = 0
                mov = move_map[direction, :] * steps
                s[p_k1[0] + mov[0], p_k1[1] + mov[1]] = 1
                p_k1[0] = p_k1[0] + mov[0]
                p_k1[1] = p_k1[1] + mov[1]

            # Compute the allowed actions for the new position

            # Possible actions of the King
            dfK1, a_k1, _ = degree_freedom_king1(p_k1, p_k2, p_q1, s)
            # Possible actions of the Queen
            dfQ1, a_q1, dfQ1_ = degree_freedom_queen(p_k1, p_k2, p_q1, s)
            # Possible actions of the enemy king
            dfK2, a_k2, check = degree_freedom_king2(dfK1, p_k2, dfQ1_, s, p_k1)

            # Player 2

            # Check for draw or checkmate
            if np.sum(dfK2) == 0 and dfQ1_[p_k2[0], p_k2[1]] == 1:
                # King 2 has no freedom and it is checked
                # Checkmate and collect reward
                checkmate = 1
                R = 1  # Reward for checkmate
                t = R  # terminal update: the episode ends here, so there is no discounted future term

                """
                FILL THE CODE
                Update the parameters of your network by applying backpropagation and Q-learning. You need to use the 
                rectified linear function as activation function (see supplementary materials). Exploit the Q value for 
                the action made. You computed previously Q values in the Q_values function. Be careful: this is the last 
                iteration of the episode, the agent gave checkmate.
                """

                deltaOut = (t - Q) * np.heaviside(Q, 0)  # ReLU derivative via heaviside
                deltaHid = np.dot(deltaOut, w_hidden_output) * np.heaviside(out1, 0)  # backprop before updating weights
                w_hidden_output += eta * np.outer(deltaOut, out1)
                bias_W2 += eta * deltaOut
                w_input_hidden += eta * np.outer(deltaHid, x)
                bias_W1 += eta * deltaHid
                
                R_save[n+1, 0] = alpha * R + (1-alpha) * R_save[n, 0]
                N_moves_save[n+1, 0] = alpha * i + (1-alpha) * N_moves_save[n, 0]
                # THE CODE ENDS HERE

                if checkmate:
                    break

            elif np.sum(dfK2) == 0 and dfQ1_[p_k2[0], p_k2[1]] == 0:
                # King 2 has no freedom but it is not checked
                draw = 1
                R = 0.1
                t = R  # terminal update: a draw also ends the episode
                
                """
                FILL THE CODE
                Update the parameters of your network by applying backpropagation and Q-learning. You need to use the 
                rectified linear function as activation function (see supplementary materials). Exploit the Q value for 
                the action made. You computed previously Q values in the Q_values function. Be careful: this is the last 
                iteration of the episode, it is a draw.
                """

                deltaOut = (t - Q) * np.heaviside(Q, 0)
                deltaHid = np.dot(deltaOut, w_hidden_output) * np.heaviside(out1, 0)  # backprop before updating weights
                w_hidden_output += eta * np.outer(deltaOut, out1)
                bias_W2 += eta * deltaOut
                w_input_hidden += eta * np.outer(deltaHid, x)
                bias_W1 += eta * deltaHid
                
                R_save[n+1, 0] = alpha * R + (1-alpha) * R_save[n, 0]
                N_moves_save[n+1, 0] = alpha * i + (1-alpha) * N_moves_save[n, 0]

                # YOUR CODE ENDS HERE
                

                if draw:
                    break

            else:
                # Move enemy King randomly to a safe location
                allowed_enemy_a = np.where(a_k2 > 0)[0]
                a_enemy = allowed_enemy_a[np.random.randint(allowed_enemy_a.shape[0])]

                direction = a_enemy
                steps = 1

                s[p_k2[0], p_k2[1]] = 0
                mov = move_map[direction, :] * steps
                s[p_k2[0] + mov[0], p_k2[1] + mov[1]] = 3

                p_k2[0] = p_k2[0] + mov[0]
                p_k2[1] = p_k2[1] + mov[1]

            # Update the parameters

            # Possible actions of the King
            dfK1, a_k1, _ = degree_freedom_king1(p_k1, p_k2, p_q1, s)
            # Possible actions of the Queen
            dfQ1, a_q1, dfQ1_ = degree_freedom_queen(p_k1, p_k2, p_q1, s)
            # Possible actions of the enemy king
            dfK2, a_k2, check = degree_freedom_king2(dfK1, p_k2, dfQ1_, s, p_k1)
            # Compute features
            x_next = features(p_q1, p_k1, p_k2, dfK2, s, check)
            # Compute Q-values for the discounted factor
            Q_next, _ = Q_values(x_next, w_input_hidden, w_hidden_output, bias_W1, bias_W2)
            t = R + (gamma * max(Q_next))
            """
            FILL THE CODE
            Update the parameters of your network by applying backpropagation and Q-learning. You need to use the 
            rectified linear function as activation function (see supplementary materials). Exploit the Q value for 
            the action made. You computed previously Q values in the Q_values function. Be careful: this is not the last 
            iteration of the episode, the match continues.
            """

            deltaOut = (t - Q) * np.heaviside(Q, 0)
            deltaHid = np.dot(deltaOut, w_hidden_output) * np.heaviside(out1, 0)  # backprop before updating weights
            w_hidden_output += eta * np.outer(deltaOut, out1)
            bias_W2 += eta * deltaOut
            w_input_hidden += eta * np.outer(deltaHid, x)  # gradient w.r.t. x, the input that produced Q and out1
            bias_W1 += eta * deltaHid

            # YOUR CODE ENDS HERE
            i += 1
        R_save[n+1, 0] = alpha * R + (1-alpha) * R_save[n, 0]
        N_moves_save[n+1, 0] = alpha * i + (1-alpha) * N_moves_save[n, 0]
    
    return R_save, N_moves_save
 def test_heaviside_array(self):
     values = np.array([-1., 0., 0., +1.])
     halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
     assert np.all(np.heaviside(values * u.m,
                                halfway * u.dimensionless_unscaled) ==
                   [0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
 def heaviside(x):
     return np.heaviside(x, .5)
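
The second argument only fixes the value at exactly zero, so this wrapper implements the half-maximum convention:

import numpy as np
print(np.heaviside(np.array([-1.0, 0.0, 1.0]), .5))  # [0.  0.5 1. ]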