Example No. 1
def construct_numpy_representation_with_pairs_of_rankings(
        features: pd.DataFrame,
        performances: pd.DataFrame,
        max_pairs_per_instance=100,
        seed=1,
        order="asc",
        skip_value=None):
    """Get numpy representation of features, performances and rankings

    Arguments:
        features {pd.DataFrame} -- Feature values
        performances {pd.DataFrame} -- Performances of algorithms

    Returns:
        tuple of np.ndarray -- Triple of numpy ndarrays: the first stores the
        feature values, the second the algorithm performances and the third
        the algorithm rankings
    """
    labels, ranks = sample_pairs(performances,
                                 pairs_per_instance=max_pairs_per_instance,
                                 seed=seed,
                                 skip_value=skip_value)
    joined = labels.join(features).join(performances,
                                        lsuffix="_rank",
                                        rsuffix="_performance")
    np_features = joined[features.columns.values].values
    np_performances = joined[list(performances.columns)].values
    np_labels = joined[list(labels.columns)].values + 1
    # Reorder each row's performances to follow the sampled ranking (1-based)
    np_performances = np_performances[
        np.arange(np_performances.shape[0])[:, np.newaxis], np_labels - 1]
    if order == "desc":
        np_labels = np.flip(np_labels, axis=1)
        np_performances = np.flip(np_performances, axis=1)

    return np_features, np_performances, np_labels
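
A minimal usage sketch (the toy frames below are hypothetical; sample_pairs is assumed to be importable from the surrounding module, and the two frames must share their instance index):

import numpy as np
import pandas as pd

features = pd.DataFrame(np.random.rand(3, 2), columns=["f1", "f2"])
performances = pd.DataFrame(np.random.rand(3, 3),
                            columns=["algo_a", "algo_b", "algo_c"])

X, P, R = construct_numpy_representation_with_pairs_of_rankings(
    features, performances, max_pairs_per_instance=5, seed=1)
# X holds one feature row per sampled pair, P the two performances in
# rank order, and R the 1-based indices of the paired algorithms.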
Example No. 2
    def __init__(self, components, center_of_mass_shifts=None):
        """Generate a FK solver from link and joint instances."""
        joint_indexes = [
            i for i, c in enumerate(components) if isinstance(c, Joint)
        ]

        if center_of_mass_shifts is not None and len(
                center_of_mass_shifts) != len(components) - len(joint_indexes):
            #if len(components)-len(joint_indexes) != len(joint_indexes)+1:
            #    raise Exception("Invalid Actuator: Each joint must be between two links, and to N joints needs N+1 links.")
            raise Exception(
                "Invalid Actuator: every link needs a CoM shift position.")

        def matrices(angles):
            joints = dict(zip(joint_indexes, angles))
            a = [joints.get(i, None) for i in range(len(components))]
            return [c.matrix(a[i]) for i, c in enumerate(components)]

        self._com_shifts = []
        if center_of_mass_shifts is not None:
            # Reverse the shift order to match the reversed chain traversal in
            # center_of_mass_parts, then append a homogeneous coordinate of 1
            shifts = np.flip(np.array(center_of_mass_shifts), axis=0)
            self._com_shifts = np.concatenate(
                (shifts, np.ones((len(shifts), 1))), axis=1)
        self._matrices = matrices
        self._types = [0] * len(components)
        for it in joint_indexes:
            self._types[it] = 1

        self._types = np.flip(self._types)
Example No. 3
    def calculate_Fi_ci_si(self):
        ''' Simple calculation of Fi, ci, si: no CPV effects and no time dependence '''
        bin_num = self.binning.get_number_of_bins()
        Fi = np.array([])
        ci = np.array([])
        si = np.array([])

        A_mag = abs(self.amplitude.get_A(
            0))  # Just make simple calculation in this class
        A_ph = np.angle(self.amplitude.get_A(0))
        A_mag_inv = np.transpose(A_mag)
        A_ph_inv = np.transpose(A_ph)

        avg_eff_over_phsp = self.efficiency.get_time_averaged_eff()
        for i in range(-bin_num, bin_num + 1):
            if i == 0: continue
            bin_idx = self.binning.get_bin_indices(i)
            inv_bin_idx = self.binning.get_bin_indices(-i)
            avg_eff = avg_eff_over_phsp[bin_idx]
            Fi = np.append(Fi, np.sum(avg_eff * A_mag[bin_idx]**2))
            ci = np.append(
                ci,
                np.sum(avg_eff * A_mag[bin_idx] * A_mag_inv[bin_idx] *
                       np.cos(A_ph[bin_idx] - A_ph_inv[bin_idx])))
            si = np.append(
                si,
                np.sum(avg_eff * A_mag[bin_idx] * A_mag_inv[bin_idx] *
                       np.sin(A_ph[bin_idx] - A_ph_inv[bin_idx])))

        Fi_inv = np.flip(Fi, 0)
        ci = ci / np.sqrt(Fi * Fi_inv)
        si = si / np.sqrt(Fi * Fi_inv)
        Fi = Fi / sum(Fi)

        return Fi, ci, si
Example No. 4
    def render(self, x, u=None):
        image = self.render_sub(self.frame_buffer, x)

        # Flip vertically; the rendered image comes out upside-down
        image = np.flip(image, 0)

        return image
Example No. 5
    def time_derivative_of_interpolated_path(self, q_n_i, q_n_plus_1_i, t_lower, t_upper, t):
        """
        Interpolate the path of a single q degree of freedom.
        This function should be evaluated for each DOF in the solution separately.
        """

        # path = q_n_i + (q_n_plus_1_i - q_n_i) * 1.0 / (t_upper - t_lower)
        # return path

        # Unwrap boxed values (e.g. autograd ArrayBox) if present
        if hasattr(q_n_i, '_value'):
            q_n_i_unpacked = q_n_i._value
        else:
            q_n_i_unpacked = q_n_i

        if hasattr(q_n_plus_1_i, '_value'):
            q_n_plus_1_i_unpacked = q_n_plus_1_i._value
        else:
            q_n_plus_1_i_unpacked = q_n_plus_1_i

        coefficients = np.flip(
            np.polyfit([t_lower, t_upper], [q_n_i_unpacked, q_n_plus_1_i_unpacked], self.order_of_integrator))
        # Ascending-order derivative: d[k] = c[k+1] * (k+1), padded with a trailing zero
        coefficients_of_differentiated_path = [0 for i in coefficients]
        for index, c in enumerate(coefficients):
            if index < len(coefficients) - 1:
                coefficients_of_differentiated_path[index] = coefficients[index + 1] * (index + 1)
            else:
                coefficients_of_differentiated_path[index] = 0

        return self.evaluate_polynomial(coefficients_of_differentiated_path, t)
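
The coefficient loop above is plain ascending-order polynomial differentiation; a small standalone check against numpy.polynomial (not part of the original code):

import numpy as np
from numpy.polynomial import polynomial as P

c = np.array([2.0, 3.0, 5.0])  # q(t) = 2 + 3t + 5t^2, ascending coefficients
d = [c[k + 1] * (k + 1) if k < len(c) - 1 else 0.0 for k in range(len(c))]
assert np.allclose(d[:-1], P.polyder(c))  # dq/dt = 3 + 10t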
Example No. 6
    def __init__(self,
                 KS_amplitude_file,
                 efficiency=None,
                 binning_file="input/KsPiPi_optimal.pickle"):
        A_KS, s12, s13 = uf.load_amplitude(KS_amplitude_file)
        self.amplitude = Amplitude(A_KS, s12, s13)
        self.s12 = s12
        self.s13 = s13

        bin_def, bin_def_s12, bin_def_s13 = uf.load_binning(binning_file)
        self.binning = Binning(bin_def, bin_def_s12, bin_def_s13, s12, s13)

        if efficiency is None:
            efficiency = DefaultEfficiency(s12, s13)

        self.efficiency = efficiency

        self.Fi, self.ci, self.si = self.calculate_Fi_ci_si()
        self.Fi_inv = np.flip(self.Fi, 0)
        self.sqrt_Fi_Fi_inv = np.sqrt(self.Fi * self.Fi_inv)

        self.bin_num = self.binning.get_number_of_bins()

        self.channel_num = 1  # only fit one Dh channel

Example No. 7
def construct_numpy_representation_with_ordered_pairs_of_rankings_and_features_and_weights(
        features: pd.DataFrame,
        performances: pd.DataFrame,
        max_pairs_per_instance=100,
        seed=1,
        order="asc",
        skip_value=None):
    """Get numpy representation of features, performances and rankings

    Arguments:
        features {pd.DataFrame} -- Feature values
        performances {pd.DataFrame} -- Performances of algorithms

    Returns:
        tuple of np.ndarray -- Quadruple of numpy ndarrays: the first stores
        the feature values, the second the algorithm performances, the third
        the algorithm rankings and the fourth the sampled pair weights
    """
    rankings, weights = sample_pairs(performances,
                                     pairs_per_instance=max_pairs_per_instance,
                                     seed=seed,
                                     skip_value=skip_value)
    joined = rankings.join(features).join(performances,
                                          lsuffix="_rank",
                                          rsuffix="_performance")
    np_features = joined[features.columns.values].values
    np_performances = joined[list(performances.columns)].values
    np_rankings = joined[list(rankings.columns)].values + 1
    # Reorder each row's performances to follow the sampled ranking (1-based)
    np_performances = np_performances[
        np.arange(np_performances.shape[0])[:, np.newaxis], np_rankings - 1]
    print("performances", performances.head())
    print("labels", rankings.head())
    print("weight", weights.head())
    np_weights = weights.to_numpy()
    np_weights = np.amax(np_weights, axis=1)
    # print("np_weights", np_weights)
    np_weights = np.exp2(np_weights)
    # print("exp np_weights", np_weights)

    # TODO check for maximization problems
    if order == "desc":
        np_rankings = np.flip(np_rankings, axis=1)
        np_performances = np.flip(np_performances, axis=1)

    return np_features, np_performances, np_rankings, np_weights
Example No. 8
def apply_carb_curve(column):
    assert column.ctype == "carb"

    coeffs = carb_curve(Wtime, column.meta["delay"] / 5,
                        column.meta["duration"] / 5)
    coeffs = np.flip(coeffs, 0)

    return column.series.rolling(window=Whoriz).apply(
        lambda ucis: np.dot(ucis, coeffs), raw=True)
Example No. 9
def flip(var, axis=(0,), backend='autograd'):
    if backend == 'autograd':
        return anp.flip(var, axis=axis)
    elif backend == 'pytorch':
        try:
            _ = len(axis)  # a sequence of dims
            return tc.flip(var, dims=axis)
        except TypeError:  # a bare int was passed
            return tc.flip(var, dims=[axis])
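
A usage sketch, assuming autograd.numpy as anp and torch as tc are imported as in the snippet; the pytorch branch wraps a bare integer axis into a list:

import autograd.numpy as anp
import torch as tc

a = anp.arange(6).reshape(2, 3)
t = tc.arange(6).reshape(2, 3)

flip(a, axis=[0], backend='autograd')    # reverses the rows of the array
flip(t, axis=0, backend='pytorch')       # a bare int gets wrapped into [0]
flip(t, axis=[0, 1], backend='pytorch')  # several dims at once also work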
Example No. 10
def construct_numpy_representation_with_ordered_pairs_of_rankings_and_features(
        features: pd.DataFrame,
        performances: pd.DataFrame,
        max_pairs_per_instance=100,
        seed=1,
        order="asc",
        skip_value=None):
    """Get numpy representation of features, performances and rankings

    Arguments:
        features {pd.DataFrame} -- Feature values
        performances {pd.DataFrame} -- Performances of algorithms
        max_pairs_per_instance {int} -- Upper bound on the number of sampled pairs per instance
        seed {int} -- Seed used for random sampling
        order {str} -- Either "asc" or "desc"; decides whether pairs are in ascending or descending order
        skip_value {float} -- Pairs containing this value are skipped during sampling; the intended use is ignoring timed-out algorithm runs in the context of algorithm selection

    Returns:
        tuple of np.ndarray -- Triple of numpy ndarrays: the first stores the
        feature values, the second the algorithm performances and the third
        the algorithm rankings
    """
    rankings, weights = sample_pairs(performances,
                                     pairs_per_instance=max_pairs_per_instance,
                                     seed=seed,
                                     skip_value=skip_value)
    joined = rankings.join(features).join(performances,
                                          lsuffix="_rank",
                                          rsuffix="_performance")
    np_features = joined[features.columns.values].values
    np_performances = joined[list(performances.columns)].values
    np_rankings = joined[list(rankings.columns)].values + 1
    # Reorder each row's performances to follow the sampled ranking (1-based)
    np_performances = np_performances[
        np.arange(np_performances.shape[0])[:, np.newaxis], np_rankings - 1]
    # TODO check for maximization problems
    if order == "desc":
        np_rankings = np.flip(np_rankings, axis=1)
        np_performances = np.flip(np_performances, axis=1)

    return np_features, np_performances, np_rankings
Example No. 11
def attribute_parameters(curve, index, values, nparam=24, nperiod=288):
    assert curve.shape == (nperiod, ), f"bad curve shape {curve.shape}"

    roll = np.zeros([nperiod, nperiod], dtype=int)
    curve_index = np.flip(np.arange(nperiod))
    for i in range(roll.shape[0]):
        # Row i indexes a circularly shifted copy of the reversed curve,
        # so that X[i, j] == curve[(i - j) % nperiod] after the lookup below.
        roll[i, :] = np.roll(curve_index, i + 1)
    X = curve[roll]

    for i, ivs in enumerate(index_to_intervals(index, nperiod=nperiod)):
        for beg, end in ivs:
            X[:, beg:end] *= values[i]
    x = np.sum(X, axis=1)
    x = np.mean(np.reshape(x, (nparam, nperiod // nparam)), axis=1)
    return x
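
On a toy curve (hypothetical values), the roll construction yields a circulant lookup table with X[i, j] == curve[(i - j) % nperiod]:

import numpy as np

nperiod = 4
curve = np.array([10., 20., 30., 40.])
curve_index = np.flip(np.arange(nperiod))  # [3, 2, 1, 0]
roll = np.array([np.roll(curve_index, i + 1) for i in range(nperiod)])
X = curve[roll]
assert X[0].tolist() == [10., 40., 30., 20.]  # curve[(0 - j) % 4]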
Example No. 12
    def center_of_mass_parts(self, angles):
        if len(self._com_shifts) != 0:
            points = np.array([[0.], [0.], [0.], [1.]])
            it = 0
            for i, mat in enumerate(reversed(self._matrices(angles))):
                points = np.dot(mat, points)
                if self._types[i] == 0:  # link
                    points = np.concatenate(
                        (points, np.array([self._com_shifts[it]]).transpose()),
                        axis=1)
                    it += 1
            return np.flip(points.transpose()[1:, :3], axis=0)
        else:
            raise Exception(
                "Parameters not found: pass the links' center-of-mass shifts "
                "to the Actuator constructor.")
Example No. 13
def test_fwdAddLKParamComp():
	parser = argparse.ArgumentParser()
	parser.add_argument("img")

	preprocess = transforms.Compose([
		transforms.ToTensor(),
	])

	args = parser.parse_args()

	img = Image.open(args.img)
	img_w, img_h = img.size
	aspect = img_w / img_h
	img_h_sm = 200
	img_w_sm = ceil(aspect * img_h_sm)

	img_tens = preprocess(img.resize((img_w_sm, img_h_sm)))
	img_tens = torch.unsqueeze(img_tens, 0)

	p_gt = torch.FloatTensor([[
		[-.1],[0],[10],[0],[-.1],[10],[0],[0]
		]])
	
	img_tens_w, mask_tens_w = dlk.warp_hmg(img_tens, p_gt)

	# transforms.ToPILImage()(img_tens[0,:,:,:]).show()
	# transforms.ToPILImage()(img_tens_w[0,:,:,:]).show()

	# p_falk = fwdAddLK(img_tens, img_tens_w, 1e-3)

	mot_par = torch.FloatTensor([
		[[-.05],[0.01],[5],[0],[-.05],[5],[0.0006],[0.0005]],
		[[0],[0],[2],[0],[0],[2],[0],[0]],
		[[0],[0],[0],[0],[0],[0],[0],[0]],
		])

	q_ind = 2

	mot_par_falkpc = fwdAddLKParamComp(img_tens, img_tens_w, 1e-3, mot_par, q_ind)

	print(mot_par_falkpc)

	print(reduce(np.dot, np.flip(dlk.param_to_H(mot_par).numpy(), axis=0)))
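
The final line composes the homography stack right-to-left; a quick standalone check of the flip-then-reduce idiom on random matrices (not from the test):

import numpy as np
from functools import reduce

H = np.random.rand(3, 3, 3)  # a stack of three 3x3 transforms
composed = reduce(np.dot, np.flip(H, axis=0))
assert np.allclose(composed, H[2] @ H[1] @ H[0])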
Example No. 14
def apply_insulin_curve(column):
    assert column.ctype in ["insulin", "basal", "bolus"]

    coeffs = expia1(
        Wtime,
        column.meta["delay"] / 5,
        column.meta["peak"] / 5,
        column.meta["duration"] / 5,
    )
    # We flip them because we're going to be applying these to
    # "trailing" data.
    coeffs = np.flip(coeffs, 0)

    # ia indicates insulin activity. This is computed by adding up
    # the contributions of each delivery over a rolling window.
    # Conveniently, this is equivalent to taking the dot product of
    # the deliveries in the window with the coefficients computed
    # above.
    return column.series.rolling(window=Whoriz).apply(
        lambda pids: np.dot(pids, coeffs), raw=True)
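
The flip-then-rolling-dot construction is exactly a causal discrete convolution; a toy check with a hypothetical three-step curve:

import numpy as np
import pandas as pd

coeffs = np.array([0.5, 0.3, 0.2])
deliveries = pd.Series([1.0, 0.0, 2.0, 0.0, 1.0])

rolled = deliveries.rolling(window=3).apply(
    lambda x: np.dot(x, np.flip(coeffs, 0)), raw=True)
conv = np.convolve(deliveries, coeffs)[:len(deliveries)]
assert np.allclose(rolled[2:], conv[2:])  # equal once the window is full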
Example No. 15
    def _get_weight_matrix(self, tt, i, store=True):
        """
        Weight matrix for the trapezoidal rule.
        """
        try:
            return self._weights[i]

        except (AttributeError, KeyError):
            # no cache yet (AttributeError) or no entry for this i (KeyError)

            def _get_weights(tt):
                tt = np.asarray(tt)
                W = np.zeros((tt.size, tt.size))
                h = np.diff(tt)
                for k in range(len(tt)):  # k avoids shadowing the outer i
                    W[k, :k] += .5 * h[:k]
                    W[k, 1:k + 1] += .5 * h[:k]
                return W

            ttb = tt[:i + 1]
            ttf = tt[i:]

            # Integrate backwards over ttb, then flip back into forward order
            Wb = _get_weights(ttb[::-1])
            Wb = np.flip(Wb, (0, 1))
            Wf = _get_weights(ttf)

            W = np.zeros((len(tt), len(tt)))
            W[:i+1, :i+1] += Wb
            W[i:, i:] += Wf

            if store:
                if hasattr(self, '_weights'):
                    self._weights[i] = W
                else:
                    self._weights = {i: W}

            return W
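
A standalone sanity check of the inner helper (copied out of the method for illustration): row k of the weight matrix integrates from tt[0] to tt[k] by the trapezoidal rule.

import numpy as np

def trapezoid_weights(tt):
    tt = np.asarray(tt)
    W = np.zeros((tt.size, tt.size))
    h = np.diff(tt)
    for k in range(len(tt)):
        W[k, :k] += .5 * h[:k]
        W[k, 1:k + 1] += .5 * h[:k]
    return W

tt = np.linspace(0.0, 1.0, 101)
approx = trapezoid_weights(tt) @ tt ** 2  # cumulative integral of t^2
assert abs(approx[-1] - 1.0 / 3.0) < 1e-3  # exact value is 1/3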
Example No. 17
def transpose_indices(indices):
    # Swap the (row, col) index rows, giving the indices of the transposed sparse matrix
    return npa.flip(indices, axis=0)
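
A small check of the idea with plain numpy and scipy (npa above presumably aliases autograd's numpy): flipping the stacked (row, col) indices along axis 0 yields the indices of the transpose.

import numpy as np
from scipy import sparse

indices = np.array([[0, 1, 2],   # rows
                    [2, 0, 1]])  # cols
vals = np.array([1.0, 2.0, 3.0])

A = sparse.coo_matrix((vals, indices), shape=(3, 3))
At = sparse.coo_matrix((vals, np.flip(indices, axis=0)), shape=(3, 3))
assert np.allclose(A.T.toarray(), At.toarray())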
Example No. 18
    def set_Fi(self, Fi):
        self.Fi = Fi
        self.Fi_inv = np.flip(self.Fi, 0)
        self.sqrt_Fi_Fi_inv = np.sqrt(self.Fi * self.Fi_inv)
        self.bin_num = len(Fi) // 2  # bins run -N..N without 0, so N = len(Fi) // 2
Example No. 19
    print(
        'model accuracy:',
        np.mean(
            np.equal(
                np.argmax(
                    forward(params, inputs=inputs, hps=hps)[-1],
                    axis=1,
                ), labels)))

    g = 20
    m1, m2 = [-1, 2]
    mesh = np.array(np.meshgrid(np.linspace(m1, m2, g),
                                np.linspace(m1, m2, g))).reshape(2, g * g).T

    pred_ax = plt.subplot(gs[0, 1])
    pred_ax.imshow(
        np.flip(forward(params, inputs=mesh, hps=hps)[-1][:, 0].reshape(g, g),
                axis=0),
        extent=[m1, m2, m1, m2],
        cmap='binary',
    )
    pred_ax.scatter(*params['input']['hidden']['bias'],
                    s=np.abs(params['hidden']['output']['weights'][:, 0]) *
                    200,
                    c=[
                        'red' if w < 0 else 'black'
                        for w in params['hidden']['output']['weights'][:, 0]
                    ])
    pred_ax.scatter(*inputs.T, c=[cm[l] for l in labels], alpha=.05)

    pred_ax.set_xticks([])
    pred_ax.set_yticks([])
Example No. 20
def test_img_seq(args):

	img_sz = 200
	img_tens = open_img_as_tens(args.img, img_sz)
	num_seq = 3

	# size scale range
	min_scale = 0.9
	max_scale = 1.1

	# rotation range (-angle_range, angle_range)
	angle_range = 4 # degrees

	# projective variables (p7, p8)
	projective_range = 0

	# translation (p3, p6)
	translation_range = 8 # pixels

	p_gt = torch.zeros(num_seq, 8, 1)

	p_gt[0, :, :] = torch.FloatTensor([[-0.4,0,0,0,-0.4,0,0,0]]).t()

	for i in range(p_gt.shape[0] - 1):
		p_gt[i + 1, :, :] = gen_rand_p(min_scale, max_scale, angle_range, projective_range, translation_range)

	p_gt_comp = np.zeros((num_seq, 8, 1))

	for i in range(p_gt_comp.shape[0]):
		H_gt = p_to_H(p_gt[0 : i + 1, :, :])
		H_gt_comp_i = reduce(np.dot, np.flip(H_gt, axis=0))
		p_gt_comp_i = H_to_p(H_gt_comp_i)
		p_gt_comp[i] = p_gt_comp_i

	I = torch.zeros(img_tens.shape)
	I = np.tile(I, (num_seq, 1, 1, 1))

	for i in range(p_gt_comp.shape[0]):
		img_tens_w, _ = dlk.warp_hmg(img_tens, torch.from_numpy(p_gt_comp[i : i + 1, :, :]).float())
		I[i, :, :, :] = img_tens_w

	P = np.zeros((num_seq - 1, 8, 1))

	T = np.array([num_seq - 1])

	V = np.array(
		[
		 np.arange(50),
		 np.arange(num_seq - 1),
		],
		dtype=object,  # ragged rows need an explicit object dtype on modern numpy
	)

	V = np.delete(V, 0)

	# T = np.array([2, 5, 8])

	# V = np.array(
	# 	[
	# 	 np.arange(50),
	# 	 np.array([0, 1, 3, 4]),
	# 	 np.array([3, 4, 6, 7]),
	# 	 np.array([6, 7, 9, 10, 11])
	# 	]
	# )	
	# V = np.delete(V, 0)

	tol = 1e-4

	P_opt_dep = optimize(I, P, T, V, tol, 1)

	# T = np.array([1])

	# V = np.array(
	# 	[
	# 	 np.arange(50),
	# 	 np.array([0, 2]),
	# 	]
	# )		

	# V = np.delete(V, 0)

	# T = np.array([1, 3, 5, 7, 9, 10])

	# V = np.array(
	# 	[
	# 	 np.arange(50),
	# 	 np.array([0, 2]),
	# 	 np.array([2, 4]),
	# 	 np.array([4, 6]),
	# 	 np.array([6, 8]),
	# 	 np.array([8, 10]),
	# 	 np.array([11])
	# 	]
	# )
	# V = np.delete(V, 0)

	# P_opt_odom = optimize(I, P, T, V, tol, 1)

	print('')
	print('Corners Dep Calc:')
	loss_dep = corner_loss(P_opt_dep, p_gt[1:, :, :], img_sz)

	print('')
	print('Corners No-op Calc:')
	loss_noop = corner_loss(P, p_gt[1:, :, :], img_sz)

	# print('')
	# print('Corner Odom Calc:')
	# loss_odom = corner_loss(P_opt_odom, p_gt[1:, :, :], img_sz)

	print('')
	print('loss noop: {:.3f}'.format(loss_noop))

	print('')
	print('loss dep: {:.3f}'.format(loss_dep))

	# print('')
	# print('loss odom: {:.3f}'.format(loss_odom))

	print('')
	print('P_opt_dep:')
	print(P_opt_dep)

	# print('P_opt_odom:')
	# print(P_opt_odom)

	print('')
	print('P_gt:')
	print(p_gt[1:,:,:].numpy())

	plt.figure()

	for i in range(num_seq):
		plt.subplot(2, num_seq, i + 1)

		plt.imshow(plt_axis_match_np(I[i, :, :, :]))
		plt.title('I[{:d}]'.format(i))

		plt.subplot(2, num_seq, i + 1 + num_seq)
		plt.title('I_LK[{:d}]'.format(i))

		if (i == 0):
			I_w = torch.from_numpy(I[i : i + 1, :, :, :])
		else:
			I_w, _ = dlk.warp_hmg(I_w, torch.from_numpy(P_opt_dep[i - 1 : i, :, :]).float())

		plt.imshow(plt_axis_match_tens(I_w[0, :, :, :]))

	plt.show()
Example No. 21
    def populate_coordinates(self):
        # Populates a variable called self.coordinates with the coordinates of the airfoil.
        name = self.name.lower().strip()

        # If it's a NACA 4-series airfoil, try to generate it
        if "naca" in name:
            nacanumber = name.split("naca")[1]
            if nacanumber.isdigit():
                if len(nacanumber) == 4:

                    # Parse
                    max_camber = int(nacanumber[0]) * 0.01
                    camber_loc = int(nacanumber[1]) * 0.1
                    thickness = int(nacanumber[2:]) * 0.01

                    # Set number of points per side
                    n_points_per_side = 100

                    # Referencing https://en.wikipedia.org/wiki/NACA_airfoil#Equation_for_a_cambered_4-digit_NACA_airfoil
                    # from here on out

                    # Make uncambered coordinates
                    x_t = cosspace(n_points=n_points_per_side)  # Generate some cosine-spaced points
                    y_t = 5 * thickness * (
                            + 0.2969 * np.power(x_t, 0.5)
                            - 0.1260 * x_t
                            - 0.3516 * np.power(x_t, 2)
                            + 0.2843 * np.power(x_t, 3)
                            - 0.1015 * np.power(x_t, 4)  # 0.1015 is the classical value; use 0.1036 for a sharp TE
                    )

                    if camber_loc == 0:
                        camber_loc = 0.5  # prevents divide by zero errors for things like naca0012's.

                    # Get camber
                    y_c_piece1 = max_camber / camber_loc ** 2 * (
                            2 * camber_loc * x_t[x_t <= camber_loc]
                            - x_t[x_t <= camber_loc] ** 2
                    )
                    y_c_piece2 = max_camber / (1 - camber_loc) ** 2 * (
                            (1 - 2 * camber_loc) +
                            2 * camber_loc * x_t[x_t > camber_loc]
                            - x_t[x_t > camber_loc] ** 2
                    )
                    y_c = np.hstack((y_c_piece1, y_c_piece2))

                    # Get camber slope
                    dycdx_piece1 = 2 * max_camber / camber_loc ** 2 * (
                            camber_loc - x_t[x_t <= camber_loc]
                    )
                    dycdx_piece2 = 2 * max_camber / (1 - camber_loc) ** 2 * (
                            camber_loc - x_t[x_t > camber_loc]
                    )
                    dycdx = np.hstack((dycdx_piece1, dycdx_piece2))
                    theta = np.arctan(dycdx)

                    # Combine everything
                    x_U = x_t - y_t * np.sin(theta)
                    x_L = x_t + y_t * np.sin(theta)
                    y_U = y_c + y_t * np.cos(theta)
                    y_L = y_c - y_t * np.cos(theta)

                    # Flip upper surface so it's back to front
                    x_U, y_U = np.flip(x_U), np.flip(y_U)

                    # Trim 1 point from lower surface so there's no overlap
                    x_L, y_L = x_L[1:], y_L[1:]

                    x = np.hstack((x_U, x_L))
                    y = np.hstack((y_U, y_L))

                    coordinates = np.column_stack((x, y))

                    self.coordinates = coordinates
                    return
                else:
                    print("Unfortunately, only 4-series NACA airfoils can be generated at this time.")

        # Try to read from airfoil database
        try:
            import importlib.resources
            from . import airfoils
            raw_text = importlib.resources.read_text(airfoils, name + '.dat')
            trimmed_text = raw_text[raw_text.find('\n'):]

            coordinates1D = np.fromstring(trimmed_text, sep='\n')  # returns the coordinates in a 1D array
            assert len(
                coordinates1D) % 2 == 0, 'File was found in airfoil database, but it could not be read correctly!'  # Should be even

            coordinates = np.reshape(coordinates1D, (-1, 2))
            self.coordinates = coordinates
            return

        except FileNotFoundError:
            print("File was not found in airfoil database!")
Example No. 22
    xx, yy = np.meshgrid(np.linspace(m1, m2, g), np.linspace(m1, m2, g))
    mesh = np.array([xx, yy]).reshape(2, g * g).T

    fig = plt.figure(figsize=[8, 4])
    gs = GridSpec(1, 2)

    hidden_activation, output_activation = forward(params,
                                                   inputs=mesh,
                                                   hps=hps)

    ##__Surface Plot
    surface_ax = plt.subplot(gs[:, 0], projection='3d')
    surface_ax.plot_surface(
        xx,
        yy,
        np.flip(hidden_activation.sum(axis=1).reshape(g, g), axis=0),
        alpha=.5,
        cmap='viridis',
    )
    clean3d(surface_ax)
    surface_ax.set_title('Surface')

    ##__Flat Plot
    flat_ax = plt.subplot(gs[:, 1])
    flat_ax.imshow(
        np.flip(hidden_activation.sum(axis=1).reshape(g, g), axis=0),
        cmap='viridis',
        extent=[m1, m2, m1, m2],
    )
    flat_ax.scatter(*inputs.T,
                    c=['purple' if l == 0 else 'orange' for l in labels])
Example No. 23
    def __init__(self, bins, Fs, cs):
        self.sort_bins(bins)
        self.Fs = Fs
        self.F_inv = np.flip(self.Fs)
        self.cs = cs
Example No. 24
def calculate_states(data, N_DIMENSIONS=2, METRIC='rank', SINGLE_SESSION_FIT=False, SAVE_STATES=False):
    data = data.copy()
    data['idx'] = data['mouse'] + data['date'].astype(str)
    data['rt'] = data['response_times'] - data['goCue_trigger_times']
    data['it'] = data['goCue_trigger_times'] - data['start_time']
    data['mt'] = data['first_move'] - data['goCue_trigger_times']
    data['high_prob'] = np.nan
    data.loc[(data['probabilityLeft']==0.1) & (data['choice_1']==0), 'high_prob'] = 0
    data.loc[(data['probabilityLeft']==0.1) & (data['choice_1']==1), 'high_prob'] = 1
    data.loc[(data['probabilityLeft']==0.7) & (data['choice_1']==1), 'high_prob'] = 0
    data.loc[(data['probabilityLeft']==0.7) & (data['choice_1']==0), 'high_prob'] = 1

    # Z-score Rts and fit hmm
    data['rtz'] = np.nan
    data['itz'] = np.nan
    data['mtz'] = np.nan
    
    if METRIC == 'rank':
        for mouse in data.mouse.unique():
            print(mouse)
            for date in data.loc[data['mouse'] == mouse, 'date'].unique():
                mask = (data['mouse'] == mouse) & (data['date'] == date)
                data.loc[mask, 'rtz'] = (data.loc[mask, 'rt'].rank().to_numpy() - 1) / data.loc[mask, 'rt'].rank().max()
                data.loc[mask, 'itz'] = (data.loc[mask, 'it'].rank().to_numpy() - 1) / data.loc[mask, 'it'].rank().max()
                print(date)

    if METRIC == 'zscore':
        for mouse in data.mouse.unique():
            print(mouse)
            for date in data.loc[data['mouse'] == mouse, 'date'].unique():
                mask = (data['mouse'] == mouse) & (data['date'] == date)
                data.loc[mask, 'rtz'] = zscore(data.loc[mask, 'rt'].to_numpy(), nan_policy='omit')
                data.loc[mask, 'itz'] = zscore(data.loc[mask, 'it'].to_numpy(), nan_policy='omit')
                print(date)

    if SINGLE_SESSION_FIT:
        learned_transition_mat = np.zeros([2, 2])
        data2 = pd.DataFrame()
        counter = 0
        true_ll = 0
        for mouse in data.mouse.unique():
            for date in data.loc[data['mouse'] == mouse, 'date'].unique():
                data1 = data.loc[(data['mouse'] == mouse) & (data['date'] == date)].copy()
                if N_DIMENSIONS == 1:
                    obs = data1.loc[(~np.isnan(data1['rtz'])) & (~np.isnan(data1['itz'])), ['rtz']].to_numpy()
                    obs = obs.reshape(len(obs), 1)
                    hmm, ll = fit_hmm(obs, obs_dim=1)
                if N_DIMENSIONS == 2:
                    obs = data1.loc[(~np.isnan(data1['rtz'])) & (~np.isnan(data1['itz'])), ['rtz', 'itz']].to_numpy()
                    obs = obs.reshape(len(obs), 2)
                    hmm, ll = fit_hmm(obs, obs_dim=2)
                # Analysis of performance across states
                obs_states = hmm.most_likely_states(obs)
                data1['state'] = np.nan
                data1.loc[(~np.isnan(data1['rtz'])) & (~np.isnan(data1['itz'])), 'state'] = obs_states
                learned_transition_mat1 = hmm.transitions.transition_matrix
                if data1.loc[data1['state']==1,'rt'].median()<data1.loc[data1['state']==0,'rt'].median():
                    data1.loc[data1['state']==1,'state'] = 'Engaged'
                    data1.loc[data1['state']==0,'state'] = 'Disengaged'
                    engaged_state=1
                else:
                    data1.loc[data1['state']==1,'state'] = 'Disengaged'
                    data1.loc[data1['state']==0,'state'] = 'Engaged'
                    engaged_state=0
                    learned_transition_mat1 = np.flip(learned_transition_mat1)
                data2=pd.concat([data2,data1])
                learned_transition_mat = learned_transition_mat1 + learned_transition_mat
                counter+=1
                true_ll += hmm.log_probability(obs)
        plot_state_statistics(data2)
        plt.show()
        plot_transition_matrix(learned_transition_mat/counter,engaged_state)
        plt.show()
        plot_transitions(data2)
        plt.show()
        if SAVE_STATES:
            save_states(data2)
        return data2, true_ll
    if not SINGLE_SESSION_FIT:
        if N_DIMENSIONS == 1:
            obs = data.loc[(~np.isnan(data['rtz'])) & (~np.isnan(data['itz'])), ['rtz']].to_numpy()
            obs = obs.reshape(len(obs), 1)
            hmm, ll = fit_hmm(obs, obs_dim=1)
        if N_DIMENSIONS == 2:
            obs = data.loc[(~np.isnan(data['rtz'])) & (~np.isnan(data['itz'])), ['rtz', 'itz']].to_numpy()
            obs = obs.reshape(len(obs), 2)
            hmm, ll = fit_hmm(obs, obs_dim=2)
        # Analysis of performance across states
        obs_states = hmm.most_likely_states(obs)
        data['state'] = np.nan
        data.loc[(~np.isnan(data['rtz'])) & (~np.isnan(data['itz'])), 'state'] = obs_states
        if data.loc[data['state']==1,'rt'].median()<data.loc[data['state']==0,'rt'].median():
            data.loc[data['state']==1,'state'] = 'Engaged'
            data.loc[data['state']==0,'state'] = 'Disengaged'
            engaged_state=1
        else:
            data.loc[data['state']==1,'state'] = 'Disengaged'
            data.loc[data['state']==0,'state'] = 'Engaged'
            engaged_state=0
        data.groupby(['state']).mean()['outcome']
        learned_transition_mat = hmm.transitions.transition_matrix
        true_ll = hmm.log_probability(obs)
        plot_state_statistics(data)
        plt.show()
        plot_transition_matrix(learned_transition_mat,engaged_state)
        plt.show()
        plot_transitions(data)
        plt.show()
        if SAVE_STATES:
            save_states(data)
        return data, true_ll
Example No. 25
def fwdAddLKParamComp(img, tmpl, tol, mot_par, q_ind):
	batch_size, k, h, w = img.size()

	dq = torch.zeros(batch_size, 8, 1)

	crit = 0

	itn = 1

	grad_func = dlk.GradientBatch()
	inv_func = dlk.InverseBatchFun

	img_gradx, img_grady = grad_func(img)

	while (itn == 1) or (crit > tol):
		H_tot = reduce(np.dot, np.flip(dlk.param_to_H(mot_par).numpy(), axis=0))
		H_tot = np.expand_dims(H_tot, 0)
		pq = dlk.H_to_param(torch.from_numpy(H_tot))

		img_w, mask_w = dlk.warp_hmg(img, pq)
		mask_w.unsqueeze_(1)
		mask_w = mask_w.repeat(1, k, 1, 1)
		tmpl_mask = tmpl.mul(mask_w)

		res = tmpl_mask - img_w
		res = res.view(batch_size, k * h * w, 1)

		#### - dp/dq

		dpdq = torch.zeros(batch_size, 8, 8)
		mot_par_np = np.squeeze(mot_par.numpy())

		def compute_pq(q_param):
			q_mat = ppba.p_to_H(q_param)
			p_mat = ppba.p_to_H(mot_par_np)

			p_mat = np.concatenate((
				p_mat[0:q_ind, :, :],
				q_mat,
				p_mat[q_ind + 1:, :, :]
				), axis=0)

			p_mat_reduce = np.eye(3)

			for i in range(p_mat.shape[0]):
				p_mat_reduce = np.dot(p_mat[i], p_mat_reduce)

			p_par = ppba.H_to_p(p_mat_reduce)
			return p_par

		# using auto-grad library for computing jacobian (8x8)
		grad_pq = jacobian(compute_pq)

		q_par = np.transpose(mot_par[q_ind, :, :].numpy())

		# evaluate 8x8 jacobian and store it
		dpdq[0, :, :] = \
			torch.from_numpy(grad_pq(q_par).squeeze(axis=0).squeeze(axis=1))

		# print(dpdq[0, :, :])

		#### - dI/dw and dW/dp

		img_gradx_w, _ = dlk.warp_hmg(img_gradx, pq)
		img_grady_w, _ = dlk.warp_hmg(img_grady, pq)

		img_gradx_w = img_gradx_w.view(batch_size, k * h * w, 1)
		img_grady_w = img_grady_w.view(batch_size, k * h * w, 1)

		x = torch.arange(w)
		y = torch.arange(h)
		X, Y = dlk.meshgrid(x, y)
		H_pq = dlk.param_to_H(pq)
		xy = torch.cat((X.view(1, X.numel()), Y.view(1, Y.numel()), torch.ones(1, X.numel())), 0)
		xy = xy.repeat(batch_size, 1, 1)
		xy_warp = H_pq.bmm(xy)

		# extract warped X and Y, normalizing the homog coordinates
		X_warp = xy_warp[:,0,:] / xy_warp[:,2,:]
		Y_warp = xy_warp[:,1,:] / xy_warp[:,2,:]

		X_warp = X_warp.view(X_warp.numel(), 1)
		Y_warp = Y_warp.view(Y_warp.numel(), 1)

		X_warp = X_warp.repeat(batch_size, k, 1)
		Y_warp = Y_warp.repeat(batch_size, k, 1)

		dIdp = torch.cat((
			X_warp.mul(img_gradx_w), 
			Y_warp.mul(img_gradx_w),
			img_gradx_w,
			X_warp.mul(img_grady_w),
			Y_warp.mul(img_grady_w),
			img_grady_w,
			-X_warp.mul(X_warp).mul(img_gradx_w) - X_warp.mul(Y_warp).mul(img_grady_w),
			-X_warp.mul(Y_warp).mul(img_gradx_w) - Y_warp.mul(Y_warp).mul(img_grady_w)),2)

		#### - dIdq

		dIdq = dIdp.bmm(dpdq)

		#### - compute dq

		dIdq_t = dIdq.transpose(1, 2)

		invH = inv_func(dIdq_t.bmm(dIdq))

		dq = invH.bmm(dIdq_t.bmm(res))

		crit = float(dq.norm(p=2,dim=1,keepdim=True).max())

		mot_par[q_ind, :, :] = mot_par[q_ind, :, :] + dq[0, :, :]

		itn = itn + 1

		print('itn: {:d}, crit: {:.2f}'.format(itn, crit))

	print('finished at iteration ', itn)

	return mot_par