def remap_actions(self):
        """Map actions to correct range for neural network.

        Modifies:
            self._actions values are mapped to a different range.
        """

        # shorten names for clarity
        js_speed = self._sim_config['speed']
        js_ang_speed = self._sim_config['ang_speed']
        py_speed = self._py_config['speed']
        py_ang_speed = self._py_config['ang_speed']

        # create maps to interpolate values
        speed_map = interp([js_speed['min'], js_speed['max']],
                           [py_speed['min'], py_speed['max']])
        ang_speed_map = interp([js_ang_speed['min'], js_ang_speed['max']],
                               [py_ang_speed['min'], py_ang_speed['max']])

        # loop through data to convert actions to correct range
        for idx, (vel, ang_vel) in enumerate(self._actions):
            vel = self.__clip(vel,
                              min_val=js_speed['min'],
                              max_val=js_speed['max'])
            ang_vel = self.__clip(ang_vel,
                                  min_val=js_ang_speed['min'],
                                  max_val=js_ang_speed['max'])
            self._actions[idx] = [
                float(speed_map(vel)),
                float(ang_speed_map(ang_vel))
            ]
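
Most examples on this page call a two-argument interp(x, y) that returns a callable. A minimal sketch of such a helper, assuming it is simply an alias for scipy.interpolate.interp1d (an assumption, not confirmed by these snippets):

import numpy as np
from scipy.interpolate import interp1d

def interp(x, y, **kwargs):
    # hypothetical alias matching the interp(x, y) -> callable usage above
    return interp1d(x, y, **kwargs)

# two-point linear map, e.g. mapping [0, 255] onto [-1.0, 1.0]
speed_map = interp([0, 255], [-1.0, 1.0])
print(float(speed_map(128)))  # ~0.0039
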
    def compute(self, inputs, outputs):
        n_secs = self.options["number_of_sections"]
        # Prepare inputs ---------------------------------------------------------------------------
        x_root = (
            inputs["data:geometry:wing:MAC:at25percent:x"][0]
            + inputs["data:geometry:horizontal_tail:MAC:at25percent:x:from_wingMAC25"][0]
            - 0.25 * inputs["data:geometry:horizontal_tail:MAC:length"][0]
            - inputs["data:geometry:horizontal_tail:MAC:at25percent:x:local"][0]
        )
        x_tip = x_root + inputs["data:geometry:horizontal_tail:span"][0] / 2 * np.tan(
            inputs["data:geometry:horizontal_tail:sweep_0"][0]
        )
        y_tip = inputs["data:geometry:horizontal_tail:span"][0] / 2
        z_root = inputs["data:geometry:horizontal_tail:root:z"][0]
        z_tip = inputs["data:geometry:horizontal_tail:tip:z"][0]

        x_interp = [x_root, x_tip]
        y_interp = [0.0, y_tip]
        z_interp = [z_root, z_tip]

        f_x = interp(y_interp, x_interp)
        f_z = interp(y_interp, z_interp)

        #  Nodes coordinates interpolation ---------------------------------------------------------
        y_le = np.linspace(0.0, y_tip, n_secs + 1)
        x_le = f_x(y_le)
        z_le = f_z(y_le)

        #  Symmetry and sides concatenation --------------------------------------------------------
        xyz_r = np.hstack((x_le[:, np.newaxis], y_le[:, np.newaxis], z_le[:, np.newaxis]))
        xyz_l = np.hstack((x_le[:, np.newaxis], -y_le[:, np.newaxis], z_le[:, np.newaxis]))

        outputs["data:aerostructural:aerodynamic:horizontal_tail:nodes"] = np.vstack((xyz_r, xyz_l))
Example #3
    def test_Run(self):

        wt_layout = generate_random_wt_layout()
        x_g, y_g, z_g = get_T2T_gl_coord(wt_layout)
        dt = wt_layout.wt_array(attr='rotor_diameter')

        # Interpolate power curves to have the same number of elements
        nu = 22
        p_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='power_curve')[j][:,0],
                wt_layout.wt_array(attr='power_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])

        ct_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='c_t_curve')[j][:,0],
                wt_layout.wt_array(attr='c_t_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])
        for iwt, wt in enumerate(wt_layout.wt_list):
            wt.power_curve = p_c[iwt, :, :]
            wt.c_t_curve = ct_c[iwt, :, :]

        rho = np.min(wt_layout.wt_array(attr='air_density'))
        ws_ci = wt_layout.wt_array(attr='cut_in_wind_speed')
        ws_co = wt_layout.wt_array(attr='cut_out_wind_speed')
        a1, a2, a3, a4, b1, b2 = [0.5, 0.9, -0.124, 0.13, 15.63, 1.0]
        pars = [a1, a2, a3, a4, b1, b2]
        ws = 8.0
        wd = 270.0
        ti = 0.07
        ng = 5
        inputs = dict(ws=ws, wd=wd, ti=ti, ng=ng)

        P_WT, U_WT, Ct = FortranGCL.gcl(x_g, y_g, z_g, dt, p_c, ct_c, ws, wd,
                                        ti, a1, a2, a3, a4, b1, b2, ng, rho,
                                        ws_ci, ws_co)

        fgcl = FusedFGCL()
        # Setting the inputs
        fgcl.wt_layout = wt_layout
        for k, v in rosettaGCL.iteritems():
            setattr(fgcl, v, inputs[k])
        fgcl.pars = pars
        fgcl.run()
        # Save the offending layout for post-mortem if the check is about to fail
        if not np.allclose(P_WT, fgcl.wt_power, rtol=1.e-5, atol=1e-7):
            save(wt_layout, 'failures/FGCLarsenTestCase_'+ \
                time.strftime('%d_%m_%Y__%H_%M')+'.p', \
                fmt=4, proto=-1, logger=None)

        np.testing.assert_allclose(P_WT, fgcl.wt_power, rtol=1.e-5, atol=1e-7)
        np.testing.assert_allclose(U_WT,
                                   fgcl.wt_wind_speed,
                                   rtol=1.e-5,
                                   atol=1e-7)
    def compute(self, inputs, outputs):
        n_secs = self.options["number_of_sections"]

        #  Characteristic lengths and points -------------------------------------------------------
        root_y = 0.0
        tip_y = inputs["data:geometry:horizontal_tail:span"][0] / 2
        root_chord = inputs["data:geometry:horizontal_tail:root:chord"][0]
        root_skin_thickness = inputs["data:geometry:horizontal_tail:root:skin_thickness"][0]
        root_web_thickness = inputs["data:geometry:horizontal_tail:root:web_thickness"][0]
        tip_chord = inputs["data:geometry:horizontal_tail:tip:chord"][0]
        tip_skin_thickness = inputs["data:geometry:horizontal_tail:tip:skin_thickness"][0]
        tip_web_thickness = inputs["data:geometry:horizontal_tail:tip:web_thickness"][0]
        thickness_ratio = inputs["data:geometry:horizontal_tail:thickness_ratio"][0]

        # Beam properties are computed with geometric values corresponding to the inner point
        # This choice is conservative from a mass point of view.
        y = inputs["data:aerostructural:structure:horizontal_tail:nodes"][:n_secs, 1]

        n_spar = inputs["settings:aerostructural:horizontal_tail:n_spar"][0]
        a_spar = inputs["data:geometry:horizontal_tail:spar_area"][0]

        #  HTP Box chord and height computation ----------------------------------------------------
        c_box_root = 0.5 * root_chord  # Box is assumed to extend over 50% of the HTP chord
        c_box_tip = 0.5 * tip_chord
        h_box_root = root_chord * thickness_ratio
        h_box_tip = tip_chord * thickness_ratio

        #  Reference points for interpolation ------------------------------------------------------
        y_interp = [root_y, tip_y]
        c_interp = [c_box_root, c_box_tip]
        h_interp = [h_box_root, h_box_tip]
        t_skin_interp = [root_skin_thickness, tip_skin_thickness]
        t_web_interp = [root_web_thickness, tip_web_thickness]

        #  Box dimensions interpolation ------------------------------------------------------------
        f_c_box = interp(y_interp, c_interp)
        f_h_box = interp(y_interp, h_interp)
        f_t_skin = interp(y_interp, t_skin_interp)
        f_t_web = interp(y_interp, t_web_interp)

        c_box = f_c_box(y)
        h_box = f_h_box(y)
        t_skin = f_t_skin(y)
        t_web = f_t_web(y)

        #  Beam properties computation -------------------------------------------------------------
        beam_box = Beam(c_box, h_box, t_skin, t_web, n_spar, a_spar, type="box")
        beam_box.compute_section_properties()
        a_beam = beam_box.a.reshape((n_secs, 1))
        i1 = beam_box.i1.reshape((n_secs, 1))
        i2 = beam_box.i2.reshape((n_secs, 1))
        j = beam_box.j.reshape((n_secs, 1))
        props = np.hstack((a_beam, i1, i2, j))

        #  Symmetry and outputs --------------------------------------------------------------------
        outputs["data:aerostructural:structure:horizontal_tail:beam_properties"] = np.tile(
            props, (2, 1)
        )
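
The Beam helper used above is not shown in these examples. A hypothetical stand-in that matches the call signature and the attributes read afterwards (a, i1, i2, j), using standard thin-walled box-section formulas; the formulas are assumptions for illustration, not the source project's code:

import numpy as np

class Beam(object):
    """Hypothetical stand-in: a thin-walled rectangular box of chord c and
    height h, with top/bottom skins of thickness t_skin and two webs of
    thickness t_web; spar areas are simply added to the section area."""

    def __init__(self, c, h, t_skin, t_web, n_spar=0, a_spar=0.0, type="box"):
        self.c = np.asarray(c, dtype=float)
        self.h = np.asarray(h, dtype=float)
        self.t_skin = np.asarray(t_skin, dtype=float)
        self.t_web = np.asarray(t_web, dtype=float)
        self.n_spar = n_spar
        self.a_spar = a_spar

    def compute_section_properties(self):
        c, h, ts, tw = self.c, self.h, self.t_skin, self.t_web
        # cross-section area: two skins + two webs + spar caps
        self.a = 2.0 * (c * ts + h * tw) + self.n_spar * self.a_spar
        # thin-wall bending inertias about the two section axes
        self.i1 = c * ts * h ** 2 / 2.0 + tw * h ** 3 / 6.0
        self.i2 = h * tw * c ** 2 / 2.0 + ts * c ** 3 / 6.0
        # torsion constant from Bredt's formula for a closed section
        self.j = 4.0 * (c * h) ** 2 / (2.0 * c / ts + 2.0 * h / tw)
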
Example #5
def read_cable_sparams(filename):
    """ Read data from cable CSV files and return instance of Sparam class """
    d = np.genfromtxt(filename, delimiter=',', skip_header=1, skip_footer=0)
    f = d[:, 0]
    s11 = interp(f, to_complex(d[:,1:3], linear=False, radians=False))
    s21 = interp(f, to_complex(d[:,3:5], linear=False, radians=False))
    s22 = interp(f, to_complex(d[:,5:7], linear=False, radians=False))
    S = Sparam(f, s11=s11, s21=s21, s22=s22)
    return S
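
The to_complex helper is likewise not shown. A plausible sketch inferred from the linear/radians flags (an assumption, not the project's actual implementation):

import numpy as np

def to_complex(data, linear=False, radians=False):
    # Hypothetical: data holds (magnitude, phase) column pairs.
    # linear=False -> magnitude in dB; radians=False -> phase in degrees.
    mag = data[:, 0].astype(float)
    phase = data[:, 1].astype(float)
    if not linear:
        mag = 10.0 ** (mag / 20.0)   # dB -> linear voltage ratio
    if not radians:
        phase = np.deg2rad(phase)
    return mag * np.exp(1j * phase)
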
Example #6
File: basic.py  Project: wmvanvliet/meet
def interpolateEEG(data, markers, win, interpolate_type='mchs'):
    """
    Interpolates segments in the data

    Input:
    ------
    -- data - one or two dimensional array
              1st dimension: channels (can be omitted if single channel)
              2nd dimension: datapoints
    -- markers - marker positions arranged in 1d array
    -- win - iterable of len 2 - determining the window in datapoints to
             be interpolated (win[0] is in, win[1] is out of the window)
    -- interpolate_type: ['linear', 'mchs', 'akima'] - linear,
                         Monotone Cubic Hermite Spline (mchs),
                         or Akima interpolation

    Output:
    -------
    interpolated dataset

    Examples:
    --------
    >>> data = _np.arange(20, dtype=float).reshape(2,-1)
    >>> interpolateEEG(data, [5], [-1,2], 'linear')
    array([[  0.,   1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.],
           [ 10.,  11.,  12.,  13.,  14.,  15.,  16.,  17.,  18.,  19.]])
    >>> interpolateEEG(data, [5], [-1,2], 'mchs')
    array([[  0.,   1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.],
           [ 10.,  11.,  12.,  13.,  14.,  15.,  16.,  17.,  18.,  19.]])
    >>> interpolateEEG(data, [5], [-1,2], 'akima')
    array([[  0.   ,   1.   ,   2.   ,   3.   ,   3.625,   5.   ,   6.375,
              7.   ,   8.   ,   9.   ],
           [ 10.   ,  11.   ,  12.   ,  13.   ,  13.625,  15.   ,  16.375,
             17.   ,  18.   ,  19.   ]])
    """
    interpolpts = [_np.arange(m + win[0], m + win[1], 1) for m in markers]
    interpolpts = _np.sort(_np.ravel(interpolpts))
    have_indices = _np.ones(data.shape[-1], bool)
    have_indices[interpolpts] = False
    x = _np.arange(data.shape[-1])[have_indices]
    if interpolate_type == 'linear':
        from scipy.interpolate import interp1d as interp
        f = interp(x, data[:,have_indices], axis=-1)
        data[:,interpolpts] = f(interpolpts)
    elif interpolate_type in ['mchs', 'akima']:
        if interpolate_type == 'akima':
            from _interp import akima as interp
        elif interpolate_type == 'mchs':
            from _interp import mchi as interp
        if data.ndim == 1:
            data[interpolpts] = interp(x, data[have_indices])
        elif data.ndim == 2:
            for ch in xrange(data.shape[0]):
                data[ch, interpolpts] = interp(x,
                        data[ch, have_indices])
    return data
Example #7
File: basic.py  Project: CoastSunny/meet
def interpolateEEG(data, markers, win, interpolate_type='mchs'):
    """
    Interpolates segments in the data

    Input:
    ------
    -- data - one or two dimensional array
              1st dimension: channels (can be omitted if single channel)
              2nd dimension: datapoints
    -- markers - marker positions arranged in 1d array
    -- win - iterable of len 2 - determining the window in datapoints to
             be interpolated (win[0] is in, win[1] is out of the window)
    -- interpolate_type: ['linear', 'mchs', 'akima'] - linear,
                         Monotone Cubic Hermite Spline (mchs),
                         or Akima interpolation

    Output:
    -------
    interpolated dataset

    Examples:
    --------
    >>> data = _np.arange(20, dtype=float).reshape(2,-1)
    >>> interpolateEEG(data, [5], [-1,2], 'linear')
    array([[  0.,   1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.],
           [ 10.,  11.,  12.,  13.,  14.,  15.,  16.,  17.,  18.,  19.]])
    >>> interpolateEEG(data, [5], [-1,2], 'mchs')
    array([[  0.,   1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.],
           [ 10.,  11.,  12.,  13.,  14.,  15.,  16.,  17.,  18.,  19.]])
    >>> interpolateEEG(data, [5], [-1,2], 'akima')
    array([[  0.   ,   1.   ,   2.   ,   3.   ,   3.625,   5.   ,   6.375,
              7.   ,   8.   ,   9.   ],
           [ 10.   ,  11.   ,  12.   ,  13.   ,  13.625,  15.   ,  16.375,
             17.   ,  18.   ,  19.   ]])
    """
    interpolpts = [_np.arange(m + win[0], m + win[1], 1) for m in markers]
    interpolpts = _np.unique(_np.ravel(interpolpts))
    have_indices = _np.ones(data.shape[-1], bool)
    have_indices[interpolpts] = False
    x = _np.arange(data.shape[-1])[have_indices]
    if interpolate_type == 'linear':
        from scipy.interpolate import interp1d as interp
        f = interp(x, data[:, have_indices], axis=-1)
        data[:, interpolpts] = f(interpolpts)
    elif interpolate_type in ['mchs', 'akima']:
        if interpolate_type == 'akima':
            from _interp import akima as interp
        elif interpolate_type == 'mchs':
            from _interp import mchi as interp
        if data.ndim == 1:
            data[interpolpts] = interp(x, data[have_indices])
        elif data.ndim == 2:
            for ch in xrange(data.shape[0]):
                data[ch, interpolpts] = interp(x, data[ch, have_indices])
    return data
Example #8
def myIK(target, dim=9, w=[1, 1, 1, 1, 1, 1, 15, 15, 15]):
    e = Environment()
    e.StopSimulation()
    e.Load("arm/arm.xml")
    e.Load("arm/table.xml")
    robot = e.GetRobots()[0]
    manip = robot.GetActiveManipulator()
    data = np.load("arm_reach_8.npy")

    if dim == 9:
        nn = interp(data[:, :9], data[:, 9:])
    elif dim == 3:
        nn = interp(data[:, 6:9], data[:, 9:])

    joint_start = nn(target[np.newaxis])[0].tolist()
    robot.SetDOFValues(joint_start, manip.GetArmIndices())
    robot.SetDOFValues([GRIPPER_OPEN_RAD], manip.GetGripperIndices())

    limits = []
    for j in robot.GetJoints():
        limits.append((j.GetLimits()[0][0], j.GetLimits()[1][0]))

    def cost(joints):
        robot.SetDOFValues(joints, manip.GetArmIndices())
        gs = gripperState(manip.GetTransform())

        if dim == 9:
            distCost = np.linalg.norm((gs - target) * np.array(w))
        elif dim == 3:
            distCost = np.linalg.norm((gs[-3:] - target[-3:]))

        collCost = 100 * sum([
            sum([
                1 if e.CheckCollision(f, o) else 0 for o in e.GetBodies()[1:]
            ]) for f in manip.GetChildLinks()
        ])
        return distCost + collCost

    final, fmin, d = fmin_l_bfgs_b(cost,
                                   joint_start,
                                   maxfun=500,
                                   iprint=10,
                                   m=50,
                                   approx_grad=True,
                                   pgtol=1e-12,
                                   factr=1)
    robot.SetDOFValues(final, manip.GetArmIndices())
    finalPose = poseFromMatrix(manip.GetTransform())
    return {
        "joints": final,
        "quat": finalPose[:4],
        "xyz": finalPose[4:],
        "cost": fmin
    }
Example #9
def HighPass():
    fileList = []
    vsini = 40.0
    for arg in sys.argv[1:]:
        if 'vsini' in arg:
            vsini = float(arg.split("=")[-1])
        else:
            fileList.append(arg)

    for fname in fileList:
        column_list = []
        fig = plt.figure(1)
        plotgrid = gridspec.GridSpec(3, 1)
        mainaxis = plt.subplot(plotgrid[0:2])
        reducedaxis = plt.subplot(plotgrid[2], sharex=mainaxis)
        orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error",
                                        cont="continuum")
        for order in orders:
            # Linearize
            datafcn = interp(order.x, order.y, k=1)
            errorfcn = interp(order.x, order.err, k=1)
            linear = DataStructures.xypoint(order.x.size)
            linear.x = np.linspace(order.x[0], order.x[-1], linear.size())
            linear.y = datafcn(linear.x)
            linear.err = errorfcn(linear.x)
            linear.cont = FittingUtilities.Continuum(linear.x, linear.y)
            smoothed = HelperFunctions.HighPassFilter(linear, vsini * units.km.to(units.cm))
            mean = np.mean(smoothed)
            std = np.std(smoothed)
            badindices = np.where(np.abs((smoothed - mean) / std) > 3.0)[0]
            plt.figure(2)
            plt.plot(linear.x, (smoothed - mean) / std)
            plt.figure(3)
            plt.plot(linear.x, linear.y - smoothed)
            plt.figure(1)
            smoothed[badindices] = 0.0
            smoothed += np.median(linear.cont)
            smoothed /= np.median(linear.cont)
            #linear.y[badindices] = smoothed[badindices]
            mainaxis.plot(linear.x, linear.y / linear.cont, 'k-')
            mainaxis.plot(linear.x, smoothed, 'r-', linewidth=1)
            reducedaxis.plot(linear.x, smoothed)
            columns = {"wavelength": linear.x,
                       "flux": smoothed,
                       "error": linear.err,
                       "continuum": FittingUtilities.Continuum(linear.x, linear.y, fitorder=3, lowreject=3,
                                                               highreject=3)}
            column_list.append(columns)
        outfilename = "%s_filtered.fits" % (fname.split(".fits")[0])
        print "Outputting to %s" % outfilename
        plt.show()
        HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode='new')
Example #10
    def test_Run(self):

        wt_layout = generate_random_wt_layout()
        x_g, y_g, z_g = get_T2T_gl_coord(wt_layout)
        dt = wt_layout.wt_array(attr='rotor_diameter')

        # Interpolate power curves to have the same number of elements
        nu = 22
        p_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='power_curve')[j][:,0],
                wt_layout.wt_array(attr='power_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])

        ct_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='c_t_curve')[j][:,0],
                wt_layout.wt_array(attr='c_t_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])
        for iwt, wt in enumerate(wt_layout.wt_list):
            wt.power_curve = p_c[iwt, :, :]
            wt.c_t_curve = ct_c[iwt, :, :]

        rho = np.min(wt_layout.wt_array(attr='air_density'))
        ws_ci = wt_layout.wt_array(attr='cut_in_wind_speed')
        ws_co = wt_layout.wt_array(attr='cut_out_wind_speed')
        ws = 8.0
        wd = 270.0
        kj = 0.05
        inputs = dict(ws=ws, wd=wd, kj=kj)

        P_WT, U_WT, Ct = FortranNOJ.noj(x_g, y_g, z_g, dt, p_c, ct_c, ws, wd,
                                        kj, rho, ws_ci, ws_co)

        fnoj = FusedFNOJ()
        # Setting the inputs
        fnoj.wt_layout = wt_layout
        for k, v in rosettaNOJ.iteritems():
            setattr(fnoj, v, inputs[k])
        fnoj.run()
        np.testing.assert_almost_equal(P_WT, fnoj.wt_power)
        np.testing.assert_almost_equal(U_WT, fnoj.wt_wind_speed)
Example #11
    def __init__(
        self,
        filepath=r"C:\Users\Gus\GoogleDrive\GeneralShare\Testing\Equipment/Photodiodes/PhotodiodeCal.txt"
    ):
        cal_file = open(filepath, 'r')

        result = []
        lams = []
        effs = []
        for line in cal_file.readlines()[1:]:
            strlist = line.split('\t')
            lam = float(strlist[0])
            eff = float(strlist[1])
            result.append([lam, eff])
            lams.append(lam)
            effs.append(eff)

        cal_file.close()

        result = np.array(result)

        self.array = result
        self.lambdas = lams
        self.effic = effs

        self.area = (3.6e-3) ** 2  # (3.6 mm)^2 ≈ 13 mm^2 active area, in m^2

        self.func = interp(lams, effs, kind="cubic")

        self.dark = self.dark_function()
Example #12
    def compute(self, inputs, outputs):
        n_secs = self.options["number_of_sections"]

        #  Characteristic points and length --------------------------------------------------------
        x_root = (
            inputs["data:geometry:wing:MAC:at25percent:x"][0]
            + inputs["data:geometry:vertical_tail:MAC:at25percent:x:from_wingMAC25"][0]
            - inputs["data:geometry:vertical_tail:MAC:at25percent:x:local"][0]
        )
        x_tip = x_root + inputs["data:geometry:vertical_tail:span"][0] * np.tan(
            inputs["data:geometry:vertical_tail:sweep_0"][0]
        )
        z_root = 0.5 * inputs["data:geometry:fuselage:maximum_height"][0]
        z_tip = z_root + inputs["data:geometry:vertical_tail:span"][0]
        root_chord = inputs["data:geometry:vertical_tail:root:chord"][0]
        tip_chord = inputs["data:geometry:vertical_tail:tip:chord"][0]

        #  VTP box centers locations ---------------------------------------------------------------
        x_box_root = x_root + 0.5 * root_chord
        x_box_tip = x_tip + 0.5 * tip_chord

        x_interp = [x_box_root, x_box_tip]
        z_interp = [z_root, z_tip]

        f_x = interp(z_interp, x_interp)

        #  Nodes coordinates interpolation ---------------------------------------------------------

        z_box = np.linspace(z_root, z_tip, n_secs + 1).reshape((n_secs + 1, 1))
        x_box = f_x(z_box)
        y_box = np.zeros((n_secs + 1, 1))

        outputs["data:aerostructural:structure:vertical_tail:nodes"] = np.hstack(
            (x_box, y_box, z_box)
        )
Example #13
def read_anritsu_s11(filename):
    """ Read data from Anristu VNA and return instance of Sparam class """
    d = np.genfromtxt(filename, delimiter=',', skip_header=8, skip_footer=1)
    f_mhz = d[:, 0] / 1e6
    s11 = interp(f_mhz, to_complex(d[:,1:], linear=False, radians=False))
    S = Sparam(f_mhz, s11)
    return S
def voigt(x,c1,w1,c2,w2):
	""" Voigt function: convolution of Lorentzian and Gaussian.
		Convolution implemented with the FFT convolve function in scipy.
		NOT NORMALISED """
	
	### Create larger array so convolution doesn't screw up at the edges of the arrays
	# this assumes nicely behaved x-array...
	# i.e. x[0] == x.min() and x[-1] == x.max(), monotonically increasing
	dx = (x[-1]-x[0])/len(x)
	xp_min = x[0] - len(x)/3 * dx
	xp_max = x[-1] + len(x)/3 * dx
	xp = linspace(xp_min,xp_max,3*len(x))
	
	L = lorentzian(xp,c1,w1)
	G = gaussian(xp,c2,w2)
	
	#convolve
	V = conv(L,G,mode='same')
	
	#normalise to unity height !!! delete me later !!!
	V /= V.max()
	
	#create interpolation function to convert back to original array size
	fn_out = interp(xp,V)
	
	return fn_out(x)
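
voigt() relies on lorentzian, gaussian, conv and interp defined elsewhere. Plausible unit-height definitions of the first two (assumptions for illustration, not the source's code):

import numpy as np

def lorentzian(x, c, w):
    # unit-height Lorentzian centred at c with half-width w
    return w ** 2 / ((x - c) ** 2 + w ** 2)

def gaussian(x, c, w):
    # unit-height Gaussian centred at c with standard deviation w
    return np.exp(-0.5 * ((x - c) / w) ** 2)
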
Example #15
def optimal_cutoff(Y,dist_mat,min_size):
    labels = np.array([sch.fcluster(Y,c,criterion='distance') for c in Y[:,2]])
    score = np.array([metrics.silhouette_score(dist_mat,l) for l in labels[:-min_size]])
    c = Y[:-min_size,2]
    f = interp(c,-score,kind='linear')
    opt_c = opt.fmin(f,x0=c[2*min_size])
    return opt_c
 def generic_interp(self, b0, x):
     f = interp(self.b, x, kind='linear')
     if b0 > max(self.b):
         return f(max(self.b))
     elif b0 < min(self.b):
         return f(min(self.b))
     return f(b0)
 def elogaH(self, b0, H):
     f = interp(self.b, H * np.exp(self.b), kind='linear')
     if b0 > max(self.b):
         return f(max(self.b))
     elif b0 < min(self.b):
         return f(min(self.b))
     return f(b0)
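
Both helpers clamp out-of-range queries to the endpoint values. np.interp does this by default, so for 1-D data an equivalent sketch (assuming self.b is sorted ascending, which np.interp requires) is:

import numpy as np

def generic_interp_np(b, x, b0):
    # np.interp clamps b0 below b[0] / above b[-1] to x[0] / x[-1]
    return np.interp(b0, b, x)
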
Example #18
def create_peak_graph(peak_set, group_size=6000, base=2, noisiness=2):
    graph_set = []
    for dim_peaks in peak_set:  # [[z1 z2 z3 z4 z5], [z...]]
        points = np.full(group_size, base, dtype=float)
        indices = dim_peaks[0]  # [z1 2 3 4 5]
        heights = dim_peaks[1]
        widths = dim_peaks[2]
        for i, ind in enumerate(indices):  # z1
            index = int(ind)
            if (widths[i] <= 0 or heights[i] <= 0
                    or index < 0 or index > group_size):
                continue
            left_width = uniform(0, widths[i])
            right_width = widths[i] - left_width
            x = [index - left_width, index, index + right_width]
            y = [0, heights[i], 0]
            f = interp(x, y)
            for j in range(int(index - left_width) + 1,
                           int(index + right_width) + 1):  # need interp
                if(0 <= j < len(points)):
                    points[j] += f(j)
        # now we have to add noise
        noise = np.random.normal(0, noisiness, group_size)
        # for i in range(group_size):
        #     if(not i in indices):  # peaks already have noise
        #         points[i] = abs(points[i] + noise[i])
        #     else:
        #         points[i] = abs(points[i])
        # for i in range(group_size):
        #     points[i] = abs(points[i])
        points += noise
        graph_set.append(np.abs(points))
    return graph_set
    def compute(self, inputs, outputs):
        n_secs = self.options["number_of_sections"]

        #  Characteristic points -------------------------------------------------------------------
        x_root = (
            inputs["data:geometry:wing:MAC:at25percent:x"][0]
            + inputs["data:geometry:vertical_tail:MAC:at25percent:x:from_wingMAC25"][0]
            - 0.25 * inputs["data:geometry:vertical_tail:MAC:length"][0]
            - inputs["data:geometry:vertical_tail:MAC:at25percent:x:local"][0]
        )
        x_tip = x_root + inputs["data:geometry:vertical_tail:span"][0] * np.tan(
            inputs["data:geometry:vertical_tail:sweep_0"][0]
        )
        z_root = inputs["data:geometry:fuselage:maximum_height"][0] / 2
        z_tip = inputs["data:geometry:vertical_tail:span"][0] + z_root

        x_interp = [x_root, x_tip]
        z_interp = [z_root, z_tip]

        f_x = interp(z_interp, x_interp)

        #  Nodes coordinates interpolation ---------------------------------------------------------
        z_le = np.linspace(z_root, z_tip, n_secs + 1).reshape((n_secs + 1, 1))
        y_le = np.zeros((n_secs + 1, 1))
        x_le = f_x(z_le)

        outputs["data:aerostructural:aerodynamic:vertical_tail:nodes"] = np.hstack(
            (x_le, y_le, z_le)
        )
def voigt(x, c1, w1, c2, w2):
    """ Voigt function: convolution of Lorentzian and Gaussian.
    Convolution implemented with the FFT convolve function in scipy.
    NOT NORMALISED """

    ### Create larger array so convolution doesn't screw up at the edges of the arrays
    # this assumes nicely behaved x-array...
    # i.e. x[0] == x.min() and x[-1] == x.max(), monotonically increasing
    dx = (x[-1] - x[0]) / len(x)
    xp_min = x[0] - len(x) / 3 * dx
    xp_max = x[-1] + len(x) / 3 * dx
    xp = linspace(xp_min, xp_max, 3 * len(x))

    L = lorentzian(xp, c1, w1)
    G = gaussian(xp, c2, w2)

    #convolve
    V = conv(L, G, mode='same')

    #normalise to unity height !!! delete me later !!!
    V /= V.max()

    #create interpolation function to convert back to original array size
    fn_out = interp(xp, V)

    return fn_out(x)
Example #21
    def __init__(
        self,
        filepath="//READYSHARE/USB_Storage/GusFiles/test_data/TLS_intensity_bare5.txt"
    ):

        data = np.loadtxt(filepath, unpack=True)
        # unpack=True transposes the file, so each variable is a row
        lambda_nm = data[0]
        curr_A = data[1]
        power_W = data[2]

        self.array = np.array(data)
        self.lambdas = lambda_nm
        self.curr = curr_A
        self.power = power_W

        self.power_func = interp(self.lambdas, self.power, kind="cubic")
        self.curr_func = interp(self.lambdas, self.curr, kind="cubic")
Example #22
def get_cross_section_interps(isos):
    data = load_cross_sections(isos)
    interps = {}
    for iso in isos:
        E, val = data[iso]['E'], data[iso]['value']
        left, right = data[iso]['value'][0], data[iso]['value'][-1]
        interps[iso] = interp(E, val, bounds_error=False, fill_value=(left, right))
    return interps
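
The fill_value=(left, right) pair gives constant extrapolation below and above the tabulated energy range, a scipy.interpolate.interp1d feature. A quick self-contained check, assuming interp is interp1d:

import numpy as np
from scipy.interpolate import interp1d as interp

E = np.array([1.0, 2.0, 3.0])
val = np.array([10.0, 20.0, 30.0])
f = interp(E, val, bounds_error=False, fill_value=(val[0], val[-1]))
print(f(0.5), f(5.0))  # 10.0 30.0 (clamped at both ends)
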
Example #23
    def test_Run(self):

        wt_layout = generate_random_wt_layout()
        x_g, y_g, z_g = get_T2T_gl_coord(wt_layout)
        dt = wt_layout.wt_array(attr='rotor_diameter')

        # Interpolate power curves to have the same number of elements
        nu = 22
        p_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='power_curve')[j][:,0],
                wt_layout.wt_array(attr='power_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])

        ct_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='c_t_curve')[j][:,0],
                wt_layout.wt_array(attr='c_t_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])
        for iwt, wt in enumerate(wt_layout.wt_list):
            wt.power_curve = p_c[iwt, :, :]
            wt.c_t_curve = ct_c[iwt, :, :]

        rho = np.min(wt_layout.wt_array(attr='air_density'))
        ws_ci = wt_layout.wt_array(attr='cut_in_wind_speed')
        ws_co = wt_layout.wt_array(attr='cut_out_wind_speed')
        ws = 8.0
        wd = 270.0
        kj = 0.05
        inputs = dict(ws=ws, wd=wd, kj=kj)

        P_WT, U_WT, Ct = FortranNOJ.noj(x_g, y_g, z_g, dt, p_c, ct_c, ws, wd,
                                        kj, rho, ws_ci, ws_co)

        fnoj = FusedFNOJ()
        # Setting the inputs
        fnoj.wt_layout = wt_layout
        for k, v in rosettaNOJ.iteritems():
            setattr(fnoj, v, inputs[k])
        fnoj.run()
        np.testing.assert_almost_equal(P_WT, fnoj.wt_power)
        np.testing.assert_almost_equal(U_WT, fnoj.wt_wind_speed)
Example #24
def read_uu_sparams(filename):
    """ Read data from cable CSV files and return instance of Sparam class """
    d = np.genfromtxt(filename, skip_header=0, skip_footer=0)
    f = d[:, 0]

    s21 = interp(f, to_complex(d[:,1:3], linear=False, radians=False))
    S = Sparam(f, s21=s21)
    return S
Example #25
    def compute(self, inputs, outputs):

        n_secs = self.options["number_of_sections"]

        #  Characteristic points and lengths -------------------------------------------------------
        x_root = (
            inputs["data:geometry:wing:MAC:at25percent:x"][0]
            + inputs["data:geometry:horizontal_tail:MAC:at25percent:x:from_wingMAC25"][0]
            - inputs["data:geometry:horizontal_tail:MAC:at25percent:x:local"][0]
        )
        x_tip = x_root + inputs["data:geometry:horizontal_tail:span"][0] * 0.5 * np.tan(
            inputs["data:geometry:horizontal_tail:sweep_0"][0]
        )
        y_root = 0.0
        y_tip = inputs["data:geometry:horizontal_tail:span"][0] * 0.5
        z_root = inputs["data:geometry:horizontal_tail:root:z"][0]
        z_tip = inputs["data:geometry:horizontal_tail:tip:z"][0]
        root_chord = inputs["data:geometry:horizontal_tail:root:chord"][0]
        tip_chord = inputs["data:geometry:horizontal_tail:tip:chord"][0]

        #  HTP Box centers locations ---------------------------------------------------------------

        x_box_root = x_root + 0.5 * root_chord  # Box is assumed centered along mid-chord axis
        x_box_tip = x_tip + 0.5 * tip_chord

        x_interp = [x_box_root, x_box_tip]
        y_interp = [y_root, y_tip]
        z_interp = [z_root, z_tip]

        f_x = interp(y_interp, x_interp)
        f_z = interp(y_interp, z_interp)

        #  Nodes coordinates interpolations --------------------------------------------------------
        y_box = np.linspace(y_root, y_tip, n_secs + 1).reshape((n_secs + 1, 1))
        x_box = f_x(y_box)
        z_box = f_z(y_box)

        xyz_r = np.hstack((x_box, y_box, z_box))
        xyz_l = np.hstack((x_box, -y_box, z_box))

        outputs["data:aerostructural:structure:horizontal_tail:nodes"] = np.vstack(
            (xyz_r, xyz_l)
        )
Example #26
 def exponential(self, x, A, T, bg):
     '''
     uses the transmission for the exponential term, not the constant background.
     '''
     omega = -cm_to_omega(x)
     return (
         A *
         (np.exp(old_div(
             (old_div(constants.hbar, constants.k)) * omega, T)) - 1)**
         -1) * interp(self.shifts, self.transmission)(x) + bg
Example #27
 def exponential2(self, x, A, T, bg):
     '''
     uses a more complicated exponential
     '''
     omega = -cm_to_omega(x)
     return A * (
         ((np.exp(old_div(
             (old_div(constants.hbar, constants.k)) * omega, T)) - 1)**-1) +
         (np.exp((old_div(constants.hbar, constants.k)) * omega / 298.) - 1)
         **-1) * interp(self.shifts, self.transmission)(x) + bg
Example #28
def calc_sigma_13(k, R):

    # Function to calculate \sigma^2_{13} = \frac{5}{21} \cdot \frac{1}{6\pi^2} \int dk\, R_1(k)

    q, I0 = dosph(0, k, R, -2)

    sigma_func = interp(q, npi2 * I0 / 6.0)
    sigma_0 = sigma_func(min(q))

    return (5.0 / 21.0) * sigma_0
Example #29
def push_forward(fvals, grid, dt, shift, sigma, rho, mu_jump, sigma_jump,
                 jumps, i, cont):
    f = interp(grid, fvals, bounds_error=False, fill_value=0.0)
    x0 = np.linspace(-5, 5, 400)
    gauss = GAUSS_CONST * np.exp(-0.5 * x0 * x0)
    height = np.random.normal(mu_jump, sigma_jump)
    SHIFT = shift + sigma * np.sqrt(1 - rho**2) * np.sqrt(
        dt) * x0 + height * jumps[i] - cont
    shifted_f = f(grid[:, None] - SHIFT)
    f_conv = integrate.trapz(shifted_f * gauss, x0, axis=1)
    return f_conv
Example #30
def calc_eta(k, P):

    # Function to calculate \eta^2_E = \frac{1}{6\pi^2} \int dk\, k^2 P_L(k)

    q, I0 = dosph(0, k, P, 0)
    eta_vals = npi2 * I0 / 6.0

    eta_func = interp(q, eta_vals)
    eta_0 = eta_func(min(q))

    return eta_0
Example #31
def regrid_field(x_nat, y_nat, data, x_new, y_new):
    """
    Function returns data on x_nat/y_nat grid at x,y locations.
    len(x) * len(y) == len(data)
    """
    #from scipy.interpolate import interp2d
    from scipy.interpolate import RectBivariateSpline as interp
    #_Some of the data is flipped and flopped, so allow for transposition
    try:
        f = interp(x_nat, y_nat, data)
    except TypeError:
        f = interp(x_nat, y_nat, data.transpose())
    #_Actual Regridding
    regrid = f(x_new, y_new)
    dbg((x_nat.shape, y_nat.shape, data.shape, x_new.shape, y_new.shape,
         regrid.shape),
        l=9)

    return regrid
Example #32
def calc_sigma_psi(k, P):

    # Function to calculate the variance of the displacement field \sigma^2_{\psi} = \frac{1}{6\pi^2} \int dk\, P_L(k)

    q, I0 = dosph(0, k, P, -2)
    sigma_vals = npi2 * I0 / 6.0

    sigma_func = interp(q, sigma_vals)
    sigma_0 = sigma_func(min(q))

    return sigma_0
Example #33
def reasonable_MS_guess(Mmin=1, Mmax=15, howmany=200):
    '''
    Generates a tuple vector with reasonable guesses for building a MS in the
    HR diagram.
    They are picked by interpolation from the ones given in the manual.
    D. Vallés

    Parameters:
    Mmin: low-mass end of the interval to generate guesses
    Mmax: high-mass end of the interval to generate guesses
    howmany: num. of guesses to generate

    Returns:
    a list of models, each containing the mass, central pressure, central
    temperature, radius and luminosity of a guess
    '''
    # reference values
    M_ref = [1, 3, 15]
    p_ref = [1.482e17, 1.141e17, 2.769e16]
    T_ref = [1.442e7, 2.347e7, 3.275e7]
    R_ref = [6.932e10, 1.276e11, 3.289e11]
    L_ref = [0.9083, 89.35, 1.960e4]  # luminosities span decades, so they
    # are interpolated in log space below

    # interpolating function
    p_int = interp(M_ref, p_ref, fill_value='extrapolate')
    T_int = interp(M_ref, T_ref, fill_value='extrapolate')
    R_int = interp(M_ref, R_ref, fill_value='extrapolate')
    logL_int = interp(M_ref, np.log(L_ref), fill_value='extrapolate')

    # interpolated values
    M = np.linspace(Mmin, Mmax, howmany)
    p = p_int(M)
    T = T_int(M)
    R = R_int(M)
    L = np.exp(logL_int(M))

    # models
    models = [[M[i], p[i], T[i], R[i], L[i]] for i in range(M.size)]

    return models
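
A brief usage sketch:

# 50 guesses between 2 and 10 solar masses; each model is
# [mass, central pressure, central temperature, radius, luminosity]
models = reasonable_MS_guess(Mmin=2, Mmax=10, howmany=50)
M0, p0, T0, R0, L0 = models[0]
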
Example #34
def myIK(target, dim=9, w=[1,1,1,1,1,1,15,15,15]):
	e = Environment()
	e.StopSimulation()
	e.Load("arm/arm.xml")
	e.Load("arm/table.xml")
	robot = e.GetRobots()[0]
	manip = robot.GetActiveManipulator()
	data = np.load("arm_reach_8.npy")

	if dim == 9:
		nn = interp(data[:,:9], data[:,9:])
	elif dim == 3:
		nn = interp(data[:,6:9], data[:,9:])


	joint_start = nn(target[np.newaxis])[0].tolist()
	robot.SetDOFValues(joint_start, manip.GetArmIndices())
	robot.SetDOFValues([GRIPPER_OPEN_RAD], manip.GetGripperIndices())



	limits = []
	for j in robot.GetJoints():
		limits.append((j.GetLimits()[0][0], j.GetLimits()[1][0]))
	
	def cost(joints):
		robot.SetDOFValues(joints, manip.GetArmIndices())
		gs = gripperState(manip.GetTransform())

		if dim == 9:
			distCost = np.linalg.norm ( (gs - target) * np.array(w) )
		elif dim == 3:
			distCost = np.linalg.norm( (gs[-3:] - target[-3:]) )

		collCost = 100 * sum([sum([1 if e.CheckCollision(f, o) else 0 for o in e.GetBodies()[1:]]) for f in manip.GetChildLinks()])
		return distCost + collCost

	final, fmin, d = fmin_l_bfgs_b(cost, joint_start, maxfun=500, iprint=10, m=50, approx_grad=True, pgtol=1e-12, factr=1)
	robot.SetDOFValues(final, manip.GetArmIndices())
	finalPose = poseFromMatrix(manip.GetTransform())
	return {"joints": final, "quat": finalPose[:4], "xyz": finalPose[4:], "cost":fmin}
Example #35
def read_s2p_s11(filename, s11_col=7):
    """ Read data from S2P VNA file and return instance of Sparam class.

    Only reads S11 data, defaults to column 7 (sometimes it is column 1)
    """
    c = s11_col
    with open(filename) as fh:
        data = np.loadtxt(fh.readlines()[23:])  # Load data and ignore header
    f_mhz = data[:, 0] * 1e3  # to MHz
    s11    = to_complex(data[:, c:c+2], linear=False, radians=False)
    s11_i  = interp(f_mhz, s11)
    return Sparam(f_mhz, s11_i)
Example #36
 def __get_amplitude_factor__(self, coordinate_relative_to_port_position):
     positions = self.mode_profile[0]
     fields = self.mode_profile[1]
     from scipy.interpolate import interp1d as interp
     i = interp(x=positions,
                y=fields,
                kind='linear',
                copy=True,
                bounds_error=False,
                fill_value=0.0)
     f = float(i(coordinate_relative_to_port_position[1]))
     return f
Example #37
    def remap_states(self):
        """Map states to correct range for neural network.

        Modifies:
            self._states values are mapped to a different range.
        """

        js_world = self._sim_config['world_size']
        py_world = self._py_config['world_size']

        x_map = interp([0, js_world['x']],
                       [-1. * js_world['x'] / 2., js_world['x'] / 2.])

        # reverse first argument since in JS, y values increase as you move down
        y_map = interp([js_world['y'], 0],
                       [-1. * js_world['y'] / 2., js_world['y'] / 2.])

        for idx, (x, y, theta) in enumerate(self._states):
            self._states[idx] = [
                float(x_map(x)),
                float(y_map(y)), theta % (2 * np.pi)
            ]
Example #38
 def do(self):
     self.setup_plots()

     tstart = time.time()
     self.temp_start = montana.temperature['platform']
 
     for i in range(self.X.shape[0]): # make sure all points are not out of range of piezos
         self.nav.check_range(self.X[i][0], self.Y[i][0], self.Z[i][0])
 
     ## Loop over X values
     for i in range(self.X.shape[0]):
         self.nav.goto(self.X[i][0], self.Y[i][0], self.Z[i][0]) # goes to beginning of scan
         
         Vstart = {'x': self.X[i][0], 'y': self.Y[i][0], 'z': self.Z[i][0]} 
         Vend = {'x': self.X[i][-1], 'y': self.Y[i][-1], 'z': self.Z[i][-1]}
         
         out, V, t = self.piezos.sweep(Vstart, Vend) # sweep over Y
         
         interp_func = interp(out['y'], V[self.sig_in])
         self.V[i][:] = interp_func(self.Y[i][:]) # changes from actual output data to give desired number of points
         
         self.last_full_out = out['y']
         self.last_full_sweep = V[self.sig_in]
         
         self.last_interp_out = self.Y[i][:]
         self.last_interp_sweep = self.V[i][:]
         
         # Do the same for capacitance
         interp_func = interp(out['y'], V[self.cap_in])
         self.C[i][:] = interp_func(self.Y[i][:])
                     
         self.plot()  
         
     self.nav.goto_seq(*self.start_pos) #Go back whence you came! *arg expands the tuple
     self.save()
     
     tend = time.time()
     print('Scan took %f minutes' %((tend-tstart)/60))
     return
Example #39
def _linear_distr_blade(blade, nr_points=None):
    """
    Interpolate the blade.dat data onto linearly distributed radial
    positions
    """
    if nr_points is None:
        nr_points = blade.shape[0]
    # make a linear distribution of radial positions
    radius = np.linspace(blade[0,0], blade[-1,0], num=nr_points)
    blade_new = np.zeros((nr_points, blade.shape[1]))
    blade_new[:,0] = radius
    # and interpolate all points from the hawtopt result on a linear grid
    for k in range(1,blade.shape[1]):
        blade_new[:,k] = interp(blade[:,0], blade[:,k], radius)

    return blade_new
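
Note that interp here follows a three-argument (x-table, y-table, query) convention rather than the callable-returning two-argument form used in most other examples. A minimal sketch with np.interp, plus a hypothetical blade layout (radius in column 0 is given by the code; the other columns are assumed), both living in the same module as _linear_distr_blade:

import numpy as np

def interp(xp, fp, x):
    # three-argument convention used by _linear_distr_blade
    return np.interp(x, xp, fp)

blade = np.array([[0.0, 3.0, 12.0],   # radius, chord, twist (columns assumed)
                  [10.0, 2.0, 6.0],
                  [30.0, 1.0, 0.0]])
print(_linear_distr_blade(blade, nr_points=7))
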
Example #40
 def calScanLength(self, level=5):
     """calculate the scanning length and scanning starting point
     the scanning starting point and scanning length are calculated using cubic interpolation at multiple level
     """
     self.scanLengthLevel = level
     self.scanLength = np.zeros(level)
     self.scanStart = np.zeros(level)
     levels = np.linspace(0.1, 0.9, level)
     x = np.arange(self.arraySize)
     for i, l in enumerate(levels):
         f_interp = interp(x, self.scanRate - l)
         roots = f_interp.roots()
         self.scanLength[i] = abs(roots[1] - roots[0])
         if self.scanDirect == 0:
             self.scanStart[i] = min(roots)
         else:
             self.scanStart[i] = max(roots)
def main4():
    linelist = "../Scripts/LineList.dat"
    lines, strengths = np.loadtxt(linelist, unpack=True)
    strengths = 1.0 - strengths

    fname = "HIP_70384.fits"
    orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error")

    for i, order in enumerate(orders):
        DATA = interp(order.x, order.y)
        order.x, xspacing = np.linspace(order.x[0], order.x[-1], order.x.size, retstep=True)
        order.y = DATA(order.x)
        order.cont = FittingUtilities.Continuum(order.x, order.y)

        left = np.searchsorted(lines, order.x[0])
        right = np.searchsorted(lines, order.x[-1])
        print right - left + 1
        unbroadened = order.copy()
        unbroadened.y = np.zeros(unbroadened.x.size)
        unbroadened.cont = np.ones(unbroadened.x.size)
        deltav = xspacing / np.median(order.x) * 3e5
        print deltav
        factor = 10. / deltav
        for j, line in enumerate(lines[left:right]):
            x = line
            y = strengths[j + left]
            idx = np.searchsorted(unbroadened.x, line)
            unbroadened.y[idx] = -y * factor
        unbroadened.y += 1.0
        #model = Broaden(order, unbroadened, m=401, dimension=20)
        model2 = RotBroad.Broaden2(unbroadened.copy(), 100 * units.km.to(units.cm), linear=True)
        model3 = RotBroad.Broaden2(unbroadened.copy(), 140 * units.km.to(units.cm), linear=True)
        model2 = FittingUtilities.ReduceResolution(model2, 60000)
        model3 = FittingUtilities.ReduceResolution(model3, 60000)

        unbroadened.y = (unbroadened.y - 1.0) / factor + 1.0
        #plt.plot(model.x, model.y)
        plt.plot(order.x, order.y / order.cont)
        plt.plot(unbroadened.x, unbroadened.y)
        plt.plot(model2.x, model2.y)
        plt.plot(model3.x, model3.y)
        plt.show()
Example #42
def tandem_JV(V, Eg1, Eg2, series):

    if(Eg2 > Eg1):
        return 0
    
    Jph1 = Jsc(Eg1)
    Jph2 = Jsc(Eg2) - Jph1

    if (Jph1 > Jph2) and (series):
        Jph2 = (Jph1+Jph2)/2
        Jph1 = Jph2

    J0_1 = planck(Eg1)
    J0_2 = planck(Eg2)
    
    # sun
    # Eg1
    # Eg2
    # grnd
    
    Rs = 0
    Rsh = 0
    ERE = 1
    
    JV1 = JV(V, Jph1, J0_1, Rs, Rsh, ERE)
    JV2 = JV(V, Jph2, J0_2, Rs, Rsh, ERE)
    
#    plt.plot(V, JV1, V, JV2)
#    ylim(-10,60)

    if series:
        basis = JV1
        target = JV2

        f = interp(sorted(target), sorted(V, reverse=True), bounds_error=False, fill_value=0)
        V_I = np.array([f(x) for x in basis])
        V_T = [x + y for x, y in zip(V, V_I)]
#        plt.plot(V_T,basis)

        return max([a*b for a,b in zip(V_T,basis)])
    else:
        return max(V*JV1)+max(V*JV2)
Example #43
def screen(seeingfile,xs=None,cutoff=None,mode='amp'):
	'''Generate a phase screen'''
	try:
		seeing = pf.getdata(seeingfile)
	except:
		if mode == 'amp':
			seeing = kolmogorov_scint(2080)
		else: 
			seeing = kolmogorov_spectrum(2080,cutoff=None)

	seeing = np.sqrt(shift(seeing)) + 0j # center and square root to get amplitudes; 0j to make it complex!

	seeingx = np.arange(2080)*0.5
	seeingx -= seeingx.max()
	sxx,syy = np.meshgrid(seeingx,seeingx)

	# #interpolate seeing to the correct basis

	noise = np.random.standard_normal(np.shape(seeing))+0j # generate noise in pupil plane

	noise = shift(fft(shift(noise)))

	rprim = 5.093/2. # Palomar

	seeing *= 2.*rprim*noise # multiply amplitudes by pupil plane noise
	# remember seeing is normalised to the pupil diameter in metres!

	seeing = shift(ifft(shift(seeing)))
	if xs is None:
		newx = np.linspace(-4*rprim,4*rprim,2080)
	else:
		newx = np.linspace(xs.min(),xs.max(),2080)

	interpfun = interp(newx,newx,seeing)

	return interpfun
def Broaden(data, model, oversampling=5, m=201, dimension=15):
    n = data.x.size * oversampling

    #n must be even, and m must be odd!
    if n % 2 != 0:
        n += 1
    if m % 2 == 0:
        m += 1

    #resample data
    Spectrum = interp(data.x, data.y / data.cont)
    Model = interp(model.x, model.y)
    xnew = np.linspace(data.x[0], data.x[-1], n)
    ynew = Spectrum(xnew)
    model_new = Model(xnew)

    #Make 'design matrix'
    design = np.zeros((n - m, m))
    for j in range(m):
        for i in range(m / 2, n - m / 2 - 1):
            design[i - m / 2, j] = model_new[i - j + m / 2]
    design = mat(design)

    #Do Singular Value Decomposition
    try:
        U, W, V_t = svd(design, full_matrices=False)
    except np.linalg.linalg.LinAlgError:
        outfilename = "SVD_Error.log"
        outfile = open(outfilename, "a")
        np.savetxt(outfile, np.transpose((data.x, data.y, data.cont)))
        outfile.write("\n\n\n\n\n")
        np.savetxt(outfile, np.transpose((model.x, model.y, model.cont)))
        outfile.write("\n\n\n\n\n")
        outfile.close()
        sys.exit("SVD did not converge! Outputting data to %s" % outfilename)

    #Invert matrices:
    #   U, V are orthonormal, so inversion is just their transposes
    #   W is a diagonal matrix, so its inverse is 1/W
    W1 = 1.0 / W
    U_t = np.transpose(U)
    V = np.transpose(V_t)

    #Remove the smaller values of W
    W1[dimension:] = 0
    W2 = diagsvd(W1, m, m)

    #Solve for the broadening function
    spec = np.transpose(mat(ynew[m / 2:n - m / 2 - 1]))
    temp = np.dot(U_t, spec)
    temp = np.dot(W2, temp)
    Broadening = np.dot(V, temp)

    #Make Broadening function a 1d array
    spacing = xnew[2] - xnew[1]
    xnew = np.arange(model.x[0], model.x[-1], spacing)
    model_new = Model(xnew)
    Broadening = np.array(Broadening)[..., 0]


    #If we get here, the broadening function looks okay.
    #Convolve the model with the broadening function
    model = DataStructures.xypoint(x=xnew)
    Broadened = interp(xnew, np.convolve(model_new, Broadening, mode="same"))
    model.y = Broadened(model.x)

    return FittingUtilities.RebinData(model, data.x)
def main1():
    #Parse command line arguments
    fileList = []
    vsini = 100.0
    resolution = 60000
    Tmin, Tmax, Tstep = 8000, 8800, 200
    Zmin, Zmax, Zstep = -0.5, 0.5, 0.5
    loggmin, loggmax, loggstep = 4.0, 4.0, 1.0
    model_dir = "%s/School/Research/Models/Sorted/Stellar/Vband/" % (os.environ["HOME"])
    for arg in sys.argv[1:]:
        if "vsini" in arg:
            vsini = float(arg.split("=")[-1])
        elif "Tmin" in arg:
            Tmin = float(arg.split("=")[-1])
        elif "Tmax" in arg:
            Tmax = float(arg.split("=")[-1])
        elif "Tstep" in arg:
            Tstep = float(arg.split("=")[-1])
        elif "Zmin" in arg:
            Zmin = float(arg.split("=")[-1])
        elif "Zmax" in arg:
            Zmax = float(arg.split("=")[-1])
        elif "Zstep" in arg:
            Zstep = float(arg.split("=")[-1])
        elif "loggmin" in arg:
            loggmin = float(arg.split("=")[-1])
        elif "loggmax" in arg:
            loggmax = float(arg.split("=")[-1])
        elif "loggstep" in arg:
            loggstep = float(arg.split("=")[-1])
        elif "modeldir" in arg:
            model_dir = arg.split("=")[-1]
        elif "resolution" in arg:
            resolution = float(arg.split("=")[-1])
        else:
            fileList.append(arg)

    if not model_dir.endswith("/"):
        model_dir = model_dir + "/"

    reduce_resolution = True
    v_res = 3e5 / resolution
    if v_res < 0.1 * vsini:
        reduce_resolution = False
        print "Will not reduce detector resolution: %g\t%g" % (v_res, vsini)

    #Read in all of the necessary model files
    allmodels = os.listdir(model_dir)
    file_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(str)))
    for model in allmodels:
        if "BSTAR_MODELS" in model:
            T = float(model[3:6]) * 100
            logg = float(model[7:11])
            Z = float(model[11:15])
        elif "PHOENIX-ACES" in model:
            T = float(model[3:5]) * 100
            logg = float(model[6:10])
            Z = float(model[10:14])
        elif "PHOENIX2004" in model:
            T = float(model[3:5]) * 100
            logg = float(model[6:9])
            Z = float(model[9:13])
        elif "KURUCZ" in model:
            T = float(model[3:5]) * 100
            logg = float(model[6:10])
            Z = float(model[10:14])
        else:
            continue

        #Only save the filenames if in the correct T, logg, and Z range
        if (T >= Tmin and T <= Tmax and
                    logg >= loggmin and logg <= loggmax and
                    Z >= Zmin and Z <= Zmax):

            if file_dict[T][logg][Z] == "":
                file_dict[T][logg][Z] = model
            elif "KURUCZ" in model:
                #Prefer the KURUCZ models that I make over everything else
                file_dict[T][logg][Z] = model
            elif "KURUCZ" in file_dict[T][logg][Z]:
                continue
            elif "PHOENIX-ACES" in model and "PHOENIX2004" in file_dict[T][logg][Z]:
                #Prefer PHOENIX_ACES over PHOENIX2004 (ACES was made in 2009)
                file_dict[T][logg][Z] = model
            else:
                print "Two models with the same T, logg, and Z!"
                print "(1):", file_dict[T][logg][Z]
                print "(2):", model
                inp = raw_input("Which one do you want to use? ")
                if inp == "2":
                    file_dict[T][logg][Z] = model

    #Now, actually read in the models we saved and store as xypoints
    #model_dict = defaultdict( lambda : defaultdict( lambda: defaultdict( DataStructures.xypoint ) ) )
    model_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(interp)))
    for T in file_dict:
        for logg in file_dict[T]:
            for Z in file_dict[T][logg]:
                print "Reading file %s" % file_dict[T][logg][Z]
                x, y = np.loadtxt(model_dir + file_dict[T][logg][Z], usecols=(0, 1), unpack=True)
                #model_dict[T][logg][Z] = DataStructures.xypoint(x=x*units.angstrom.to(units.nm)/1.00026,
                #                                                y=10**y)
                model = DataStructures.xypoint(x=x * units.angstrom.to(units.nm) / 1.00026, y=10 ** y)
                model.cont = FittingUtilities.Continuum(model.x, model.y, fitorder=15, lowreject=1.5, highreject=10)
                model = RotBroad.Broaden(model, vsini * units.km.to(units.cm), )
                model_dict[T][logg][Z] = interp(model.x, model.y)


    #Done reading in the models. Now, loop over the actual data and try to fit
    for fname in fileList:
        orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error")

        #Loop over the models
        best_chisq = 9e99
        best_T = 0
        best_logg = 0
        best_Z = 0
        best_rv = 0
        for T in model_dict:
            for logg in model_dict[T]:
                for Z in model_dict[T][logg]:
                    #First, find the rv from all the orders
                    rv = []
                    for order in orders:
                        order.cont = FittingUtilities.Continuum(order.x, order.y)
                        model = DataStructures.xypoint(x=order.x, y=model_dict[T][logg][Z](order.x))
                        model.cont = FittingUtilities.Continuum(model.x, model.y, lowreject=1.5, highreject=10)
                        if reduce_resolution:
                            model = FittingUtilities.ReduceResolution(model, 60000)
                        #model = RotBroad.Broaden(model, vsini*units.km.to(units.cm))
                        offset = FittingUtilities.CCImprove(order, model, be_safe=False)
                        rv.append(-offset / order.x.mean() * constants.c.cgs.value)

                    #Apply the median rv to all, and determine X^2
                    rv = np.median(rv)
                    chisq = 0.0
                    norm = 0.0
                    for order in orders:
                        order.cont = FittingUtilities.Continuum(order.x, order.y)
                        model = DataStructures.xypoint(x=order.x,
                                                       y=model_dict[T][logg][Z](
                                                           order.x * (1 + rv / constants.c.cgs.value)))
                        model.cont = FittingUtilities.Continuum(model.x, model.y, lowreject=1.5, highreject=10)
                        if reduce_resolution:
                            model.y /= model.cont
                            model = FittingUtilities.ReduceResolution(model, 60000)
                            model.y *= model.cont
                        model.y *= model.cont
                        chisq += np.sum((order.y - model.y / model.cont * order.cont) ** 2 / order.err ** 2)
                        norm += order.size()
                        #plt.plot(order.x, order.y/order.cont, 'k-')
                        #plt.plot(model.x, model.y/model.cont, 'r-')
                    #plt.show()
                    chisq /= float(norm)
                    print T, logg, Z, rv, chisq
                    if chisq < best_chisq:
                        best_chisq = chisq
                        best_T = T
                        best_logg = logg
                        best_Z = Z
                        best_rv = rv

        print "Best fit values:"
        print "T: %g\nlog(g): %g\n[Fe/H]: %g " % (best_T, best_logg, best_Z)

        #Subtract best model
        model_fcn = model_dict[best_T][best_logg][best_Z]
        for order in orders:
            order.cont = FittingUtilities.Continuum(order.x, order.y)
            model = DataStructures.xypoint(x=order.x,
                                           y=model_fcn(order.x * (1 + best_rv / constants.c.cgs.value)))
            model.cont = FittingUtilities.Continuum(model.x, model.y, lowreject=1.5, highreject=10)
            if reduce_resolution:
                model = FittingUtilities.ReduceResolution(model, 60000)
            plt.figure(1)
            plt.plot(order.x, order.y / order.cont, 'k-')
            plt.plot(model.x, model.y / model.cont, 'r-')
            plt.figure(3)
            plt.plot(order.x, order.y / (model.y * order.cont))
            order.y -= model.y / model.cont * order.cont
            plt.figure(2)
            plt.plot(order.x, order.y / order.cont)
        plt.show()
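
A hedged aside on the Doppler step above: CCImprove is assumed to return a
wavelength offset, which rv = -offset / mean(wavelength) * c converts to a
velocity. A self-contained sketch with illustrative numbers (astropy's
constants, as in the snippet):

from astropy import constants

dlam = 0.05        # hypothetical cross-correlation offset, nm
lam_mean = 650.0   # mean wavelength of the order, nm
rv = -dlam / lam_mean * constants.c.cgs.value   # velocity in cm/s
print(rv)
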
def main3():
    model_dir = "%s/School/Research/Models/Sorted/Stellar/Vband/" % (os.environ["HOME"])
    modelfile = "%slte86-4.00-0.5-alpha0.KURUCZ_MODELS.dat.sorted" % model_dir
    vsini = 150.0
    beta = 1.0
    x, y = np.loadtxt(modelfile, usecols=(0, 1), unpack=True)
    MODEL = interp(x * units.angstrom.to(units.nm) / 1.00026, 10 ** y)
    xlin = np.linspace(x[0], x[-1], x.size)
    #model = DataStructures.xypoint(x=xlin, y=MODEL(xlin))
    #model = DataStructures.xypoint(x=x*units.angstrom.to(units.nm)/1.00026, y=10**y)
    #model.cont = FittingUtilities.Continuum(model.x, model.y, fitorder=15, lowreject=1.5, highreject=10)
    #model2 = RotBroad.Broaden(model, vsini*units.km.to(units.cm))

    fname = "HIP_70384.fits"
    orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error")

    for i, order in enumerate(orders):
        DATA = interp(order.x, order.y)
        xlin = np.linspace(order.x[0], order.x[-1], order.x.size)
        order = DataStructures.xypoint(x=xlin, y=DATA(xlin))
        order.cont = FittingUtilities.Continuum(order.x, order.y)
        extended = np.append(np.append(order.y[::-1] / order.cont[::-1], order.y / order.cont),
                             order.y[::-1] / order.cont[::-1])
        plt.plot(order.x, order.y)
        plt.plot(order.x, order.cont)
        plt.show()
        plt.plot(extended)
        plt.show()

        unbroadened = DataStructures.xypoint(x=xlin, y=MODEL(xlin))
        unbroadened.cont = FittingUtilities.Continuum(unbroadened.x, unbroadened.y, fitorder=4, lowreject=1.5,
                                                      highreject=10)
        plt.plot(unbroadened.x, unbroadened.y)
        plt.plot(unbroadened.x, unbroadened.cont)
        plt.show()

        #Fit broadening
        #left = np.searchsorted(model.x, 2*order.x[0] - order.x[-1])
        #right = np.searchsorted(model.x, 2*order.x[-1] - order.x[0])
        #unbroadened = model[left:right]
        size = unbroadened.size()

        #model2 = Broaden(order, unbroadened, m=401, dimension=20)
        #model2 = FittingUtilities.RebinData(model2, order.x)
        ycorr = np.correlate(extended - 1.0, unbroadened.y / unbroadened.cont - 1.0, mode='same')[size:-size]
        #ycorr -= ycorr.min()
        plt.plot(ycorr)
        plt.show()

        model2 = order.copy()
        model2.y = np.correlate(extended, ycorr / ycorr.sum(), mode='same')[size:-size]

        model2.cont = FittingUtilities.Continuum(model2.x, model2.y, lowreject=1.5, highreject=10)
        model2.y = (model2.y / model2.cont - 1) * 10.0 + model2.cont

        plt.figure(1)
        plt.plot(order.x, order.y / order.cont, 'k-')
        plt.plot(unbroadened.x, unbroadened.y / unbroadened.cont, 'g-')
        plt.plot(model2.x, model2.y / model2.cont, 'r-')
        plt.figure(3)
        plt.plot(order.x, order.y / (model2.y * order.cont))
        order.y -= model2.y / model2.cont * order.cont
        plt.figure(2)
        plt.plot(order.x, order.y / order.cont)
        plt.show()
Example #47
def JV(V, Eg, nm, spectrum, Rs, Rsh, ERE):  # ERE is in fractions, not %
    Jph = Jsc(Eg, nm, spectrum)
    J0 = planck(Eg)
    if Rs == 0:
        return [Jph-J0*(exp(x/kT) - 1)/ERE for x in V]
    else:
        return [brentq(JV_Root, 70, -1E10, args=(x, J0, Jph, Eg, Rs, Rsh, ERE)) for x in V]

# <codecell>

V = np.arange(-1, 3, 0.001)
J = JV(V, 1.1, nm, LED_I, 0, Inf, 1)

# <codecell>

f = interp(J, V, bounds_error = False)
f(-60)
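
# <codecell>

# A hedged aside: the same inverse interpolation evaluated at J = 0 reads the
# open-circuit voltage off the curve computed above. interp is assumed to be
# scipy.interpolate.interp1d, as the bounds_error keyword suggests.
Voc = float(interp(J, V, bounds_error=False)(0.0))
Voc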

# <codecell>

plt.plot(V, J)
xlabel('Voltage (V)')
ylabel('Current (mA/cm$^2$)')
ylim(-10,60)

# <codecell>

ERE = 1E-4 # this is in fractions, not %
Rs = 0
Rsh = Inf
bandgaps = linspace(0.3, 3, 50)
Example #48
if __name__ == "__main__":
    # Initialize fitter
    fitter = TelluricFitter.TelluricFitter()
    fitter.SetTelluricLineListFile(linelist)
    LineList = np.loadtxt(linelist)
    logfile = open("fitlog.txt", "w")

    #Find and read in blaze function (MUST BE IN CURRENT DIRECTORY!)
    files = os.listdir("./")
    blazefile = [fname for fname in files if fname.startswith("BLAZE")][0]
    blaze_orders = FitsUtils.MakeXYpoints(blazefile, errors=2)
    blaze_functions = []
    blaze_errors = []
    for order in blaze_orders:
        blaze_functions.append(interp(order.x, order.y))
        blaze_errors.append(interp(order.x, order.err))

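A hedged sketch of how such blaze interpolators are typically applied (this
step is not shown in the snippet; interp is assumed to be
scipy.interpolate.interp1d, and all names below are illustrative):

import numpy as np
from scipy.interpolate import interp1d as interp

wave = np.linspace(500.0, 510.0, 200)
blaze_shape = np.sin((wave - 500.0) / 10.0 * np.pi) ** 2 + 0.1
flux = 1000.0 * blaze_shape                # toy spectrum carrying the blaze
blaze_fcn = interp(wave, blaze_shape)      # analogous to blaze_functions[i]
corrected = flux / blaze_fcn(wave)         # blaze divided out: ~1000 everywhere
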
    #filename is the name of the file to re-correct, and should be given first
    fname = sys.argv[1]
    header = pyfits.getheader(fname)
    orders = FitsUtils.MakeXYpoints(fname, errors=2)

    date = header["DATE-OBS"]
    time = header["UT"]
    t_seg = time.split(":")
    time = 3600 * float(t_seg[0]) + 60 * float(t_seg[1]) + float(t_seg[2])

    #Read in weather information (archived data is downloaded from weather.as.utexas.edu)
    infile = open(weather_file)
    lines = infile.readlines()
Example #49
    def test_Run(self):

        wt_layout = generate_random_wt_layout()
        x_g, y_g, z_g = get_T2T_gl_coord(wt_layout)
        dt = wt_layout.wt_array(attr='rotor_diameter')

        # Interpolate power curves to have the same number of elements
        nu = 22
        p_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='power_curve')[j][:,0],
                wt_layout.wt_array(attr='power_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])

        ct_c = np.array([[[np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i],
                interp(wt_layout.wt_array(attr='c_t_curve')[j][:,0],
                wt_layout.wt_array(attr='c_t_curve')[j][:,1],
                np.linspace(wt_layout.wt_array(attr='cut_in_wind_speed')[j],
                wt_layout.wt_array(attr='cut_out_wind_speed')[j],nu)[i])] \
                for i in range(nu)] for j in range(wt_layout.n_wt)])
        for iwt, wt in enumerate(wt_layout.wt_list):
            wt.power_curve = p_c[iwt,:,:]
            wt.c_t_curve = ct_c[iwt,:,:]

        rho = np.min(wt_layout.wt_array(attr='air_density'))
        ws_ci = wt_layout.wt_array(attr='cut_in_wind_speed')
        ws_co = wt_layout.wt_array(attr='cut_out_wind_speed')
        a1, a2, a3, a4, b1, b2 = [0.5, 0.9, -0.124, 0.13, 15.63, 1.0]
        pars = [a1, a2, a3, a4, b1, b2]
        ws = 8.0
        wd = 270.0
        ti = 0.07
        ng = 5
        inputs = dict(
            ws=ws,
            wd=wd,
            ti=ti,
            ng=ng
        )

        P_WT, U_WT, Ct = FortranGCL.gcl(
            x_g, y_g, z_g, dt, p_c, ct_c, ws, wd, ti,
            a1, a2, a3, a4, b1, b2, ng, rho, ws_ci, ws_co)

        fgcl = FusedFGCL()
        # Setting the inputs
        fgcl.wt_layout = wt_layout
        for k, v in rosettaGCL.iteritems():
            setattr(fgcl, v, inputs[k])
        fgcl.pars = pars
        fgcl.run()
        if not np.allclose(P_WT, fgcl.wt_power, rtol=1.e-5, atol=1e-7):
            save(wt_layout, 'failures/FGCLarsenTestCase_' +
                 time.strftime('%d_%m_%Y__%H_%M') + '.p',
                 fmt=4, proto=-1, logger=None)

        np.testing.assert_allclose(P_WT, fgcl.wt_power, rtol=1.e-5, atol=1e-7)
        np.testing.assert_allclose(U_WT, fgcl.wt_wind_speed, rtol=1.e-5, atol=1e-7)
Example #50
def MedianAdd(fileList, outfilename="Total.fits"):
    all_data = []
    numorders = []
    medians = []
    for fname in fileList:
        observation = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", cont="continuum",
                                             errors="error")
        all_data.append(observation)
        numorders.append(len(observation))
        medians.append([np.median(order.y) for order in observation])

    if any(n != numorders[0] for n in numorders):
        print "Error! Some of the files had different numbers of orders!"
        for i in range(len(fileList)):
            print fileList[i], numorders[i]
        sys.exit()

    # If we get this far, all is well. Add each order individually
    numorders = numorders[0]
    if outfilename == "None":
        outfilename = "Total.fits"
    column_list = []
    for i in range(numorders):
        x = all_data[0][i].x
        total = np.zeros((len(all_data), x.size))
        error = np.zeros(x.size)
        norm = 0.0
        for j, observation in enumerate(all_data):
            observation[i].y[observation[i].y < 0.0] = 0.0
            flux = interp(observation[i].x, observation[i].y / medians[j][i])
            error += interp(observation[i].x, observation[i].err ** 2, k=1)(x)
            total[j] = flux(x)
            norm += medians[j][i]

            #pylab.figure(2)
            #for j in range(total.shape[0]):
            #pylab.plot(x, total[j])
        flux = np.median(total, axis=0) * norm
        cont = FittingUtilities.Continuum(x, flux, fitorder=3, lowreject=1.5, highreject=5)
        #Set up data structures for OutputFitsFile
        columns = {"wavelength": x,
                   "flux": flux,
                   "continuum": cont,
                   "error": np.sqrt(error)}
        column_list.append(columns)

        #pylab.figure(1)
        #pylab.plot(x, flux/cont)
        #pylab.plot(total.x, total.cont)

    print "Outputting to %s" % outfilename
    #pylab.show()
    FitsUtils.OutputFitsFileExtensions(column_list, fileList[0], outfilename, mode="new")

    #Add the files used to the primary header of the new file
    hdulist = pyfits.open(outfilename, mode='update')
    header = hdulist[0].header
    for i in range(len(fileList)):
        header.set("FILE%i" % (i + 1), fileList[i], "File %i used in Co-Adding" % (i + 1))
    hdulist[0].header = header
    hdulist.flush()
    hdulist.close()
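
The core of MedianAdd, reduced to plain arrays: normalize each observation by
its median, interpolate onto a common wavelength grid, median-combine, then
restore the summed scale. A minimal sketch with synthetic data (interp assumed
to be scipy.interpolate.interp1d):

import numpy as np
from scipy.interpolate import interp1d as interp

rng = np.random.default_rng(0)
x_common = np.linspace(500.0, 600.0, 500)
stack, norm = [], 0.0
for k in range(3):
    wave = np.linspace(499.0, 601.0, 800)
    flux = 100.0 * (k + 1) * (1.0 + 0.01 * rng.standard_normal(wave.size))
    med = np.median(flux)
    stack.append(interp(wave, flux / med)(x_common))  # normalized, regridded
    norm += med
combined = np.median(np.array(stack), axis=0) * norm  # co-added spectrum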
Example #51
    def do(self):
        self.setup_plots()

        ## Start time and temperature
        self.filename = time.strftime('%Y%m%d_%H%M%S') + '_scan'
        self.timestamp = time.strftime("%Y-%m-%d @ %I:%M:%S%p")
        tstart = time.time()
        self.temp_start = self.montana.temperature['platform']
        linecuts = {}  # raw data for each linecut, filled in below

        ## make sure all points are not out of range of piezos before starting anything
        for i in range(self.X.shape[0]):
            self.piezos.check_lim({'x':self.X[i,:],
                                    'y':self.Y[i,:],
                                    'z':self.Z[i,:]
                                    }
                                )

        ## Loop over Y values
        for i in range(self.X.shape[1]):

            ## Explicitly go to first point of scan
            self.piezos.V = {'x': self.X[0,i],
                            'y': self.Y[0,i],
                            'z': self.Z[0,i]
                            }
            self.array.reset()
            time.sleep(3)

            ## Do the sweep
            Vstart = {'x': self.X[0,i], 'y': self.Y[0,i], 'z': self.Z[0,i]}
            Vend = {'x': self.X[-1,i], 'y': self.Y[-1,i], 'z': self.Z[-1,i]}
            out, V, t = self.piezos.sweep(Vstart, Vend) # sweep over X

            ## Save linecuts
            linecuts[str(i)] = {"Vstart": Vstart,
                                "Vend": Vend,
                                "Vsquid": {"Vdc": V[self.sig_in], 
                                           "Vac_x": V[self.sig_in_ac_x],
                                           "Vac_y": V[self.sig_in_ac_y]}}

            ## Interpolate to the number of lines
            interp_func = interp(out['x'], V[self.sig_in])
            self.V[:,i] = interp_func(self.X[:,i]) # changes from actual output data to give desired number of points

            interp_func = interp(out['x'], V[self.sig_in_ac_x])
            self.Vac_x[:,i] = interp_func(self.X[:,i])

            interp_func = interp(out['x'], V[self.sig_in_ac_y])
            self.Vac_y[:,i] = interp_func(self.X[:,i])

            interp_func = interp(out['x'], V[self.cap_in])
            self.C[:,i] = interp_func(self.X[:,i])

            self.last_full_out = out['x']
            self.last_full_sweep = V[self.sig_in]
            self.save_line(i, Vstart)

            self.last_interp_out = self.X[:,i]
            self.last_interp_sweep = self.V[:,i]

            self.plot()

        self.piezos.V = 0
        self.save()

        tend = time.time()
        print('Scan took %f minutes' %((tend-tstart)/60))
        return
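
The interpolation step of the scan above, as a standalone sketch: the sweep
returns samples at whatever positions the hardware reports, and interp
resamples them onto the requested grid of points (toy data; interp assumed to
be scipy.interpolate.interp1d):

import numpy as np
from scipy.interpolate import interp1d as interp

out_x = np.sort(np.random.rand(200)) * 10.0          # actual sweep positions
v = np.sin(out_x)                                    # measured signal
x_grid = np.linspace(out_x.min(), out_x.max(), 64)   # desired number of points
v_grid = interp(out_x, v)(x_grid)                    # like self.V[:, i] above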
Example #52
def interpmap( xsamp, ysamp, xin, yin, zref ):
    i = interp( numpy.asarray( [xin, yin] ).T, zref, fill_value=0.0 )
    return i( numpy.asarray( [xsamp, ysamp] ).T )
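
A hedged usage sketch for interpmap, assuming the module's interp is a
scattered-data interpolator along the lines of
scipy.interpolate.LinearNDInterpolator (an (N, 2) point array, values, and a
fill_value, matching the call above); all names below are illustrative:

xin = numpy.random.rand(100)
yin = numpy.random.rand(100)
zref = numpy.sin(xin) + numpy.cos(yin)
zs = interpmap(numpy.array([0.2, 0.8]), numpy.array([0.3, 0.7]), xin, yin, zref)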
Example #53
    if any(n != numorders[0] for n in numorders):
        print "Error! Some of the files had different numbers of orders!"
        for i in range(len(fileList)):
            print fileList[i], numorders[i]
        sys.exit()

    # If we get this far, all is well. Add each order individually
    numorders = numorders[0]
    outfilename = "%s.fits" % (name.replace(" ", "_"))
    print "Outputting to %s" % outfilename
    column_list = []
    for i in range(numorders):
        total = all_data[0][i].copy()
        total.err = total.err ** 2
        for observation in all_data[1:]:
            flux = interp(observation[i].x, observation[i].y)
            error = interp(observation[i].x, observation[i].err ** 2, k=1)
            total.y += flux(total.x)
            total.err += error(total.x)
        total.err = np.sqrt(total.err)
        total.cont = FindContinuum.Continuum(total.x, total.y, fitorder=3, lowreject=2, highreject=5)

        #Set up data structures for OutputFitsFile
        columns = {"wavelength": total.x,
                   "flux": total.y,
                   "continuum": total.cont,
                   "error": total.err}
        column_list.append(columns)
        pylab.plot(total.x, total.y)
        pylab.plot(total.x, total.cont)
Example #54
##########################################################################################################################################################
# fitting scheme
J_array = np.linspace(14,22,200)
J_new 	= np.empty([0])
min_LikeJ  = np.empty([0])
min_ra_arr = np.empty([0])
min_r0_arr = np.empty([0])

for J in J_array:                                                # scan over an array of J values
    r0_new  = np.empty([0])
    ra_new  = np.empty([0])
    LikeJr0 = np.empty([0])
    for j,r0 in enumerate(r0_array):                             # for each J scan over an array of r0 values
        LikeJra = np.zeros_like(ra_array)
        for i in range(ra_array.size): LikeJra[i] = logLike(10**J,i,j)
        interp_Like_ra = interp(ra_array,LikeJra)                  # build the profile likelihood along ra
        
        eval_Like_ra = np.logspace(log10(ra_array.min()),log10(ra_array.max()),1000)
        min_Like_ra  = interp_Like_ra(eval_Like_ra).min()
        min_ra 	     = eval_Like_ra[np.where(interp_Like_ra(eval_Like_ra)==min_Like_ra)[0][0]]
        
        if ra_array[1]<min_ra<ra_array[-2]:
            LikeJr0 = np.append(LikeJr0,min_Like_ra)
            ra_new  = np.append(ra_new,min_ra)
            r0_new  = np.append(r0_new,r0)
    
    if LikeJr0.size>3:
        interp_ra = interp(r0_new,ra_new)
        interp_r0 = interp(r0_new,LikeJr0)                  # build the profile likelihood along r0
        
        eval_Like_r0 = np.logspace(log10(r0_new.min()),log10(r0_new.max()),1000)
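
The profiling pattern above in miniature: sample the likelihood on a coarse
grid, interpolate, then locate the minimum on a dense grid. Everything below
is illustrative (interp assumed to be scipy.interpolate.interp1d):

import numpy as np
from scipy.interpolate import interp1d as interp

grid = np.linspace(0.1, 10.0, 15)
like = (np.log10(grid) - 0.3) ** 2       # toy likelihood profile
f = interp(grid, like)
dense = np.linspace(grid.min(), grid.max(), 1000)
best = dense[np.argmin(f(dense))]        # location of the profiled minimum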
Example #55
    for fname in fileList:
        if extensions:
            orders = FitsUtils.MakeXYpoints(fname, extensions=extensions, x="wavelength", y="flux", errors="error")
            if tellurics:
                model_orders = FitsUtils.MakeXYpoints(fname, extensions=extensions, x="wavelength", y="model")
                for i, order in enumerate(orders):
                    orders[i].cont = FindContinuum.Continuum(order.x, order.y, lowreject=2, highreject=2)
                    orders[i].y /= model_orders[i].y

        else:
            orders = FitsUtils.MakeXYpoints(fname, errors=2)
        numorders = len(orders)
        for i, order in enumerate(orders[::-1]):
            #Only use the middle half of each order (lots of noise on the edges)
            DATA = interp(order.x, order.y)
            CONT = interp(order.x, order.cont)
            ERROR = interp(order.x, order.err)
            left = int(order.size() / 4.0)
            right = int(order.size() * 3.0 / 4.0 + 0.5)
            order.x = np.linspace(order.x[left], order.x[right], right - left + 1)
            order.y = DATA(order.x)
            order.cont = CONT(order.x)
            order.err = ERROR(order.x)


            #Remove bad regions from the data
            for region in badregions:
                left = np.searchsorted(order.x, region[0])
                right = np.searchsorted(order.x, region[1])
                if left == 0 or right == order.size():
Example #56
def radial( container, h=None ):
    
    
    # Create mesh and define function space
    # Number of discretization points
    
    # At this distance, the correlation is
    # approximately 10^-3
    if "square" in container.mesh_name:
        ran = 20.
    elif "parallelogram" in container.mesh_name:
        ran = 5.
    elif  "antarctica" in container.mesh_name:
        ran = 8e3
    elif "cube" in container.mesh_name:
        ran = 2.
    else:
        raise ValueError( "Unknown mesh: " + container.mesh_name )

    # Given a range, we should be able to determine
    # N from the mesh parameter
    if h is None:
        h = container.mesh_obj.hmin()
    N = int( ran / h )
        
    mesh_obj = IntervalMesh( N+1, 0, ran )
    V = FunctionSpace( mesh_obj, "CG", 1 ) 
    u = TrialFunction(V)
    v = TestFunction(V)
    f = Constant( 0.0 ) 

    # The factors don't REALLY matter, since they cancel out
    
    d = container.dim
    areaOfUnitSphere = 2 * math.pi**(d/2.0) / math.gamma(d/2.0)
    if d == 2:
        X = Expression( str(areaOfUnitSphere) + "* x[0]     ", degree=4) 
    elif d == 3:
        X = Expression( str(areaOfUnitSphere) + "* x[0]*x[0]", degree=4)
    else:
        raise ValueError( "Dimension has to be 2 or 3." )
    
    kappa = container.kappa
    a = X * (kappa*kappa*u*v +  inner(grad(u), grad(v))) * dx 
    m = X *              u*v                             * dx 
    L = f *                v                             * dx

    A, b = assemble_system ( a, L )
    M = assemble( m )

    # Get G1 ###########################

    # Impose rhs delta function at origin
    delta = PointSource ( V, Point ( 0.0 ), 1.0  ) 
    delta.apply ( b )

    # Compute solution
    G1_func = Function(V)
    solve ( A, G1_func.vector(), b )

    # Get G2 via solving for G1 #######
    G2_func = Function(V)
    MG1_func = M*G1_func.vector()
    solve ( A, G2_func.vector(), MG1_func )
        
    coo = np.ravel( mesh_obj.coordinates() ) 
    G1 = []
    G2 = []
    for x in coo:
        G1.append( G1_func(x) ) 
        G2.append( G2_func(x) )

    G1 = np.array( G1 )
    G2 = np.array( G2 )
    
    dG1 = np.zeros( len(G1) )
    dG2 = np.zeros( len(G2) )
    
    # Derivative via finite difference, basically
    dG1[0:-1] = ( G1[1:] - G1[0:-1] ) / h  
    dG2[0:-1] = ( G2[1:] - G2[0:-1] ) / h
    
    # import pdb
    # pdb.set_trace()
    G1  = interp( coo, G1,  kind = 'linear' )
    G2  = interp( coo, G2,  kind = 'linear' )
    dG1 = interp( coo, dG1, kind = 'zero'   )
    dG2 = interp( coo, dG2, kind = 'zero'   )
        
    return G1, dG1, G2, dG2
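
A small, self-contained illustration of the two interpolant kinds returned
above (pure scipy, no FEniCS required; interp as scipy.interpolate.interp1d):

import numpy as np
from scipy.interpolate import interp1d as interp

xs = np.linspace(0.0, 1.0, 6)
ys = xs ** 2
f_lin = interp(xs, ys, kind='linear')   # piecewise linear, like G1 and G2
f_zoh = interp(xs, ys, kind='zero')     # piecewise constant, like dG1 and dG2
print(f_lin(0.25), f_zoh(0.25))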
Example #57

    x, y = np.loadtxt(modelfile, usecols=(0, 1), unpack=True)
    model = DataStructures.xypoint(x=x * units.angstrom.to(units.nm) / 1.00026,
                                   y=10 ** y)
    # model.cont = FittingUtilities.Continuum(model.x, model.y, fitorder=21, lowreject=1.5, highreject=20)

    output_list = []
    for fname in sys.argv[1:]:
        logfilename = "fitlog_%s.txt" % fname
        logfile = open(logfilename, "w")
        logfile.write("Fitting Information file %s\n" % fname)
        logfile.close()
        orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error")
        for ordernum, order in enumerate(orders):
            print "Order %i" % ordernum
            if ordernum == 66:
                continue
            #Linearize
            datafcn = interp(order.x, order.y)
            x = np.linspace(order.x[0], order.x[-1], order.size())
            order = DataStructures.xypoint(x=x, y=datafcn(x))
            order.cont = FittingUtilities.Continuum(order.x, order.y)
            logfile = open(logfilename, "a")
            logfile.write("\n*****************************\n")
            logfile.write("Order %i:\n" % ordernum)
            logfile.write("*****************************\n")
            logfile.close()
            model2 = FitData(model, order, 140, 0.6, 60000, nlines=100, logfilename=logfilename)

            plt.figure(1)
            plt.plot(order.x, order.y / order.cont, 'k-')
            plt.plot(model2.x, model2.y, 'r-')
            plt.figure(2)
            plt.plot(order.x, order.y / (order.cont * model2.y))
Example #58
def Filter(fname, vsini=100, numiters=100, lowreject=3, highreject=3):
    orders = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error")
    column_list = []
    for i, order in enumerate(orders):
        smoothed = HelperFunctions.IterativeLowPass(
            order, vsini * units.km.to(units.cm), numiter=numiters, lowreject=lowreject, highreject=highreject
        )
        plt.plot(order.x, order.y)
        plt.plot(order.x, smoothed)
        plt.show()
        continue

        done = False
        x = order.x.copy()
        y = order.y.copy()
        indices = np.array([True] * x.size)
        iteration = 0
        while not done and iteration < numiters:
            done = True
            iteration += 1
            smoothed = FittingUtilities.savitzky_golay(y, window_size, smoothorder)
            residuals = y / smoothed
            if iteration == 1:
                # Save the first and last points, for interpolation
                first, last = smoothed[0], smoothed[-1]
            mean = residuals.mean()
            std = residuals.std()
            print residuals.size, x.size, y.size
            # plt.plot((residuals - mean)/std)
            # plt.show()
            badindices = np.where(
                np.logical_or((residuals - mean) / std > highreject, (residuals - mean) / std < -lowreject)
            )[0]
            if badindices.size > 1 and y.size - badindices.size > 2 * window_size and iteration < numiters:
                done = False
                x = np.delete(x, badindices)
                y = np.delete(y, badindices)

        print "iter = %i" % iteration
        if x[0] > order.x[0]:
            x = np.append(np.array([order.x[0]]), x)
            smoothed = np.append(np.array([first]), smoothed)
        if x[-1] < order.x[-1]:
            x = np.append(x, np.array([order.x[-1]]))
            smoothed = np.append(smoothed, np.array([last]))
        print x.size, y.size, smoothed.size
        smooth_fcn = interp(x, smoothed, k=1)
        smoothed = smooth_fcn(order.x)

        order.cont = FittingUtilities.Continuum(order.x, order.y)
        plt.figure(1)
        plt.plot(order.x, order.y / order.cont, "k-")
        plt.plot(order.x, smoothed / order.cont, "r-")
        # plt.figure(2)
        # plt.plot(order.x, order.y/smoothed)
        # plt.plot(order.x, smoothed)
        # orders[i].y /= smoothed
        column = {
            "wavelength": order.x,
            "flux": order.y / smoothed,
            "continuum": np.ones(order.x.size),
            "error": order.err,
        }
        column_list.append(column)
    plt.show()
    outfilename = "%s_smoothed.fits" % (fname.split(".fits")[0])
    print "Outputting to %s" % outfilename
Example #59

    fileList = []
    for arg in sys.argv[1:]:
        if "-vsini" in arg:
            vsini = float(arg.split("=")[-1]) * units.km.to(units.cm)
        elif "-rv" in arg:
            vel = float(arg.split("=")[-1])
        elif "-temp" in arg:
            T = float(arg.split("=")[-1])
        else:
            fileList.append(arg)

    for fname in fileList:
        orders_original = FitsUtils.MakeXYpoints(fname, extensions=True, x="wavelength", y="flux", errors="error")

        #Loop over orders, removing bad parts
        numorders = len(orders_original)
        for i, order in enumerate(orders_original[::-1]):
            #Linearize
            DATA = interp(order.x, order.y)
            CONT = interp(order.x, order.cont)
            ERROR = interp(order.x, order.err)
            left, right = 20, -20
            order.x = np.linspace(order.x[left], order.x[right], order.size())
            order.y = DATA(order.x)
            order.cont = CONT(order.x)
            order.err = ERROR(order.x)

            #Interpolate over bad pixels
            if i in badpixels_by_order:
                left, right = badpixels_by_order[i][0], badpixels_by_order[i][1]
                m = (order.y[right] - order.y[left]) / (order.x[right] - order.x[left])
                order.y[left:right] = m * (order.x[left:right] - order.x[left]) + order.y[left]
                order.err[left:right] = 9e9
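
The bad-pixel bridge above, reduced to a toy array (pixel index stands in for
order.x here; values are illustrative):

import numpy as np

y = np.array([1.0, 1.1, 9.9, 9.8, 1.2, 1.3])    # indices 1..3 are flagged bad
left, right = 1, 4
m = (y[right] - y[left]) / float(right - left)  # slope between good endpoints
y[left:right] = m * (np.arange(left, right) - left) + y[left]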
Example #60
##########################################################################################################################################################
# fitting scheme
J_array = np.linspace(15,25,100)
LikeJ = np.zeros_like(r0_array)
J_new = np.empty([0])
min_LikeJ = np.empty([0])
num = 8
div = np.size(r0_array) // num  # integer division; div sizes min_r0 below
acc = 3.
for J in J_array:
    min_r0 = np.zeros(shape=(div,2))
    for j,r0 in enumerate(r0_array):
        log10rho0 = sciopt.minimize_scalar(deltaJ,args=(J,j)).x
        LikeJ[j] = logLike(10**log10rho0*r0**3,j)
    spline_LikeJ = interp(r0_array,LikeJ)
    
    a, b = 0, div-1
    for i in range(num):
        loc_min_r0 = sciopt.minimize_scalar(spline_LikeJ,method='Bounded',bounds=(r0_array[a],r0_array[b]))
        if (r0_array[a]+r0_array[a+1])/acc<loc_min_r0.x<(r0_array[b-1]+r0_array[b])/acc:
            min_r0[i,:] = (loc_min_r0.x,loc_min_r0.fun)
        a = b
        b += div
    
    min_r0 = np.delete(min_r0,np.where(min_r0==0.),axis=0)
    new_min_r0 = np.array([min_r0[i] for i in sorted(range(len(min_r0)),key = lambda k : min_r0[k,1])])
    if new_min_r0.size!=0:
        min_LikeJ = np.append(min_LikeJ,new_min_r0[0,1])
        J_new = np.append(J_new,J)
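
The inner step of this scheme, isolated: interpolate the likelihood samples
and minimize the resulting callable within fixed bounds. Toy data throughout
(interp assumed to be scipy.interpolate.interp1d):

import numpy as np
import scipy.optimize as sciopt
from scipy.interpolate import interp1d as interp

r0 = np.linspace(0.5, 5.0, 20)
like = (r0 - 2.2) ** 2                    # toy likelihood along r0
spline = interp(r0, like, kind='cubic')
res = sciopt.minimize_scalar(spline, method='Bounded', bounds=(r0[0], r0[-1]))
print(res.x, res.fun)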