Example #1
 def animate_notebook(self,
                      follow=None,
                      t_start=None,
                      t_stop=None,
                      t_delta=None):
     import time
     t = time.time()
     rate = 0.02  # minimum seconds between animation frames
     for value in self.simulate(t_start,
                                t_stop,
                                t_delta,
                                animate=True,
                                follow=follow):
         # throttle to at most one frame every `rate` seconds
         wait_time = (t + rate) - time.time()
         if wait_time > 0:
             time.sleep(wait_time)
         t = time.time()
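# A self-contained sketch of the same frame-throttling pattern, with the
# simulate() generator above replaced by a plain range loop (names here are
# illustrative, not from the original class):
import time

def throttled_loop(n_frames, rate=0.02):
    t = time.time()
    for frame in range(n_frames):
        # ... draw the frame here ...
        wait_time = (t + rate) - time.time()
        if wait_time > 0:
            time.sleep(wait_time)
        t = time.time()

throttled_loop(5)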
Example #2
 def check_timesup():
     # closure: `timeout`, `process_start_time`, `mask` and `job_server`
     # are free variables from the enclosing scope
     if timeout is not None:
         if time.time() - process_start_time > timeout * float(np.sum(mask)):
             logging.warning('process time reached timeout * number of binned pixels = {}*{} s'.format(timeout, np.sum(mask)))
             logging.info(orb.utils.parallel.get_stats_str(job_server))
             return True
     return False
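# Minimal sketch of the surrounding scope this closure relies on (values
# are illustrative assumptions; job_server comes from orb's parallel helpers):
import time
import numpy as np

timeout = 0.5                         # seconds allowed per masked pixel
mask = np.ones((16, 16), dtype=bool)  # pixels selected for processing
process_start_time = time.time()
# ... submit jobs, then poll periodically:
# if check_timesup(): break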
Example #3
import time

import astropy.time


def julian_date(unix_t=None):
    '''Return (current) time as a Julian date.

    Parameters
    ----------
    unix_t : float, seconds since the Epoch, default=now

    Returns
    -------
    jd : float, julian date'''
    if unix_t is None:
        unix_t = time.time()
    t = astropy.time.Time(unix_t, format='unix')
    return t.jd
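# Quick sanity check: the Unix epoch (1970-01-01 00:00 UTC) falls on
# Julian date 2440587.5, so:
print(julian_date(0))  # -> 2440587.5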
Example #4
import time

import astropy.time


def unix_time(jd=None):
    '''Return (current) time, in seconds since the Epoch (00:00:00
    Coordinated Universal Time (UTC), Thursday, 1 January 1970).

    Parameters
    ----------
    jd : float, julian date, default=now

    Returns
    -------
    t : float, seconds since the Epoch'''
    if jd is None:
        return time.time()
    else:
        t = astropy.time.Time(jd, format='jd')
        return t.unix
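# Round trip with julian_date() from Example #3 (assuming both helpers are
# in scope); the Unix epoch is JD 2440587.5:
print(unix_time(2440587.5))       # -> 0.0
print(unix_time(julian_date(0)))  # -> 0.0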
Example #5
def update(frame):
    """
    Handler function to update the plot - only changed items. Currently we redraw
    only Field-of-View, ISS (most speedy object) and time caption.
    """

    if benchmark:
        end = time.time()
        global start
        print(end - start)
        start = end

    global obs_time
    obs_time += animation_interval * time_scale * u.ms

    star_chart.update(obs_time)
    # no backslash continuation inside the literal, so the caption does not
    # pick up the source indentation
    text_line[0].set_text("{0}, ( {1:.2f}, {2:.2f} )\n{3}\n".format(
        location_str, obs_loc.lat, obs_loc.lon,
        str(obs_time + (utc_offset * u.hour))[:19]))

    # objects to redraw
    return anim_tuple
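# Sketch of how a handler like this is registered with matplotlib (hedged:
# `fig` and `animation_interval` are assumed to exist in the surrounding
# script; blit=True requires update() to return the artists to redraw,
# which it does via anim_tuple):
from matplotlib.animation import FuncAnimation

anim = FuncAnimation(fig, update, interval=animation_interval, blit=True)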
Example #6
import numpy as np
from astropy.time import Time as time
from astropy.io import ascii as io
from astropy.table import QTable as qt
import astropy.coordinates as co
import astropy.units as u
from datetime import datetime, timedelta

loc = co.EarthLocation(lat='41.505493d', lon='-81.681290d')  # sets our location

now = time.now()

# Time.now() holds a datetime, so now.value is a datetime object.
# Pick ~3:00 UTC of the coming night (11 PM local time, since astropy's
# time routines work in UTC); after ~17:00 UTC roll over to the next
# calendar day, so that late-night observing isn't interfered with.
base = now.value
if base.hour > 17:
    base += timedelta(days=1)  # safe across month/year boundaries
tonight = time(datetime(base.year, base.month, base.day, 3, 0, 0),
               location=loc)

sidnow = now.sidereal_time(kind='apparent',
                           longitude='-81.681290d',
                           model='IAU2000A')
sidereal = tonight.sidereal_time(kind='apparent',
                                 longitude='-81.681290d',
                                 model='IAU2000A')
Example #7
import os
import time

import numpy as np

# smft_directory is referenced below but was not defined in the original
# snippet; this value is an assumption mirroring hmi_directory
smft_directory = 'D:\\data\\smft-hmi\\smft\\'
hmi_directory = 'D:\\data\\smft-hmi\\hmi\\'

s_flist = os.listdir(smft_directory)
h_flist = os.listdir(hmi_directory)

s_files_list = [smft_directory + f for f in s_flist]
h_files_list = [hmi_directory + f for f in h_flist]

#list of regions to use
#reg_list = [0, 1, 3, 6, 8]
reg_list = [2]

poly_coeffs = np.array(
    [-1.66244474e-11, -1.61810733e-09, 7.03341500e-05, 7.74491899e-04])

start = time.time()

nodes = 5

#threshold_list_s = np.linspace(0.35, 0.5, nodes)
#threshold_list_w = np.linspace(0.55, 0.7, nodes)
threshold_matrix = np.zeros((nodes, nodes))

threshold_list_s = [0.4]
threshold_list_w = [0.70]

scale_list = []
time_list = []

#arrays for images
smft_pic_arr = []
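# poly_coeffs above is ordered highest power first, matching numpy's
# np.polyval convention; a hedged illustration (the tie to the SMFT/HMI
# calibration is an assumption, not stated in the original):
raw_signal = np.linspace(0.0, 1.0, 5)  # dummy input values
print(np.polyval(poly_coeffs, raw_signal))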
Example #8
    def process_by_pixel(self, func, args=None, modules=None, out=None,
                         kwargs=None,
                         mask=None, binning=1,
                         timeout=None):
        """Parallelize a function taking binned spectra of the cube as
        an input. All pixels are gone through unless a mask is passed
        which indicates the pixels that must be processed. The typical
        results returned are maps.

        :param func: The parallelized function. Must be func(spectrum,
          *args, kwargs_dict) and return either a dict of float values
          (e.g. {a:1.9, b:5.6, ...}) or a 1d array of floats. If it
          returns a dict, out must be left to its default value (an
          empty dict). If a 1d array of size N is returned, the out
          param must be set to a 3d array of shape (cube.dimx,
          cube.dimy, N). If supplied, kwargs are passed to the
          function as the last argument in a dict object. Note also
          that velocity is not corrected on the fly at data
          extraction, so the called function must handle it.

        :param args: List of arguments passed to the function. An
          argument can itself be a function, in which case it must be
          f(x, y), x and y being the pixel coordinates. Note that x
          and y can also be lists of coordinates, in which case the
          function must return a list of data.

        :param modules: Modules to import to run the function.

        :param out: depends on the returned values of func. See param
          func.

        :param kwargs: kwargs of the function func. If supplied,
          kwargs are passed to the function as the last argument in a
          dict object.

        :param mask: A 2d array of bool. True values give the pixels on
          which the function must be applied.

        :param binning: On-the-fly data binning.

        .. note:: Any argument with a shape equal to the x,y shape of
          the cube (or the binned x,y shape) will be mapped, i.e., the
          argument passed to the vector function will be the value
          corresponding to the position of the extracted
          spectrum. (works also for 3d shaped arguments, the 3rd
          dimension can have any size)

        """

        # guard against the mutable-default-argument pitfall (args is
        # mutated below when the kwargs are appended to it)
        if args is None: args = list()
        if modules is None: modules = list()
        if out is None: out = dict()
        if kwargs is None: kwargs = dict()

        def process_in_row(*args):
            """Basic row processing for a vector function"""
            import marshal, types
            import traceback
            import numpy as np
            import logging
            import orb.utils.log

            # remove last argument which gives the process_in_row options as a dict
            # WARNING: must be the first statement of the function
            process_in_row_args = args[-1]
            args = tuple(args[:-1])

            if process_in_row_args['debug']:
                orb.utils.log.setup_socket_logging()
                
            mapped = process_in_row_args['mapped']
            
            ## function is deserialized (marshal.loads of the code object)
            _code = marshal.loads(args[0])
            _func = types.FunctionType(_code, globals(), '_func')
            irow_data = np.squeeze(args[1])
            irow_data = np.atleast_2d(irow_data)
            out_row = list()

            for i in range(irow_data.shape[0]):
                iargs_list = list()
                # remap arguments
                for j, iarg in enumerate(args[2:]):
                    if mapped[j]:
                        iarg = np.squeeze(iarg)
                        shape = iarg.shape
                        if len(shape) > 0:
                            if shape[0] == irow_data.shape[0]:
                                iarg = iarg[i, ...]
                        
                    iargs_list.append(iarg)

                # last arg gives the kwargs which are eventually passed as a dict
                ikwargs_keys = iargs_list.pop(-1)
                ikwargs = dict()
                for ikey in range(len(ikwargs_keys)):
                    ikwargs[ikwargs_keys[-(ikey + 1)]] = iargs_list.pop(-1)
                #for ikey in ikwargs:
                #    logging.debug('{} {}'.format(ikey, ikwargs[ikey]))
                iargs_list.append(ikwargs)
                try:
                    out_row.append(_func(irow_data[i,:], *iargs_list))
                    #out_row.append(str([type(iarg) for iarg in iargs_list]))
                    #out_row.append(str(len(mapped)))
                except Exception as e:
                    out_row.append(traceback.format_exc())
                    logging.warning('Exception occurred in process_in_row at function call level: {}'.format(e))

            return out_row

        ## function must be serialized (or pickled)
        job_server, ncpus = orb.utils.parallel.init_pp_server()
        
        func = marshal.dumps(func.__code__)

        binning = int(binning)

        binned_shape = orb.utils.image.nanbin_image(
            np.ones((self.dimx, self.dimy)),
            int(binning)).shape

        def isbinned(_data):
            if (_data.shape[0] == self.dimx
                and _data.shape[1] == self.dimy):
                return False
            elif (_data.shape[0] == binned_shape[0]
                  and _data.shape[1] == binned_shape[1]):
                return True
            else:
                raise Exception(
                    'Strange data shape {}. Must be correctly binned ({}, {}) '
                    'or unbinned ({}, {})'.format(
                        _data.shape, binned_shape[0], binned_shape[1],
                        self.dimx, self.dimy))


        # check outfile
        out_is_dict = True
        if not isinstance(out, dict):
            out_is_dict = False
            orb.utils.validate.is_ndarray(out, object_name='out')
            if out.ndim < 2:
                raise TypeError('out must be at least a 2d numpy.ndarray')
            if binning == 1:
                if (out.shape[0], out.shape[1]) != (int(self.dimx), int(self.dimy)):
                    raise TypeError('out.shape must be {}'.format((self.dimx, self.dimy)))
            else:
                if not isbinned(out):
                    raise TypeError('out.shape must be {}'.format((binned_shape[0], binned_shape[1])))
        # check mask
        if mask is not None:
            orb.utils.validate.is_2darray(mask, object_name='mask')
            if mask.shape != (self.dimx, self.dimy):
                raise TypeError('mask.shape must be {}'.format((self.dimx, self.dimy)))

        else:
            mask = np.ones((self.dimx, self.dimy), dtype=bool)

        if binning > 1:
            if not isbinned(mask):
                mask = orb.utils.image.nanbin_image(mask, int(binning))

        mask[np.nonzero(mask)] = 1
        mask = mask.astype(bool)


        # add kwargs to args
        kwargs_keys = list(kwargs.keys())
        for key in kwargs_keys:
            args.append(kwargs[key])
        args.append(kwargs_keys)
        logging.info('passed mapped kwargs : {}'.format(kwargs_keys))


        # check arguments
        # reshape passed arguments
        mapped = np.zeros(len(args), dtype=bool)
        for i in range(len(args)):
            new_arg = args[i]
            is_map = False
            shape = None
            try:
                shape = new_arg.shape
            except AttributeError: pass
            except KeyError: pass
            else:
                if new_arg.ndim < 2: pass
                else:
                    if not isbinned(new_arg) and new_arg.ndim < 4:
                        new_arg = orb.utils.image.nanbin_image(new_arg, int(binning))
                        is_map = True
                    elif isbinned(new_arg):
                        is_map = True
                    else:
                        raise TypeError('Data shape {} not handled'.format(new_arg.shape))
            if shape is None and callable(new_arg):
                # assume new_arg is a function of x, y
                try:
                    new_arg(self.dimx//2, self.dimy//2)
                except Exception as e:
                    raise Exception('argument is callable but does not show the proper behaviour spectrum = f(x, y): {}'.format(e))
                is_map = True            

            args[i] = new_arg
            mapped[i] = is_map

        # get pixel positions grouped by row
        xy = list()
        for i in range(mask.shape[1]):
            _X = np.nonzero(mask[:,i])[0]
            if len(_X) > 0:
                xy.append((_X, np.ones(len(_X), dtype=np.int64) * i))
        logging.info('{} rows to fit'.format(len(xy)))

        # jobs will be passed by row
        all_jobs_indexes = list(range(len(xy)))
        all_jobs_nb = len(all_jobs_indexes)

        # jobs submit / retrieve loop
        jobs = list()

        # timeout setup
        process_start_time = time.time()

        def check_timesup():
            if timeout is not None:
                if time.time() - process_start_time > timeout * float(np.sum(mask)):
                    logging.warning('process time reached timeout * number of binned pixels = {}*{} s'.format(timeout, np.sum(mask)))
                    logging.info(orb.utils.parallel.get_stats_str(job_server))
                    return True
            return False

        progress = orb.core.ProgressBar(all_jobs_nb)
        while len(all_jobs_indexes) > 0 or len(jobs) > 0:
            if check_timesup(): break


            # submit jobs
            while len(jobs) < ncpus and len(all_jobs_indexes) > 0:
                if check_timesup(): break

                timer = dict()
                timer['job_submit_start'] = time.time()

                ix, iy = xy[all_jobs_indexes[0]]
                
                timer['job_load_data_start'] = time.time()

                # raw rows extraction (warning: velocity must be
                # corrected by the function itself)

                # commented but useful if we want to timeout data access
                # GET_DATA_TIMEOUT = 10 # timeout to get a data vector in s

                # outdict = orb.utils.parallel.timed_process(
                #     get_data, GET_DATA_TIMEOUT, args=[self.cube, ix, iy, binning])
                # if 'irow' in outdict:
                #     irow = outdict['irow']
                # else:
                #     logging.warning('timeout reached on data extraction')
                #     break
                irow = self.get_data(
                    min(ix) * binning, (max(ix) + 1) * binning,
                    iy[0] * binning, (iy[0] + 1) * binning,
                    0, self.dimz, silent=True)


                if binning > 1:
                    irow = orb.utils.image.nanbin_image(irow, binning) * binning**2

                irow = np.atleast_2d(irow)
                irow = irow[ix - min(ix), :]

                timer['job_load_data_end'] = time.time()

                all_args = list()
                all_args.append(func)
                all_args.append(irow)

                # extract values of mapped arguments
                for i in range(len(args)):
                    if mapped[i]:
                        if callable(args[i]):
                            all_args.append(args[i](ix, iy))
                        else:
                            all_args.append(np.copy(args[i][ix, iy, ...]))
                    else:
                        all_args.append(args[i])

                # process in row args are passed as the last argument (WARNING do not add
                # other arguments afterward)
                all_args.append({'debug':self.debug,
                                 'timeout':timeout,
                                 'mapped':mapped})

                timer['job_submit_end'] = time.time()

                # job submission
                jobs.append([
                    job_server.submit(
                        process_in_row,
                        args=tuple(all_args),
                        modules=tuple(modules)),
                    (ix, iy), time.time(), timer, all_jobs_indexes[0]])
                all_jobs_indexes.pop(0)
                progress.update(all_jobs_nb - len(all_jobs_indexes))


            # retrieve all finished jobs
            unfinished_jobs = list()
            for i in range(len(jobs)):
                ijob, (ix, iy), stime, timer, ijob_index = jobs[i]
                if ijob.job.ready():
                    logging.debug('job {} ({}, {}) finished'.format(ijob_index, ix, iy))
                    logging.debug('job {} time since submission: {} s'.format(
                        ijob_index, time.time() - stime))
                    logging.debug('job {} submit time: {} s'.format(
                        ijob_index, timer['job_submit_end'] - timer['job_submit_start']))
                    logging.debug('job {} load data time: {} s'.format(
                        ijob_index, timer['job_load_data_end'] - timer['job_load_data_start']))

                    res_row = ijob()
                    for irow in range(len(res_row)):
                        res = res_row[irow]
                        if out_is_dict:
                            if not isinstance(res, dict):
                                raise TypeError('function result must be a dict if out is a dict but it is {}'.format(type(res)))
                            for ikey in list(res.keys()):
                                # create the output array if not set
                                if ikey not in out and res[ikey] is not None:
                                    if np.size(res[ikey]) > 1:
                                        if res[ikey].ndim > 1:
                                            raise TypeError('must be a 1d array of floats')
                                        try: float(res[ikey][0])
                                        except TypeError: raise TypeError('must be an array of floats')
                                    else:
                                        try:
                                            float(res[ikey])
                                        except TypeError:
                                            raise TypeError('If out dict maps are not set (i.e. out is set to a default dict()) returned values must be a dict of float or a 1d array of floats')
                                    _iout = np.empty(
                                        (self.dimx//binning,
                                         self.dimy//binning,
                                         np.size(res[ikey])),
                                        dtype=float)

                                    _iout = np.squeeze(_iout)
                                    out[ikey] = _iout
                                    out[ikey].fill(np.nan)

                                if res[ikey] is not None:
                                    try:
                                        out[ikey][ix[irow], iy[irow], ...] = res[ikey]
                                    except Exception:
                                        logging.warning('exception1: {}'.format(res))
                        else:
                            try:
                                out[ix[irow], iy[irow], ...] = res
                            except Exception:
                                logging.warning('exception2: {} {} {}'.format(
                                    res, out[ix[irow], iy[irow], ...].shape, out.shape))
                    logging.debug('job {} time (whole loop): {} s'.format(
                        ijob_index, time.time() - stime))

                elif timeout is not None:
                    _job_elapsed_time_by_pixel = (time.time() - stime) / np.size(ix)
                    if _job_elapsed_time_by_pixel < timeout:
                        unfinished_jobs.append(jobs[i]) # continue waiting
                    else:
                        logging.warning('job {} timeout for pixels {}, {}'.format(ijob_index, ix, iy[0]))
                        logging.info(orb.utils.parallel.get_stats_str(job_server))
                else:
                    unfinished_jobs.append(jobs[i])
            jobs = unfinished_jobs

        progress.end()

        orb.utils.parallel.close_pp_server(job_server)
        
        return out
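# A minimal usage sketch (hedged: `cube` stands for any object exposing this
# method; the worker function and its outputs are illustrative assumptions):
import numpy as np

def measure(spectrum, kwargs):
    # out defaults to a dict, so the worker must return a dict of floats
    return {'amp': float(np.nanmax(spectrum)),
            'mean': float(np.nanmean(spectrum))}

# maps = cube.process_by_pixel(measure, binning=2, timeout=10)
# maps['amp'] and maps['mean'] then come back as 2d maps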
Example #9
    def process_by_region(self, func, regions, subtract, args=None, modules=None,
                          depfuncs=None):
        """Parallelize a function applied to a list of integrated
        regions extracted from the spectral cube.

        The function must be defined as func(spectrum_bundle, *args).

        theta_orig is the mean original incident angle in the integrated region.
        """
        # guard against the mutable-default-argument pitfall
        if args is None: args = list()
        if modules is None: modules = list()
        if depfuncs is None: depfuncs = list()
        job_server, ncpus = orb.utils.parallel.init_pp_server()
        
        all_jobs = [(i, regions[i]) for i in range(len(regions))] # jobs to submit

        # jobs submit / retrieve loop
        out = list()
        jobs = list() # submitted and unfinished jobs
        all_jobs_nb = len(all_jobs)
        progress = orb.core.ProgressBar(all_jobs_nb)
        while len(all_jobs) > 0 or len(jobs) > 0:
            while_loop_start = time.time()

            # submit jobs
            while len(jobs) < ncpus and len(all_jobs) > 0:
                timer = dict()
                timer['job_submit_start'] = time.time()

                timer['job_load_data_start'] = time.time()
                # raw lines extraction (warning: velocity must be
                # corrected by the function itself)
                ispectrum = self.get_spectrum_from_region(
                    all_jobs[0][1])
                if subtract is not None:
                    ispectrum.subtract_sky(subtract)

                timer['job_load_data_end'] = time.time()

                all_args = list()
                all_args.append(ispectrum.to_bundle())
                for iarg in args:
                    all_args.append(iarg)

                timer['job_submit_end'] = time.time()
                # job submission
                # store ispectrum with the job so that retrieval below pairs
                # each result with its own spectrum
                jobs.append([
                    job_server.submit(
                        func,
                        args=tuple(all_args),
                        modules=tuple(modules)),
                    all_jobs[0], time.time(), timer, ispectrum])
                all_jobs.pop(0)
                progress.update(all_jobs_nb - len(all_jobs))


            # retrieve all finished jobs
            unfinished_jobs = list()
            for i in range(len(jobs)):
                ijob, (iregion_index, iregion), stime, timer, ispectrum = jobs[i]
                if ijob.job.ready():

                    logging.debug('job time since submission: {} s'.format(
                        time.time() - stime))
                    logging.debug('job submit time: {} s'.format(
                        timer['job_submit_end'] - timer['job_submit_start']))
                    logging.debug('job load data time: {} s'.format(
                        timer['job_load_data_end'] - timer['job_load_data_start']))

                    out.append((iregion_index, ijob(), ispectrum))
                    logging.debug('job time (whole loop): {} s'.format(time.time() - stime))
                else:
                    unfinished_jobs.append(jobs[i])
            jobs = unfinished_jobs


        progress.end()

        orb.utils.parallel.close_pp_server(job_server)

        # reorder out
        ordered_out = list()
        for i in range(all_jobs_nb):
            ok = False
            for iout in out:
                if iout[0] == i:
                    ordered_out.append(iout[1:])
                    ok = True
                    break
            if not ok:
                raise Exception('at least one of the processed regions is not in the results list')

        return ordered_out
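# A minimal usage sketch (hedged: `cube` and the worker are illustrative;
# the worker receives the bundle produced by to_bundle() above, and how to
# unpack it depends on the orb version in use):
def region_stat(spectrum_bundle, *args):
    # placeholder worker: real code would rebuild the spectrum from the
    # bundle before measuring it
    return len(spectrum_bundle)

# results = cube.process_by_region(region_stat, regions, subtract=None)
# results[i] is the (worker_result, spectrum) pair for regions[i]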
Example #10
                                           height=leusch_alt)

for lat in galactic_latitude_array:
    for long in galactic_longitude_array:
        galactic_1d_grid.append((lat, long))

galactic_1d_grid = np.array(galactic_1d_grid)
pointings = astropy.coordinates.SkyCoord(galactic_1d_grid[:, 1],
                                         galactic_1d_grid[:, 0],
                                         frame='galactic',
                                         unit='deg')

# iterate through points
for i, pointing in enumerate(pointings):
    obs_time = Time(time.time(), format='unix', location=leusch)
    pointings_altaz = astropy.coordinates.AltAz(location=leusch,
                                                obstime=obs_time)
    final_pointings_altaz = pointing.transform_to(pointings_altaz)
    alt = final_pointings_altaz.alt.deg
    az = final_pointings_altaz.az.deg
    if 15 < alt < 85 and 5 < az < 350:
        dish.point(final_pointings_altaz.alt.deg, final_pointings_altaz.az.deg)

        # noise on
        noise.on()
        spec.read_spec('../Data/celestial_pole-noiseon_' + str(i) + '.fits', 2,
                       (galactic_1d_grid[i][0], galactic_1d_grid[i][1]))

        # noise off
        noise.off()
Example #11
          handletextpad=2,
          borderpad=2,
          ncol=5,
          bbox_to_anchor=(1 + 0.05, 0.425))

#
# animation and showing
#

# objects to redraw
anim_tuple = elements_to_animate + text_line

# display time elapsed between last two animation updates
if benchmark:
    import time
    start = time.time()


def update(frame):
    """
    Handler function to update the plot - only changed items. Currently we redraw
    only Field-of-View, ISS (most speedy object) and time caption.
    """

    if benchmark:
        end = time.time()
        global start
        print(end - start)
        start = end

    global obs_time
Example #12
    name="Boresight",
    sampling_rate_hz=sim.parameters["planet_scanning"]["sampling_rate_hz"])
obs, = sim.create_observations(detectors=[det])

utils.print_inputs(simulation=sim,
                   detector=det,
                   instrument=instr,
                   strategy=strat)

sim.generate_spin2ecl_quaternions(
    scanning_strategy=ss.LBScanningStrategy(metadata=metadata)
)  #Get quaternions responsible for rotation from spin-axis to ecliptic plane

planet = planet_params["planet_name"]
solar_system_ephemeris.set("builtin")
start = time.time()
log.warning(
    "Calculating Jupiter positions over time, might take a long while...")
icrs_pos = get_body_barycentric(
    planet,
    obs.get_times(astropy_times=True))  # get Jupiter position in barycentric coordinates
utils.empty_print(1)
utils.sep_title("Positions")
print("Calculated " + str(planet) + " positions in Barycentric coordinates.")
print("Computation time: " + str(time.time() - start) + " seconds")
utils.empty_print(1)
filename = "outputs/TEST" + str(planet) + "_barycentric"
print("Writing outputs to file " + str(filename))
utils.write_to_file(filename, icrs_pos)
print("Positions saved to file...")

ecl_vec = (
Example #13
# Loop through baselines
nvis_total = 0
for t in range(0, ha_steps):

    # Determine start and end HA
    ha0 = ha_start + t * (ha_end - ha_start) / ha_steps
    ha1 = ha_start + (t + 1) * (ha_end - ha_start) / ha_steps
    tstep = args.tsnap / ha_steps

    nant = ants_xyz.shape[0]
    auvw0 = numpy.array(
        [xyz_to_uvw(ants_xyz[a], ha0, dec) for a in range(nant)])
    auvw1 = numpy.array(
        [xyz_to_uvw(ants_xyz[a], ha1, dec) for a in range(nant)])

    start_time = time.time()
    a1, a0 = numpy.meshgrid(range(nant), range(nant))
    a0, a1 = a0[a0 < a1], a1[a0 < a1]

    # Determine extreme points
    uvw0 = auvw0[a0] - auvw0[a1]
    uvw1 = auvw1[a0] - auvw1[a1]

    # Scale baselines by frequency: multiplying by f/c converts metres to
    # wavelengths (freq_min/freq_max are in MHz, hence the 1e6 factor)
    uvw00 = uvw0 * args.freq_min * 1000000 / c
    uvw01 = uvw0 * args.freq_max * 1000000 / c
    uvw10 = uvw1 * args.freq_min * 1000000 / c
    uvw11 = uvw1 * args.freq_max * 1000000 / c
    assert (numpy.max(
        numpy.abs([uvw00[:, 2], uvw01[:, 2], uvw10[:, 2], uvw11[:, 2]])) <
            args.wmax)
Example #14
import time

import ugradio

# use RA and dec of galactic north pole initially
# ra = 12 h 54.1 min
# dec = +27 deg 7 arcmin
ra = 193.5  # degrees
dec = 27.1167  # degrees

#set frequency and amplitude
lo = ugradio.agilent.SynthDirect()
lo.set_frequency(635, 'MHz')

#print("LO is: " + str(lo.get_frequency()))
# start with noise on
noise = ugradio.leusch.LeuschNoise()
noise.on()

jd = ugradio.timing.julian_date(time.time())
alt, az = ugradio.coord.get_altaz(ra, dec, jd)
#dish.point(alt,az)

spec = ugradio.leusch.Spectrometer()
spec.read_spec('lab4data1_attempt_2-noiseon.fits', 20, (193.5, 27.1167))
print(spec.int_time())

#switch to noise off
noise.off()

#dish.point(alt,az)

spec.read_spec('lab4data1_attempt_2-noiseoff.fits', 20, (193.5, 27.1167))
print(spec.int_time())
Example #15
 if oflow:
  print('Overflows detected -- consider increasing FFT shift')
 else:
  print('No overflows detected')

 print('Setting accumulation length to %.2f spectra' % opts.acc_len)  #* 2 * 512/ (c1*10e6)
 fpga.write_int('acc_len', opts.acc_len)

 print('Triggering sync')
 fpga.write_int('cnt_rst', 0)
 fpga.write_int('sw_sync', 0)
 fpga.write_int('sw_sync', 1)

 trig_time = time.time()
 fpga.write_int('sw_sync', 0)
 fpga.write_int('cnt_rst', 1)
 fpga.write_int('cnt_rst', 0)

 this_acc = 0
 this_acc_time = trig_time
 file_start_time = time.time()

 data  = []
 times = []
 n1=opts.nnumber
 while True:
  try:
   latest_acc = fpga.read_int('acc_cnt')
   latest_acc_time = time.time()