Example #1
    def draw(self, monofields, step, I_bin, t_bin, save, plot):
        if len(self.ims) > 0:
            for im in self.ims:
                im.remove()
            self.ims = []

        dprint(step)
        for i, (plane, xys) in enumerate(zip(self.planes, self.all_xys)):

            for ip, xy in enumerate(xys):
                x, y = xy
                amplitude_series = np.sum(
                    monofields[:, plane, :, x,
                               y], axis=1)[:step]  # containing all the objects
                # amplitude_series = self.sum_chunk(amplitude_series, t_bin)

                timesteps = (np.arange(sp.numframes) * sp.sample_time)[:step]
                # timesteps = timesteps[::t_bin]

                intensity = np.abs(amplitude_series)**2

                binned_intensity = self.sum_chunk(intensity, t_bin)
                I, bins = np.histogram(binned_intensity,
                                       bins=np.arange(np.min(binned_intensity),
                                                      np.max(binned_intensity),
                                                      I_bin))

                self.ims.append(self.axes[i, 1].plot(amplitude_series.real,
                                                     amplitude_series.imag,
                                                     marker='o',
                                                     label=str(xy),
                                                     color=self.colors[ip])[0])
                self.ims.append(self.axes[i, 2].plot(timesteps,
                                                     np.angle(amplitude_series),
                                                     marker='o',
                                                     color=self.colors[ip])[0])
                self.ims.append(self.axes[i, 3].plot(timesteps,
                                                     intensity,
                                                     marker='o',
                                                     color=self.colors[ip])[0])

                self.ims.append(self.axes[i, 4].step(bins[:-1],
                                                     I,
                                                     color=self.colors[ip])[0])

                if plot:
                    plotLogLMap(intensity,
                                np.arange(0, 250, 25),
                                np.arange(0, 250, 25),
                                effExpTime=t_bin)

                # print(plotLogLMap(I))
                if save:
                    with open(self.savefile, 'ab') as handle:
                        # pickle.dump([bins[:-1], I, t_bin], handle, protocol=pickle.HIGHEST_PROTOCOL)
                        pickle.dump((intensity, t_bin),
                                    handle,
                                    protocol=pickle.HIGHEST_PROTOCOL)

        self.fig.canvas.draw()
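
A minimal sketch of a sum_chunk helper like the one called above, assuming it sums a 1-D series in non-overlapping chunks of length t_bin; the actual method on the class may differ:

import numpy as np

def sum_chunk(series, chunk_size):
    # Assumed behaviour: sum a 1-D array in non-overlapping chunks, dropping any trailing partial chunk.
    n_chunks = len(series) // chunk_size
    trimmed = series[:n_chunks * chunk_size]
    return trimmed.reshape(n_chunks, chunk_size).sum(axis=1)

# e.g. sum_chunk(np.arange(10), 3) -> array([ 3, 12, 21])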
Example #2
    def create_bad_pix(self, QE_map, plot=False):
        amount = int(self.array_size[0] * self.array_size[1] *
                     (1. - mp.pix_yield))

        bad_ind = np.array(random.sample(
            list(range(self.array_size[0] * self.array_size[1])), amount))

        dprint(
            f"Bad indices = {len(bad_ind)}, # MKID pix = { self.array_size[0]*self.array_size[1]}, "
            f"Pixel Yield = {mp.pix_yield}, amount? = {amount}")

        # bad_y = random.sample(y, amount)
        bad_y = np.int_(np.floor(bad_ind / self.array_size[1]))
        bad_x = bad_ind % self.array_size[1]

        # print(f"responsivity shape  = {responsivities.shape}")
        QE_map = np.array(QE_map)

        QE_map[bad_x, bad_y] = 0

        if plot:
            plt.xlabel('x pix')
            plt.ylabel('y pix')
            plt.title('Bad Pixel Map')
            plt.imshow(QE_map)
            cax = plt.colorbar()
            cax.set_label('Responsivity')
            plt.show()

        return QE_map
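
The flat-index to pixel-coordinate mapping used above is just floor division and modulo by the number of columns; a small standalone check with an illustrative array size:

import numpy as np

array_size = (3, 4)                  # (rows, cols), illustrative only
bad_ind = np.array([0, 5, 11])       # flat indices of bad pixels
bad_y = bad_ind // array_size[1]     # -> array([0, 1, 2])
bad_x = bad_ind % array_size[1]      # -> array([0, 1, 3])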
Example #3
    def save_plane(self, location=None):
        """
        Saves the complex field at a specified location in the optical system. If the function is called by
        wfo.loop_collection, the plane is saved AFTER the function is applied

        Note that the complex planes saved are not summed by object, interpolated over wavelength, nor masked down
        to the sp.maskd_size.

        :param location: name of plane where field is being saved
        :return: self.save_E_fields
        """
        if sp.verbose:
            dprint(f"saving plane at {location}")

        if location is not None and location in sp.save_list:
            E_field = np.zeros(
                (1, np.shape(self.wf_collection)[0],
                 np.shape(self.wf_collection)[1], sp.grid_size, sp.grid_size),
                dtype=np.complex64)
            samp_lambda = np.zeros(ap.n_wvl_init)

            for iw, sources in enumerate(self.wf_collection):
                samp_lambda[iw] = proper.prop_get_sampling(
                    self.wf_collection[iw, 0])
                for io, wavefront in enumerate(sources):
                    wf = proper.prop_shift_center(wavefront.wfarr)
                    E_field[0, iw, io] = copy.copy(wf)

            self.Efield_planes = np.vstack((self.Efield_planes, E_field))
            self.saved_planes.append(location)
            self.plane_sampling.append(samp_lambda)
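
For reference, np.vstack in the last step grows the stack of saved planes along the first axis, so each call to save_plane appends one plane of shape (n_wvl, n_objects, grid, grid); a toy shape check with illustrative sizes:

import numpy as np

Efield_planes = np.zeros((0, 2, 1, 4, 4), dtype=np.complex64)  # empty stack (illustrative sizes)
E_field = np.ones((1, 2, 1, 4, 4), dtype=np.complex64)         # one newly saved plane
Efield_planes = np.vstack((Efield_planes, E_field))
print(Efield_planes.shape)                                     # (1, 2, 1, 4, 4)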
Example #4
    def save_out_to_disk(self, plot=False):
        out = Slapper()
        out.probe = Slapper()
        out.ts = Slapper()

        # Probe Info
        out.probe.direction = self.probe_ax
        out.probe.amp = self.probe_amp
        out.probe.width = self.probe_w
        out.probe.height = self.probe_h
        out.probe.shift = self.probe_shift
        out.probe.spacing = self.probe_spacing
        out.probe.DM_cmd_cycle = self.DM_probe_series
        out.probe.phs_interval = self.phs_intervals

        # Timeseries Info
        out.ts.start = 0
        out.ts.n_probes = self.n_probes
        out.ts.phase_cycle = self.phase_cycle
        out.ts.probe_integration_time = self.probe_integration_time
        out.ts.t_one_cycle = self.time_for_one_cycle
        out.ts.null_time = self.null_time
        out.ts.elapsed_time = 0
        out.ts.n_cycles = 0
        out.ts.n_cmds = sp.numframes  # TODO verify this-this was just a late night hack
        out.ts.cmd_tstamps = self.cmd_tstamps

        save_location = iop.testdir + f"/{iop.testname}_CDIparams.pkl"
        dprint(f'save_location={save_location}')
        with open(save_location, 'wb') as handle:
            pickle.dump(out, handle, protocol=pickle.HIGHEST_PROTOCOL)

        # Fig
        if plot:
            if self.n_probes >= 4:
                nrows = 2
                ncols = self.n_probes//2
                figheight = 8
            else:
                nrows = 1
                ncols = self.n_probes
                figheight = 4

            fig, subplot = plt.subplots(nrows, ncols, figsize=(10, figheight))
            fig.subplots_adjust(wspace=0.5, right=0.85, left=0.05)
            fig.suptitle('Probe Series')

            for ax, ix in zip(subplot.flatten(), range(out.ts.n_probes)):
                im = ax.imshow(out.probe.DM_cmd_cycle[ix], interpolation='none', origin='lower')
                ax.set_title(f"Probe " + r'$\theta$=' + f'{out.ts.phase_cycle[ix]/np.pi:.2f}' + r'$\pi$')

            cax = fig.add_axes([0.9, 0.2, 0.03, 0.6])  # Add axes for colorbar @ position [left,bottom,width,height]
            cb = fig.colorbar(im, orientation='vertical', cax=cax)  #
            cb.set_label('Probe Height (m)')

        return out
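
A minimal sketch of reading the saved CDI parameters back in; the path follows the save_location pattern above but is hypothetical, and unpickling requires the Slapper class to be importable:

import pickle

save_location = '/path/to/testdir/testname_CDIparams.pkl'  # hypothetical path following the pattern above
with open(save_location, 'rb') as handle:
    out = pickle.load(handle)                               # needs the Slapper class on the import path

print(out.ts.n_probes, out.probe.amp)                       # attributes set in save_out_to_disk()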
Example #5
    def add_bad_pix(self, QE_map_all, bad_ind):
        dprint(len(bad_ind))
        QE_map = np.array(QE_map_all, copy=True)
        if len(bad_ind) > 0:
            bad_y = np.int_(np.floor(bad_ind / mp.array_size[1]))
            bad_x = bad_ind % mp.array_size[1]
            QE_map[bad_x, bad_y] = 0

        return QE_map
Example #6
    def make_fields_master(self, plot=False):
        """ The master fields file of which all the photons are seeded from according to their device

        :return:
        """

        backup_fields = os.path.join(self.masterdir, 'fields_planet_slices.h5')

        ap.companion = False
        self.contrast = copy.deepcopy(ap.contrast)

        telescope = Telescope(usesave=False)
        fields = telescope()['fields']

        unoccultname = os.path.join(iop.testdir, f'telescope_unoccult')
        self.psf_template = self.get_unoccult_psf(unoccultname)
        # grid(self.psf_template)

        if plot:
            grid(fields[0], logZ=True, title='make_fields_master')

        if os.path.exists(backup_fields):
            fields_master = fields
        else:
            assert len(fields.shape) == 6

            collapse_comps = np.zeros(
                (sp.numframes, ap.n_wvl_final, sp.grid_size, sp.grid_size))
            # grid(self.psf_template)
            for (x, y), scaling in zip(
                    np.array(ap.companion_xy) * 20, self.contrast):
                cube = copy.deepcopy(self.psf_template)
                print(x, y, scaling)
                cube = np.roll(cube, -int(x), 2)
                cube = np.roll(cube, -int(y), 1)
                cube *= scaling
                # grid(cube)
                collapse_comps += cube

            obs_seq = np.abs(fields[:, -1])**2

            fields_master = np.zeros(
                (sp.numframes, ap.n_wvl_final, 2, sp.grid_size, sp.grid_size))
            # collapse_comps = np.sum(obs_seq[:, :, 1:], axis=2)
            fields_master[:, :, 0] = obs_seq[:, :, 0]
            fields_master[:, :, 1] = collapse_comps
            if plot:
                grid(fields_master[0], logZ=True, title='star and comps cube')

            dprint(
                f"Reduced shape of obs_seq = {np.shape(fields_master)} (numframes x nwsamp x 2 x grid x grid)"
            )

            os.rename(iop.fields, backup_fields)
            telescope.save_fields(fields_master)

        return fields_master
Example #7
    def update_device(self, new_cam, orig_cam, val, i):
        new_cam.params['mp'].dark_counts = True
        new_cam.dark_bright = val
        new_cam.dark_per_step = (self.params['sp'].sample_time * new_cam.dark_bright *
                                 new_cam.array_size[0] * new_cam.array_size[1] *
                                 new_cam.dark_pix_frac)
        dprint('dark_per_step:', new_cam.dark_per_step, 'sample_time: ',
               self.params['sp'].sample_time, 'dark_bright: ',
               new_cam.dark_bright, 'pixels: ',
               new_cam.array_size[0] * new_cam.array_size[1],
               new_cam.dark_pix_frac)
        return new_cam
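
The expected dark counts per frame computed above are just the product of frame time, dark rate, pixel count, and the fraction of pixels allowed to be dark; a worked example with made-up numbers:

sample_time = 0.01        # s per frame (illustrative)
dark_bright = 10          # dark counts per pixel per second (illustrative)
array_size = (146, 146)   # illustrative MKID array size
dark_pix_frac = 0.1       # fraction of pixels that can produce dark counts (illustrative)

dark_per_step = sample_time * dark_bright * array_size[0] * array_size[1] * dark_pix_frac
print(dark_per_step)      # ~213.2 expected dark counts per frame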
Example #8
def offset_companion(wf, step=0):
    """
    offsets the companion wavefront using the 2nd and 3rd order Zernike Polynomials (X,Y tilt)
    companion(s) contrast and location(s) set in params

    We don't call this function via wfo.loop_collection because we need to know which object (io) we are on, which
    is not supported in the current format. This is the only exception to applying loop_collection.

    Important: this function must be called AFTER any calls to proper.prop_define_entrance, which normalizes the
    intensity, because we scale the intensity of the planet relative to the star via the user-parameter ap.contrast.

    If you have a focused system, and do not scale the grid sampling of the system by wavelength, we account
    for that here (thus the if/else statements). This is because we shift the companion's location in the focal plane
    by proper.prop_zernikes, which scales the x/y tilt (zernike orders 2 and 3) by wavelength to account for the
    presumed resampling based on wavelength. We thus counteract that in the case of sp.focused_sys=True

    Wavelength/contrast scaling scales the contrast ratio between the star and planet as a function of wavelength.
    This ratio is given by ap.C_spec, and the scale ranges from 1/ap.C_spec to 1 as a function of ap.n_wvl_init. The
        gradient ap.C_spec should be chosen carefully to consider the number of wavelengths and spectral type of the
        star and planet in the simulation.

    :param wf: single wavefront object from wfo.wf_collection, shape=(grid_sz, grid_sz)
    :return: nothing is returned explicitly, but the given wfo initialised in the Wavefronts class has been altered to
        give the appropriate wavefront for a planet in the focal plane
    """
    if ap.companion is True and wf.name != 'star':
        cont_scaling = np.linspace(1. / ap.C_spec, 1, ap.n_wvl_init)

        # Shifting the Array
        if sp.focused_sys:
            # Scaling into lambda/D AND scaling by wavelength
            xloc = ap.companion_xy[wf.ib-1][0] * wf.lamda / tp.entrance_d \
                   * ap.wvl_range[0] / wf.lamda # * (-1)**(iw%2)
            yloc = ap.companion_xy[wf.ib-1][1] * wf.lamda / tp.entrance_d \
                    *  ap.wvl_range[0] / wf.lamda  # / (2*np.pi)   * (-1)**(iw%2)
        else:
            # Scaling Happens Naturally!
            xloc = ap.companion_xy[wf.ib-1][0]*1e-6
            yloc = ap.companion_xy[wf.ib-1][1]*1e-6

        if tp.rot_rate != 0 and step != 0:
            angle = np.deg2rad(tp.rot_rate * step * sp.sample_time)
            rot_matrix = [[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]
            xloc, yloc = np.dot(rot_matrix, np.array([xloc,yloc]))
            dprint(tp.rot_rate, angle, rot_matrix, xloc, yloc)

        proper.prop_zernikes(wf, [2, 3], np.array([xloc, yloc]))  # zernike[2,3] = x,y tilt

        ##############################################
        # Wavelength/Contrast  Scaling the Companion
        ##############################################
        wf.wfarr *= np.sqrt(ap.contrast[wf.ib-1])
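
The wavelength/contrast scaling described in the docstring is a linear ramp from 1/ap.C_spec to 1 across the initial wavelength samples; a quick numeric illustration with made-up values:

import numpy as np

C_spec = 10.0        # illustrative stand-in for ap.C_spec
n_wvl_init = 5       # illustrative stand-in for ap.n_wvl_init
cont_scaling = np.linspace(1. / C_spec, 1, n_wvl_init)
print(cont_scaling)  # [0.1   0.325 0.55  0.775 1.   ]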
Example #9
def debugging_noise(metric_test):
    """ debugging the throughput issue by first looking at the input noise """
    comp = 'star'
    metric_test.get_rebinned_cubes(metric_test.master_fields, comps=comp != 'star')
    nmetric = len(metric_test.metric.cams[comp])
    mid_ind = int((nmetric - 1) / 2)
    dprint(mid_ind)
    cam = metric_test.metric.cams['comp'][mid_ind]  # middle ind assuming
    wavemet_cube = [metric_test.metric.cams[comp][i].rebinned_cube[:, 0] for i in range(len(metric_test.metric.vals))]
    # body_spectra(wavemet_cube)
    median_fwhm = cam.lod
    mask = cam.QE_map == 0
    wsamples = np.linspace(metric_test.wvl_range[0], metric_test.wvl_range[1], metric_test.n_wvl_final)
    median_wave = (wsamples[-1] + wsamples[0]) / 2

    for im, metcube in enumerate(wavemet_cube):
        broadband_image = np.sum(metcube, axis=0)
        # quick2D(broadband_image, title='broadband image')
        median_noise, vector_radd = noise_per_annulus(broadband_image, separation=median_fwhm, fwhm=median_fwhm,
                                                      mask=mask)
        plt.plot(vector_radd * cam.platescale, median_noise, label=f'met val: {metric_test.metric.vals[im]}')
    plt.legend()
    plt.title('broadband noise-separation curve for different metrics')
    plt.show()

    # split those lines into component wavelengths and plot the result of each metric on a new page
    for im, metcube in enumerate(wavemet_cube):
        body_spectra(metcube, title=f'met val: {metric_test.metric.vals[im]}', show=False)
        plt.figure()
        for iw, waveimage in enumerate(metcube):
            fwhm = median_fwhm  # * wsamples[iw]/median_wave
            print('wavelength=', wsamples[iw], 'met val=', metric_test.metric.vals[im], 'fwhm:', fwhm)
            median_noise, vector_radd = noise_per_annulus(waveimage, separation=fwhm, fwhm=fwhm, mask=mask)
            plt.title(f'met val: {metric_test.metric.vals[im]}')
            plt.plot(vector_radd * cam.platescale, median_noise, label=f'wave: {int(wsamples[iw] * 1e9)}')
        plt.legend()
    plt.show()

    metwave_cube = np.transpose(wavemet_cube, (1, 0, 2, 3))

    # split those lines into component wavelengths and plot the result of each common wavelength on a new page
    for iw, wavecube in enumerate(metwave_cube):
        body_spectra(wavecube, title=f'wave: {int(wsamples[iw] * 1e9)}', show=False)
        plt.figure()
        for im, metimage in enumerate(wavecube):
            fwhm = median_fwhm  # * wsamples[iw]/median_wave
            print('wavelength=', wsamples[iw], 'met val=', metric_test.metric.vals[im], 'fwhm:', fwhm)
            median_noise, vector_radd = noise_per_annulus(metimage, separation=fwhm, fwhm=fwhm, mask=mask)
            plt.title(f'wave: {int(wsamples[iw] * 1e9)}')
            plt.plot(vector_radd * cam.platescale, median_noise, label=f'met val: {metric_test.metric.vals[im]}')
        plt.legend()
    plt.show()
Example #10
    def get_packets(self, step):
        intensity = np.abs(self.fields)**2

        if self.dp.resamp:
            nyq_sampling = self.dp.band[0] * 1e-9 * 360 * 3600 / (4 * np.pi * tp.diam)
            sampling = nyq_sampling * tp.beam_ratio * 2  # nyq sampling happens at tp.beam_ratio = 0.5
            x = np.arange(-self.dp.grid_size * sampling / 2, self.dp.grid_size * sampling / 2, sampling)
            xnew = np.arange(-self.dp.array_size[0] * self.dp.platescale / 2, self.dp.array_size[0] * self.dp.platescale / 2, self.dp.platescale)
            mkid_cube = np.zeros((len(intensity), self.dp.array_size[0], self.dp.array_size[1]))
            for s, slice in enumerate(intensity):
                f = interpolate.interp2d(x, x, slice, kind='cubic')
                mkid_cube[s] = f(xnew, xnew)
            mkid_cube = mkid_cube * np.sum(intensity) / np.sum(mkid_cube)
            intensity = mkid_cube

        intensity[intensity < 0] *= -1

        if self.dp.QE_var:
            intensity *= self.dp.QE_map[:intensity.shape[1], :intensity.shape[1]]

        if hasattr(self.dp, 'star_phot'): self.dp.star_photons_per_s = self.dp.star_phot
        num_events = int(self.dp.star_photons_per_s * self.dp.sample_time * np.sum(intensity))

        if sp.verbose:
            dprint(f'star flux: {self.dp.star_photons_per_s}, cube sum: {np.sum(intensity)}, num events: {num_events}')

        photons = self.sample_cube(intensity, num_events)
        # photons = spec.calibrate_phase(photons)
        # photons = temp.assign_calibtime(photons, step)

        if self.dp.dark_counts:
            dark_photons = MKIDs.get_bad_packets(self.dp, step, type='dark')
            photons = np.hstack((photons, dark_photons))

        if self.dp.hot_pix:
            hot_photons = MKIDs.get_bad_packets(self.dp, step, type='hot')
            photons = np.hstack((photons, hot_photons))

        if self.dp.phase_uncertainty:
            photons[1] *= self.dp.responsivity_error_map[np.int_(photons[2]), np.int_(photons[3])]
            photons, idx = MKIDs.apply_phase_offset_array(photons, self.dp.sigs)

        if self.dp.phase_background:
            thresh = -photons[1] > 3 * self.dp.sigs[-1, np.int_(photons[3]), np.int_(photons[2])]
            photons = photons[:, thresh]

        if self.dp.remove_close:
            stem = pipe.arange_into_stem(photons.T, (self.dp.array_size[0], self.dp.array_size[1]))
            stem = MKIDs.remove_close(stem)
            photons = pipe.ungroup(stem)

        return photons.T
Example #11
    def cut_max_count(self, datacube):
        image = np.sum(datacube, axis=0)
        max_counts = self.max_count * sp.sample_time
        maxed = image > max_counts
        scaling_map = max_counts / image
        scaling_map[scaling_map > 1] = 1
        scaled_cube = datacube * scaling_map
        dprint(
            f'datacube went from {np.sum(datacube)} to {np.sum(scaled_cube)} '
            f'(factor of {np.sum(scaled_cube)/np.sum(datacube)}) during max count cut'
        )

        return scaled_cube
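
A small numeric check of the saturation logic above: pixels whose time-summed counts exceed max_counts are scaled down so their sum lands exactly at the cap, and everything else is left untouched (toy numbers only):

import numpy as np

datacube = np.array([[[4., 1.]],
                     [[6., 1.]]])              # toy (time, y, x) cube
max_counts = 5.
image = np.sum(datacube, axis=0)               # [[10., 2.]]
scaling_map = np.minimum(max_counts / image, 1)
scaled_cube = datacube * scaling_map
print(np.sum(scaled_cube, axis=0))             # [[5., 2.]]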
Example #12
    def update_device(self, new_cam, orig_cam, val, i):
        params = [mp]
        save_state = save_params(params)
        mp.array_size = np.array([val] * 2)
        new_cam = Camera(usesave=False)  # no fields will be produced with this arg
        new_cam.usesave = orig_cam.usesave  # new cam will have usesave False so set it here to what you actually want
        new_cam.lod = (val / self.median_val) * mp.lod
        dprint(val, self.median_val, new_cam.lod)
        new_cam.platescale = mp.platescale * self.median_val / val
        new_cam.array_size = np.array([val, val])
        restore_params(save_state, params)
        # new_cam.name = os.path.join(self.testdir, f'camera_{self.name}={val}_comp={obj}.pkl')
        return new_cam
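
The rescaling above keeps the field of view fixed: lambda/D in pixels grows with array size while the plate scale shrinks by the same factor, so their product (the spot size on sky) is unchanged; a quick check with illustrative numbers:

median_val = 146     # reference array size (illustrative)
lod = 8              # lambda/D in pixels at the reference size (illustrative)
platescale = 10.0    # plate scale at the reference size (illustrative units)

for val in (73, 146, 292):
    new_lod = (val / median_val) * lod
    new_platescale = platescale * median_val / val
    print(val, new_lod * new_platescale)   # 80.0 every time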
Example #13
def gen_CDI_phase_stream():
    """
    generate an array of phases per timestep for the CDI algorithm

    Currently this assumes the timestream is not that long. Only use this for short timestreams, and use a more
    efficient implementation for long simulations (on the scale of minutes or more).

    :return: phase_list  array of phases to use in CDI probes
    """
    phase_series = np.zeros(sp.numframes) * np.nan

    # Repeating Probe Phases for Integration time
    if cdip.phase_integration_time > sp.sample_time:
        phase_hold = int(cdip.phase_integration_time / sp.sample_time)
        phase_1cycle = np.repeat(cdip.phase_list, phase_hold)
    elif cdip.phase_integration_time == sp.sample_time:
        phase_1cycle = cdip.phase_list
    else:
        raise ValueError(
            f"Cannot have CDI phase probe integration time less than sp.sample_time"
        )

    # Repeating Cycle of Phase Probes for Simulation Duration
    full_simulation_time = sp.numframes * sp.sample_time
    time_for_one_cycle = len(
        phase_1cycle) * cdip.phase_integration_time + cdip.null_time
    n_phase_cycles = full_simulation_time / time_for_one_cycle
    dprint(f"number of phase cycles = {n_phase_cycles}")
    if n_phase_cycles < 0.5:
        if cdip.n_probes > sp.numframes:
            warnings.warn(
                f"Number of timesteps in sp.numframes is less than number of CDI phases \n"
                f"not all phases will be used")
            phase_series = phase_1cycle[0:sp.numframes]
        else:
            warnings.warn(
                f"Total length of CDI integration time for all phase probes exceeds full simulation time \n"
                f"Not all phase probes will be used")
            phase_series = phase_1cycle[0:sp.numframes]
    elif 0.5 < n_phase_cycles < 1:
        phase_series[0:len(phase_1cycle)] = phase_1cycle
        dprint(f"phase_series = {phase_series}")
    else:
        n_full = np.floor(n_phase_cycles)
        raise NotImplementedError(f"Whoa, not implemented yet. Hang in there")
        # TODO implement

    return phase_series
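
The probe-repetition step uses np.repeat to hold each probe phase for several consecutive frames; a toy example assuming a hold of two frames per phase:

import numpy as np

phase_list = np.array([0., np.pi / 2, np.pi])    # illustrative probe phases
phase_hold = 2                                   # phase_integration_time / sample_time, assumed integral
phase_1cycle = np.repeat(phase_list, phase_hold)
print(phase_1cycle)                              # each probe phase held for 2 consecutive frames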
Example #14
    def __init__(self, config=None):

        self.contrasts = np.power(np.ones((config['data']['num_indata']))*10, config['data']['contrasts'])
        if config['data']['null_frac'] > 0:
            self.contrasts[::int(1 / config['data']['null_frac'])] = 0
        invalid_contrast = np.array(config['data']['contrasts']) > 0
        self.contrasts[invalid_contrast] = 0
        maxcont = np.argmax(self.contrasts)
        self.contrasts = np.append(self.contrasts[maxcont], np.delete(self.contrasts, maxcont))
        config['data']['contrasts'] = np.log10(self.contrasts)
        disp = config['data']['lods']
        angle = config['data']['angles']
        self.lods = (np.array([np.sin(np.deg2rad(angle)),np.cos(np.deg2rad(angle))])*disp).T

        self.spectra = [(config['data']['star_spectra'], p_spec) for p_spec in config['data']['planet_spectra']]
        dprint(self.contrasts)
Example #15
    def readfield(self, path, filename):

        try:
            data_r, hdr = getdata(path + filename + '_r.fits', header=True)
            data_i = getdata(path + filename + '_i.fits')
        except FileNotFoundError:
            dprint('FileNotFoundError. Waiting...')
            import time
            time.sleep(10)
            data_r, hdr = getdata(path + filename + '_r.fits', header=True)
            data_i = getdata(path + filename + '_i.fits')

        field = np.array(data_r, dtype=complex)
        field.imag = data_i

        return field
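
The readfield method expects a '_r'/'_i' FITS pair; a hedged sketch of the complementary writer (not part of the excerpt), using astropy.io.fits directly:

import numpy as np
from astropy.io import fits

def writefield(path, filename, field):
    # Sketch only: save real and imaginary parts as the pair that readfield() reads back.
    fits.writeto(path + filename + '_r.fits', np.real(field), overwrite=True)
    fits.writeto(path + filename + '_i.fits', np.imag(field), overwrite=True)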
Example #16
    def degrade_photons(self, photons, plot=False):
        if plot:
            grid(self.rebin_list(photons), title='before degrade')

        if mp.dark_counts:
            dark_photons = self.get_bad_packets(type='dark')
            dprint(photons.shape, dark_photons.shape, 'dark')
            photons = np.hstack((photons, dark_photons))

        if mp.hot_pix:
            hot_photons = self.get_bad_packets(type='hot')
            photons = np.hstack((photons, hot_photons))
            # stem = add_hot(stem)

        if plot:
            grid(self.rebin_list(photons), title='after bad')

        if mp.phase_uncertainty:
            photons[1] *= self.responsivity_error_map[np.int_(photons[2]),
                                                      np.int_(photons[3])]
            photons, idx = self.apply_phase_offset_array(photons, self.sigs)

        # thresh =  photons[1] < self.basesDeg[np.int_(photons[3]),np.int_(photons[2])]
        if mp.phase_background:
            thresh = -photons[1] > 3 * self.sigs[-1,
                                                 np.int_(photons[3]),
                                                 np.int_(photons[2])]
            photons = photons[:, thresh]

        if mp.remove_close:
            stem = self.arange_into_stem(
                photons.T, (self.array_size[0], self.array_size[1]))
            stem = self.remove_close(stem)
            photons = self.ungroup(stem)

        if plot:
            grid(self.rebin_list(photons), title='after remove close')

        # This step was taking a long time
        # stem = arange_into_stem(photons.T, (self.array_size[0], self.array_size[1]))
        # cube = make_datacube(stem, (self.array_size[0], self.array_size[1], ap.n_wvl_final))
        # # ax7.imshow(cube[0], origin='lower', norm=LogNorm(), cmap='inferno', vmin=1)
        # cube /= self.QE_map
        # photons = ungroup(stem)

        return photons
Example #17
    def save_class(self):

        num_input = len(self.chunked_photons)  # 16

        reorder = np.apply_along_axis(np.random.permutation, 1,
                                      np.ones((num_input, self.chunked_photons.shape[1])) * np.arange(self.chunked_photons.shape[1])).astype(int)

        self.data = np.array([self.chunked_photons[o, order] for o, order in enumerate(reorder)])
        if config['task'] == 'part_seg':
            self.labels = np.ones((num_input), dtype=int) #* self.label
            self.pids = np.array([self.chunked_pids[o, order] for o, order in enumerate(reorder)])[:, :, 0]
        else:
            self.labels = np.array([self.chunked_pids[o, order] for o, order in enumerate(reorder)])[:, :, 0]

        if config['pointnet_version'] == 2:
            if self.train_type == 'train':
                self.smpw = [self.chunked_pids.size/(self.chunked_pids == o).sum() for o in range(self.num_classes)]

                # labelweights, _ = np.histogram(self.chunked_pids, range(self.num_classes+1))
                # labelweights = labelweights.astype(np.float32)
                # labelweights = labelweights/np.sum(labelweights)
                # self.smpw = 1/np.log(1.2+labelweights)
                # self.smpw = labelweights
                dprint(self.smpw)
            else:
                self.smpw = np.ones((self.num_classes))

        # self.data = self.data[:, :, [1, 3, 0, 2]]

        if self.debug:
            self.display_2d_hists()

        with h5py.File(self.outfile, 'w') as hf:

            hf.create_dataset('data', data=self.data)
            hf.create_dataset('label', data=self.labels)
            if config['task'] == 'part_seg':
                hf.create_dataset('pid', data=self.pids)
            if config['pointnet_version'] == 2:
                hf.create_dataset('smpw', data=self.smpw)
            if self.astro:
                hf.attrs[u'contrast'] = self.astro[0]
                hf.attrs[u'loc'] = self.astro[1] * 10
                hf.attrs[u'spec'] = self.astro[2][1]
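
A minimal sketch of reading the HDF5 file written above back in; dataset and attribute names follow save_class, while the file path is hypothetical:

import h5py

with h5py.File('/path/to/outfile.h5', 'r') as hf:   # hypothetical path for self.outfile
    data = hf['data'][:]
    labels = hf['label'][:]
    if 'pid' in hf:                                  # only written for the part_seg task
        pids = hf['pid'][:]
    contrast = hf.attrs.get('contrast')              # only present when self.astro was set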
Example #18
    def __init__(self, iteration=0, name=investigation):
        self.name = str(iteration)
        self.masterdir = os.path.join(iop.datadir, name, self.name, 'master')

        self.nbranch = 2
        self.ncomp = 7
        self.fc_snr = 100
        self.througput_file = os.path.join(
            self.masterdir,
            f'throughput_nbranch={self.nbranch}_ncomp={self.ncomp}.pkl')

        params = {
            'ap': ap,
            'tp': tp,
            'atmp': atmp,
            'cdip': cdip,
            'iop': iop,
            'sp': sp,
            'mp': mp
        }
        iop.update_testname(self.masterdir)

        if sp.verbose:
            for param in params.values():
                pprint(param.__dict__)

        self.fields = self.make_fields_master()

        dprint(iop.fields)
        self.cam = Camera(fields=False, usesave=True)
        if self.cam.usesave:
            self.cam.save_instance()

        # if tp.detector == 'mkid':
        #     self.cam = subs.get_form_photons(self.fields, self.cam, comps=False)
        # elif tp.detector == 'ideal':
        #     self.cam = subs.get_ideal_photons(self.fields, self.cam, comps=False)
        # else:
        #     raise NotImplementedError

        self.wsamples = np.linspace(ap.wvl_range[0], ap.wvl_range[1],
                                    ap.n_wvl_final)
        self.scale_list = self.wsamples / (ap.wvl_range[1] - ap.wvl_range[0])
Example #19
    def get_bad_packets(self, type='dark'):
        if type == 'hot':
            n_device_counts = self.total_hot
        elif type == 'dark':
            n_device_counts = self.total_dark
            dprint(self.total_dark, mp.dark_bright, sp.sample_time)
        else:
            raise AttributeError("type currently has to be 'hot' or 'dark'")

        if n_device_counts % 1 > np.random.uniform(0, 1, 1):
            n_device_counts += 1

        n_device_counts = int(n_device_counts)
        photons = np.zeros((4, n_device_counts))
        if n_device_counts > 0:
            if type == 'hot':
                phases = np.random.uniform(-120, 0, n_device_counts)
                hot_ind = np.random.choice(range(len(self.hot_locs[0])),
                                           n_device_counts)
                bad_pix = self.hot_locs[:, hot_ind]
            elif type == 'dark':
                dist = Distribution(gaussian(
                    0, 0.25, np.linspace(0, 1, mp.res_elements)),
                                    interpolation=False)
                phases = dist(n_device_counts)[0]
                max_phase = max(phases)
                phases = -phases * 120 / max_phase
                bad_pix_options = self.create_false_pix(self.dark_pix_frac *
                                                        self.array_size[0] *
                                                        self.array_size[1])
                bad_ind = np.random.choice(range(len(bad_pix_options[0])),
                                           n_device_counts)
                bad_pix = bad_pix_options[:, bad_ind]

            photons[0] = np.random.uniform(sp.startframe * sp.sample_time,
                                           sp.numframes * sp.sample_time,
                                           n_device_counts)
            photons[1] = phases
            photons[2:] = bad_pix

        return photons
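
The photon packets built here (and consumed in the other examples) are a (4, N) array whose rows are arrival time, phase, and two pixel coordinates, as the assignments above show; a toy construction with illustrative values:

import numpy as np

photons = np.zeros((4, 2))        # two toy photons
photons[0] = [0.01, 0.02]         # arrival times (s)
photons[1] = [-60., -45.]         # phases (deg)
photons[2:] = [[10, 11],          # pixel coordinates (layout as in get_bad_packets above)
               [20, 21]]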
Example #20
    sim = mm.RunMedis(params=params,
                      name='mkid_param_invest/figure2_mkid_demo',
                      product='fields')
    observation = sim()
    fields = observation['fields']
    # grid(fields)

    # form_photons = os.path.join(params['iop'].testdir, 'form_photons.pkl')
    #
    # if os.path.exists(form_photons):
    #     print(f'loading formatted photon data from {form_photons}')
    #     with open(form_photons, 'rb') as handle:
    #         cam, fig = pickle.load(handle)

    # else:
    dprint((fields.shape))
    cam = Camera(params, fields=False, usesave=True)
    if cam.usesave:
        cam.save()

    cam.photons = np.empty((0, 4))
    dprint(len(fields))
    cam.rebinned_cube = np.zeros(
        (params['sp'].numframes, params['ap'].n_wvl_final,
         params['mp'].array_size[1], params['mp'].array_size[0]))
    for step in range(len(fields)):
        dprint(step, fields.shape)
        spectralcubes = np.abs(fields[step, -1, :, :])**2

        if step == 0:
            step_packets, fig = get_packets_plots(spectralcubes,
Example #21
    def pca_rebinned_cubes_old(self, comps=True):
        maps = []

        if comps:
            for cam in self.metric.cams['comp']:
                dprint(cam.rebinned_cube.shape)
                SDI = pca.pca(cam.rebinned_cube,
                              angle_list=np.zeros(
                                  (cam.rebinned_cube.shape[1])),
                              scale_list=self.scale_list,
                              mask_center_px=None,
                              adimsdi='double',
                              ncomp=self.ncomp,
                              ncomp2=None,
                              collapse='median')
                maps.append(SDI)
            return maps

        else:
            rad_samps, thruputs, noises, conts = [], [], [], []
            for cam in self.metric.cams['star']:
                frame_nofc = pca.pca(cam.rebinned_cube,
                                     angle_list=np.zeros(
                                         (cam.rebinned_cube.shape[1])),
                                     scale_list=self.scale_list,
                                     mask_center_px=None,
                                     adimsdi='double',
                                     ncomp=7,
                                     ncomp2=None,
                                     collapse='median')

                # quick2D(frame_nofc, logZ=False, title='frame_nofc', show=True)

                fwhm = cam.lod if hasattr(cam, 'lod') else mp.lod
                mask = cam.QE_map == 0
                noise_samp, rad_samp = contrcurve.noise_per_annulus(
                    frame_nofc, separation=1, fwhm=fwhm, mask=mask)

                if metric_name == 'array_size':
                    _, vector_radd = contrcurve.noise_per_annulus(
                        frame_nofc,
                        separation=fwhm,
                        fwhm=fwhm,
                        wedge=(0, 360),
                        mask=mask)

                else:
                    vector_radd = self.vector_radd

                # crop the noise and radial sampling measurements to the limits of the throughput measurement
                radmin = vector_radd.astype(int).min()
                cutin1 = np.where(rad_samp.astype(int) == radmin)[0][0]
                noise_samp = noise_samp[cutin1:]
                rad_samp = rad_samp[cutin1:]
                radmax = vector_radd.astype(int).max()
                cutin2 = np.where(rad_samp.astype(int) == radmax)[0][0]
                noise_samp = noise_samp[:cutin2 + 1]
                rad_samp = rad_samp[:cutin2 + 1]

                if metric_name == 'array_size':
                    throughput = np.interp(
                        rad_samp * cam.platescale,
                        self.vector_radd.values * self.master_cam.platescale,
                        self.throughput)
                # elif metric_name == 'dark_bright':
                #     throughput = self.manual_throughput(cam.rebinned_cube, frame_nofc)
                else:
                    throughput = self.throughput

                win = min(noise_samp.shape[0] - 2, int(2 * fwhm))
                if win % 2 == 0: win += 1
                noise_samp_sm = savgol_filter(noise_samp,
                                              polyorder=2,
                                              mode='nearest',
                                              window_length=win)

                starphot = 1.1
                sigma = 5
                cont_curve_samp = (
                    (sigma * noise_samp_sm) / throughput) / starphot
                cont_curve_samp[cont_curve_samp < 0] = 1
                cont_curve_samp[cont_curve_samp > 1] = 1

                thruputs.append(throughput)
                noises.append(noise_samp_sm)
                conts.append(cont_curve_samp)
                rad_samp = cam.platescale * rad_samp
                rad_samps.append(rad_samp)
                maps.append(frame_nofc)

            return maps, rad_samps, thruputs, noises, conts
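
The contrast-curve step above combines a Savitzky-Golay-smoothed noise profile, the throughput, and a 5-sigma threshold; a compact standalone version with illustrative inputs:

import numpy as np
from scipy.signal import savgol_filter

noise_samp = np.abs(np.random.default_rng(0).normal(1e-3, 1e-4, 50))  # toy radial noise profile
throughput = np.full(50, 0.3)                                         # toy throughput curve
fwhm = 4

win = min(noise_samp.shape[0] - 2, int(2 * fwhm))
if win % 2 == 0: win += 1
noise_samp_sm = savgol_filter(noise_samp, polyorder=2, mode='nearest', window_length=win)

sigma, starphot = 5, 1.1
cont_curve_samp = np.clip((sigma * noise_samp_sm) / throughput / starphot, None, 1)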
Example #22
    def pca_rebinned_cubes(self):
        maps, rad_samps, thruputs, noises, conts = [], [], [], [], []
        cams = self.metric.cams
        colors = [f'C{i}' for i in range(len(self.metric.cams['star']))]
        # fig, axes = plt.subplots(1,6)
        fig, axes = plt.subplots(1, 3)

        dprint(np.shape(axes))
        for i in range(len(cams['comp'])):
            comp_cube = cams['comp'][i].rebinned_cube
            dprint(comp_cube.shape)
            frame_comp = pca.pca(comp_cube,
                                 angle_list=np.zeros((comp_cube.shape[1])),
                                 scale_list=self.scale_list,
                                 mask_center_px=None,
                                 adimsdi='double',
                                 ncomp=self.ncomp,
                                 ncomp2=None,
                                 collapse=self.collapse)
            maps.append(frame_comp)

            nocomp_cube = cams['star'][i].rebinned_cube
            frame_nocomp = pca.pca(nocomp_cube,
                                   angle_list=np.zeros((nocomp_cube.shape[1])),
                                   scale_list=self.scale_list,
                                   mask_center_px=None,
                                   adimsdi='double',
                                   ncomp=self.ncomp,
                                   ncomp2=None,
                                   collapse=self.collapse)

            median_fwhm = cams['star'][i].lod if hasattr(
                cams['star'][i], 'lod') else mp.lod
            median_wave = (self.wsamples[-1] + self.wsamples[0]) / 2
            fwhm = median_fwhm * self.wsamples / median_wave

            mask = cams['star'][i].QE_map == 0

            print('check whether to be using mask here?!')
            noise_samp, rad_samp = contrcurve.noise_per_annulus(
                frame_nocomp, separation=1, fwhm=median_fwhm)
            # mask=mask)

            xy = np.array(
                ap.companion_xy
            ) * 20 * cams['star'][i].sampling / cams['star'][i].platescale
            px, py = (cams['star'][i].array_size[::-1] / 2 - xy).T
            cx, cy = cams['star'][i].array_size[::-1] / 2
            dists = np.sqrt((py - cy)**2 + (px - cx)**2)
            # grid(np.sum(comp_cube, axis=1), title='in comp time collapse', show=False)
            # injected_flux=[contrcurve.aperture_flux(np.sum(comp_cube-nocomp_cube, axis=1)[i], py, px, fwhm[i]/1.5, ap_factor=1) for i in range(comp_cube.shape[0])]
            injected_flux = [
                contrcurve.aperture_flux(np.sum(comp_cube, axis=1)[i],
                                         py,
                                         px,
                                         fwhm[i] / 1.5,
                                         ap_factor=1,
                                         plot=False)
                for i in range(comp_cube.shape[0])
            ]
            # [axes[i+3].plot(dists, influx_wave) for influx_wave in injected_flux]
            injected_flux = np.mean(injected_flux, axis=0)

            # if i ==0 or i == len(self.metric.cams['star'])-1:
            # grid(comp_cube, title='comp', show=False)
            # grid(nocomp_cube, title='no', show=False)
            # grid(comp_cube-nocomp_cube, title='diff', show=False)
            # grid([np.sum(comp_cube, axis=(0,1))], title='sum comp', logZ=True, show=False)
            # grid([np.sum(nocomp_cube, axis=(0,1))], title='sum nocomp', logZ=True, show=False)
            # grid([np.sum(comp_cube, axis=(0,1))-np.sum(nocomp_cube, axis=(0,1))], title='sum then diff', logZ=True, show=False)
            # grid([], title='comp', logZ=True, show=False)
            grid([frame_comp, frame_nocomp, frame_comp - frame_nocomp],
                 title='comp, nocomp, diff',
                 logZ=False,
                 show=False,
                 vlim=(-2e-7, 2e-7))  # vlim=(-2,2))#, vlim=(-2e-7,2e-7))
            grid([
                np.sum(comp_cube, axis=1)[::4],
                np.sum(nocomp_cube, axis=1)[::4]
            ],
                 title='input: comp, no comp',
                 logZ=False,
                 show=False,
                 vlim=(-1e-5, 1e-5))  # vlim=(-2,2))#, vlim=(-1e-5,1e-5))
            grid(np.sum(comp_cube - nocomp_cube, axis=1)[::4],
                 title='diff input cube',
                 logZ=False,
                 show=False)  #, vlim=(-2,2))
            # grid([(frame_comp-frame_nocomp)/(np.sum(comp_cube-nocomp_cube, axis=(0,1)))], title='throughput', logZ=True, show=False)

            # recovered_flux = contrcurve.aperture_flux((frame_comp - frame_nocomp), py, px, median_fwhm/1.5, ap_factor=1, )
            recovered_flux = contrcurve.aperture_flux((frame_comp),
                                                      py,
                                                      px,
                                                      median_fwhm / 1.5,
                                                      ap_factor=1,
                                                      plot=False)
            # thruput = recovered_flux*1e6 #/ injected_flux
            thruput = recovered_flux / injected_flux

            thruput[np.where(thruput < 0)] = 0

            # plt.figure()
            axes[0].plot(dists, thruput, c=colors[i])
            axes[1].plot(dists, injected_flux, c=colors[i])
            axes[2].plot(dists,
                         recovered_flux,
                         c=colors[i],
                         label=f'{self.metric.vals[i]}')
            axes[2].legend()
            # plt.plot(rad_samp, noise_samp)
            # plt.show()

            win = min(noise_samp.shape[0] - 2, int(2 * median_fwhm))
            if win % 2 == 0: win += 1
            noise_samp_sm = savgol_filter(noise_samp,
                                          polyorder=2,
                                          mode='nearest',
                                          window_length=win)

            # thruput_mean_log = np.log10(thruput + 1e-5)
            # f = InterpolatedUnivariateSpline(dists, thruput_mean_log, k=2)
            # thruput_interp_log = f(rad_samp)
            # thruput_interp = 10 ** thruput_interp_log
            # thruput_interp[thruput_interp <= 0] = np.nan  # 1e-5

            # thruput_interp = np.interp(rad_samp, dists, thruput)

            from scipy.interpolate import interp1d
            # f = interp1d(dists, thruput, fill_value='extrapolate')
            # thruput_interp = f(rad_samp)
            thruput_com = thruput.reshape(4, -1, order='F').mean(axis=0)
            dists_com = dists.reshape(4, -1, order='F').mean(axis=0)

            thruput_com_log = np.log10(thruput_com + 1e-5)
            f = interp1d(dists_com, thruput_com_log, fill_value='extrapolate')
            thruput_interp_log = f(rad_samp)
            thruput_interp = 10**thruput_interp_log

            axes[0].plot(dists_com, thruput_com, marker='o', c=colors[i])

            print(thruput, thruput_interp)
            axes[0].plot(rad_samp, thruput_interp, c=colors[i])

            starphot = 1.1 / 2
            sigma = 5
            cont_curve_samp = (
                (sigma * noise_samp_sm) / thruput_interp) / starphot
            cont_curve_samp[cont_curve_samp < 0] = 1
            cont_curve_samp[cont_curve_samp > 1] = 1

            thruputs.append(thruput_interp)
            noises.append(noise_samp_sm)
            conts.append(cont_curve_samp)
            rad_samp = cams['star'][i].platescale * rad_samp
            rad_samps.append(rad_samp)
            # maps.append(frame_nocomp)
        plt.show(block=True)
        return maps, rad_samps, thruputs, noises, conts
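
The reshape(4, -1, order='F').mean(axis=0) step above bins every four consecutive radial samples into one averaged point before the log-space interpolation; a quick check on a small array:

import numpy as np

thruput = np.arange(8.)
binned = thruput.reshape(4, -1, order='F').mean(axis=0)
print(binned)   # [1.5 5.5], i.e. the means of samples 0-3 and 4-7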
Example #23
    def __call__(self, debug=True):

        dprint(self.performance_data)
        # self.performance_data = '/Users/dodkins/MKIDSim/mkid_param_invest/figure3_develop_mkid_lowerflux/1/max_count/performance_data.pkl'
        if not os.path.exists(self.performance_data):
            # self.metric.create_adapted_cams()

            comps_ = [True, False]
            pca_products = []
            for comps in comps_:
                if hasattr(self.metric, 'get_rebinned_cubes'):
                    self.metric.get_rebinned_cubes(self.master_fields,
                                                   comps=comps)
                else:
                    self.get_rebinned_cubes(self.master_fields, comps=comps)

            #     pca_products.append(self.pca_rebinned_cubes(comps))
            #
            # maps = pca_products[0]
            # rad_samps = pca_products[1][1]
            # thruputs = pca_products[1][2]
            # noises = pca_products[1][3]
            # conts = pca_products[1][4]

            maps, rad_samps, thruputs, noises, conts = self.pca_rebinned_cubes(
            )

            with open(self.performance_data, 'wb') as handle:
                pickle.dump((maps, rad_samps, thruputs, noises, conts,
                             self.metric.vals),
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
        else:
            with open(self.performance_data, 'rb') as handle:
                performance_data = pickle.load(handle)
                if len(performance_data) == 6:
                    maps, rad_samps, thruputs, noises, conts, self.metric.vals = performance_data
                else:
                    maps, rad_samps, conts, self.metric.vals = performance_data

        if debug:
            try:
                contrcurve_plot(self.metric.vals, rad_samps, thruputs, noises,
                                conts)
                if self.metric.name != 'array_size':
                    grid(maps, logZ=False,
                         title=self.metric.name)  #, vlim=(-2,2)) for no-normed
                else:
                    pass
                plt.show(block=True)
            except UnboundLocalError:
                dprint('thruputs and noises not saved in old versions :(')
                # raise UnboundLocalError
                pass

            combo_performance(maps,
                              rad_samps,
                              conts,
                              self.metric.vals,
                              self.metric.name, [0, -1],
                              savedir=self.testdir)

        return {'maps': maps, 'rad_samps': rad_samps, 'conts': conts}
Example #24
    metric_names = [
        'array_size', 'g_mean', 'numframes', 'R_mean', 'dark_bright',
        'pix_yield'
    ]
    # metric_names = ['ideal_placeholder']
    # metric_names = ['dark_bright']
    metric_names = ['max_count']
    # metric_names = ['array_size', 'g_mean']

    # collect the data
    all_cont_data = []
    for r in range(repeats):

        obs = ObservatoryMaster(iteration=r)

        dprint(iop.testdir)

        comp_images, cont_data, metric_multi_list, metric_vals_list = [], [], [], []
        for metric_name in metric_names:

            # plt.rcParams["axes.prop_cycle"] = plt.cycler("color", plt.cm.viridis(np.linspace(0, 1, len(param.metric_multiplier))))

            metric_config = metrics.get_metric(metric_name, master_cam=obs.cam)
            metric_test = MetricTester(obs, metric_config)
            metric_results = metric_test()

            comp_images.append(metric_results['maps'])
            cont_data.append(
                [metric_results['rad_samps'], metric_results['conts']])

            # # store the multipliers but flip those that achieve better contrast when the metric is decreasing
Example #25
tp.ao_act = 60
tp.rotate_atmos = False
tp.rotate_sky = False
tp.f_lens = 200.0 * tp.entrance_d
tp.occult_loc = [0, 0]

# Saving
sp.save_to_disk = False  # save obs_sequence (timestep, wavelength, x, y)
sp.save_list = ['detector']  # list of locations in optics train to save
# sp.skip_planes = ['coronagraph']  # ['wfs', 'deformable mirror']  # list of locations in optics train to save
sp.quick_detect = True
sp.debug = False
sp.verbose = True

if __name__ == '__main__':
    # =======================================================================
    # Run it!!!!!!!!!!!!!!!!!
    # =======================================================================

    sim = mm.RunMedis(name='general_example', product='photons')
    observation = sim()
    fp_sampling = sim.cam.platescale
    rebinned_photons = sim.cam.rebinned_cube

    dprint(f"Sampling in focal plane is {fp_sampling}")
    for o in range(len(ap.contrast) + 1):
        print(rebinned_photons.shape)
        datacube = rebinned_photons[o]
        print(o, datacube.shape)
        grid(datacube, logZ=True, title='Spectral Channels')
Example #26
def get_packets_plots(datacubes, step, cam, plot=False):

    FoV_datacubes = np.zeros(
        (2, params['ap'].n_wvl_final, params['mp'].array_size[0],
         params['mp'].array_size[1]))
    for d in range(2):
        datacube = datacubes[:, d]

        if params['mp'].resamp:
            nyq_sampling = params['ap'].wvl_range[0] * 360 * 3600 / (
                4 * np.pi * params['tp'].entrance_d)
            sampling = nyq_sampling * params[
                'sp'].beam_ratio * 2  # nyq sampling happens at params['tp'].beam_ratio = 0.5
            x = np.arange(-params['sp'].grid_size * sampling / 2,
                          params['sp'].grid_size * sampling / 2, sampling)
            xnew = np.arange(-cam.array_size[0] * cam.platescale / 2,
                             cam.array_size[0] * cam.platescale / 2,
                             cam.platescale)
            mkid_cube = np.zeros(
                (len(datacube), cam.array_size[0], cam.array_size[1]))
            for s, slice in enumerate(datacube):
                f = interpolate.interp2d(x, x, slice, kind='cubic')
                mkid_cube[s] = f(xnew, xnew)
            mkid_cube = mkid_cube * np.sum(datacube) / np.sum(mkid_cube)
            datacube = mkid_cube

        datacube[datacube < 0] *= -1
        FoV_datacubes[d] = datacube

    if plot:
        fig = plt.figure(figsize=(11, 6))
        ax1 = fig.add_subplot(231)
        ax1.set_ylabel(r'rows')
        ax1.set_xlabel(r'columns')
        props = dict(boxstyle='square', facecolor='k', alpha=0.5)
        # ax1.text(0.05, 0.85, 'Device FoV', transform=ax1.transAxes, fontweight='bold',
        #          color='w', fontsize=16, bbox=props)
        ax1.text(-0.11,
                 1.05,
                 'i',
                 transform=ax1.transAxes,
                 color='k',
                 fontsize=21,
                 fontname='Times New Roman')
        ax1.set_title('Step 1')
        im = ax1.imshow(np.sum(FoV_datacubes, axis=0)[0][::-1],
                        origin='lower',
                        norm=SymLogNorm(1e-9),
                        cmap='inferno',
                        vmin=1e-9,
                        vmax=1e-6)
        divider = make_axes_locatable(ax1)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cb = fig.colorbar(
            im, cax=cax, orientation='vertical',
            norm=SymLogNorm(1e-9))  #, format=ticker.FuncFormatter(fmt))

    if params['mp'].QE_var:
        FoV_datacubes = FoV_datacubes * cam.QE_map[
            np.newaxis, np.newaxis, :datacube.shape[1], :datacube.shape[1]]

    if plot:
        ax2 = fig.add_subplot(232)
        ax2.set_ylabel(r'rows')
        ax2.set_xlabel(r'columns')
        ax2.set_title('Step 2')
        ax2.text(-0.11,
                 1.05,
                 'ii',
                 transform=ax2.transAxes,
                 color='k',
                 fontsize=21,
                 fontname='Times New Roman')
        # ax2.text(0.05, 0.75, 'Responsivity\n correction', transform=ax2.transAxes, fontweight='bold',
        #          color='w', fontsize=16, bbox=props)
        im = ax2.imshow(np.sum(FoV_datacubes, axis=0)[0][::-1],
                        origin='lower',
                        norm=SymLogNorm(1e-9),
                        cmap='inferno',
                        vmin=1e-9,
                        vmax=1e-6)
        divider = make_axes_locatable(ax2)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cb = fig.colorbar(im,
                          cax=cax,
                          orientation='vertical',
                          norm=SymLogNorm(1e-9))

    object_photons = []
    for d in range(2):
        num_events = int(params['ap'].star_flux * params['sp'].sample_time *
                         np.sum(FoV_datacubes[d]))
        if params['sp'].verbose:
            dprint(
                f"star flux: {params['ap'].star_flux}, cube sum: {np.sum(FoV_datacubes[d])}, num events: {num_events}"
            )

        photons = cam.sample_cube(FoV_datacubes[d], num_events)
        photons = cam.calibrate_phase(photons)
        photons = cam.assign_calibtime(photons, step)

        if plot:

            colors = ['#d62728', '#1f77b4']
            alphas = [0.15, 0.95]
            zorders = [0, -1]
            if d == 0:
                ax3 = fig.add_subplot(233, projection='3d')
                ax3.view_init(elev=25., azim=-45)
                ax3.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
                ax3.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
                ax3.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
                ax3.set_xlabel(r'$\phi$')
                ax3.set_ylabel('columns')
                ax3.set_zlabel('rows')

            ax3.scatter(photons[1],
                        photons[3],
                        photons[2],
                        s=1,
                        alpha=alphas[d],
                        color=colors[d],
                        zorder=zorders[d])
            ax3.set_title('Step 3', y=0, pad=163, verticalalignment="top")
            ax3.text2D(-0.11,
                       1.05,
                       'iii',
                       transform=ax3.transAxes,
                       color='k',
                       fontsize=21,
                       fontname='Times New Roman')
        object_photons.append(photons)
        # fig.tight_layout()

    if plot:
        ax4 = fig.add_subplot(234)
        ax4.hist(object_photons[0][1],
                 bins=range(-120, 0, 2),
                 histtype='stepfilled',
                 color='#d62728',
                 alpha=0.95,
                 label='Star')
        ax4.hist(object_photons[0][1],
                 bins=range(-120, 0, 2),
                 histtype='step',
                 color='k',
                 alpha=0.95)
        ax4.set_yscale('log')

    photons = np.append(object_photons[0], object_photons[1], axis=1)
    if params['mp'].dark_counts:
        dark_photons = cam.get_bad_packets(step, type='dark')
        photons = np.hstack((photons, dark_photons))

    params['mp'].hot_pix = False
    if params['mp'].hot_pix:
        hot_photons = cam.get_bad_packets(step, type='hot')
        photons = np.hstack((photons, hot_photons))

    if plot:
        ax4.hist(object_photons[1][1],
                 bins=range(-120, 0, 2),
                 histtype='stepfilled',
                 color='#1f77b4',
                 alpha=0.95,
                 label='Planet')
        ax4.hist(object_photons[1][1],
                 bins=range(-120, 0, 2),
                 histtype='step',
                 color='k',
                 alpha=0.95)

        # ax4.hist(hot_photons[1], bins=range(-120,0,2), alpha=0.5, color='m', histtype='stepfilled', label='Hot')
        # ax4.hist(hot_photons[1], bins=range(-120,0,2), histtype='step', color='k')

        ax4.hist(dark_photons[1],
                 bins=range(-120, 0, 2),
                 alpha=0.75,
                 color='#ff7f0e',
                 histtype='stepfilled',
                 label='Dark')
        ax4.hist(dark_photons[1],
                 bins=range(-120, 0, 2),
                 histtype='step',
                 color='k')
        ax4.legend(loc='upper right')
        ax4.set_xlabel('Phase (deg)')
        ax4.set_title('Step 4')
        ax4.text(-0.11,
                 1.05,
                 'iv',
                 transform=ax4.transAxes,
                 color='k',
                 fontsize=21,
                 fontname='Times New Roman')

    if params['mp'].phase_uncertainty:
        photons[1] *= cam.responsivity_error_map[np.int_(photons[2]),
                                                 np.int_(photons[3])]
        photons, idx = cam.apply_phase_offset_array(photons, cam.sigs)

    if plot:
        ax5 = fig.add_subplot(235)
        ax5.hist(photons[1],
                 bins=range(-120, 0, 2),
                 alpha=0.5,
                 histtype='stepfilled',
                 color='#2ca02c',
                 label='Degraded')
        ax5.hist(photons[1],
                 bins=range(-120, 0, 2),
                 histtype='step',
                 color='k')

    dprint(photons.shape)
    # Keep only packets whose (negative) phase exceeds 3 sigma of the phase noise of
    # the pixel they landed on (cf. cam.basesDeg for the per-pixel baseline).
    thresh = -photons[1] > 3 * cam.sigs[0, np.int_(photons[3]), np.int_(photons[2])]
    photons = photons[:, thresh]

    if plot:
        ax5.hist(photons[1],
                 bins=range(-120, 0, 2),
                 alpha=0.95,
                 histtype='stepfilled',
                 rwidth=0.9,
                 color='#9467bd',
                 label='Detected')
        ax5.hist(photons[1],
                 bins=range(-120, 0, 2),
                 histtype='step',
                 color='k')
        ax5.set_xlabel('Phase (deg)')
        ax5.legend(loc='upper right')
        ax5.set_title('Steps 5 and 7')
        ax5.set_yscale('log')
        ax5.text(-0.11,
                 1.05,
                 'v',
                 transform=ax5.transAxes,
                 color='k',
                 fontsize=21,
                 fontname='Times New Roman')

    dprint(photons.shape)

    if params['sp'].verbose: print("Completed Readout Loop")

    if plot:
        return photons.T, fig
    else:
        return photons.T
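A minimal, self-contained sketch of the 3-sigma phase cut applied near the end of the readout loop above. The shapes, noise map, and all values below are invented for illustration; only the masking pattern mirrors the real code, where photons is a (4, N) array of (time, phase, row, col) packets and cam.sigs holds the per-pixel phase noise.

import numpy as np

rng = np.random.default_rng(0)
n_packets, n_pix = 1000, 146
photons = np.vstack([rng.uniform(0, 1, n_packets),        # arrival time (s)
                     rng.uniform(-120, 0, n_packets),     # phase (deg)
                     rng.integers(0, n_pix, n_packets),   # row
                     rng.integers(0, n_pix, n_packets)])  # column
sigs = np.full((n_pix, n_pix), 10.)                       # phase-noise sigma per pixel (deg)

# keep packets whose phase amplitude exceeds 3x the noise of the pixel they hit
keep = -photons[1] > 3 * sigs[np.int_(photons[2]), np.int_(photons[3])]
photons = photons[:, keep]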
Beispiel #27
0
def get_form_photons(fields,
                     cam,
                     comps=True,
                     plot=False,
                     collapse_time_first=False,
                     norm=False):
    """
    Alternative to cam.__call__ that allows the user to specify whether the spectral cube contains the planets

    :param fields: ndarray of complex fields
    :param cam: mkids.Camera()
    :param comps: bool, if True sum over all objects (star + companions); if False use the star field only
    :param plot: bool, show the rebinned cube with grid()
    :param collapse_time_first: bool, median-collapse the time axis before transposing
    :param norm: bool, normalise the rebinned cube to unit total counts

    :return: mkids.Camera() with rebinned_cube populated
    """
    dprint(cam.name)
    if os.path.exists(cam.name):
        print(f'loading cam rebinned_cube save at {cam.name}')
        with open(cam.name, 'rb') as handle:
            cam = pickle.load(handle)
    else:
        if comps:
            fourcube = np.sum(fields, axis=2)
        else:
            fourcube = fields[:, :, 0]

        fourcube = np.abs(fourcube)**2
        dprint(np.sum(fourcube))
        fourcube = cam.rescale_cube(fourcube)

        max_steps = cam.max_chunk(fourcube)
        num_chunks = int(np.ceil(len(fourcube) / max_steps))
        dprint(fourcube.shape, max_steps,
               len(fourcube) / max_steps, num_chunks)
        # cam.photons = np.empty((4,0))
        cam.rebinned_cube = np.zeros_like(fourcube)
        for chunk in range(num_chunks):
            photons = cam.get_photons(fourcube[chunk * max_steps:(chunk + 1) *
                                               max_steps],
                                      chunk_step=chunk * max_steps)
            photons = cam.degrade_photons(photons)
            # cam.photons = np.hstack((cam.photons, photons))
            # dprint(photons.shape, cam.photons.shape)
            cam.rebinned_cube[chunk * max_steps:(chunk + 1) *
                              max_steps] = cam.rebin_list(
                                  photons,
                                  time_inds=[
                                      chunk * max_steps,
                                      (chunk + 1) * max_steps
                                  ])
        # cam.rebinned_cube = cam.rebin_list(cam.photons)

        cam.photons = None

        for step in range(len(fields)):
            print(step, cam.max_count)
            if cam.max_count:
                cam.rebinned_cube[step] = cam.cut_max_count(
                    cam.rebinned_cube[step])

        if norm:
            cam.rebinned_cube /= np.sum(cam.rebinned_cube)  # /sp.numframes

        if collapse_time_first:
            grid(cam.rebinned_cube, title='rebinned cube (pre time-collapse)', show=False, logZ=True)
            cam.rebinned_cube = np.median(cam.rebinned_cube, axis=0)[np.newaxis]
            grid(cam.rebinned_cube, title='rebinned cube (post time-collapse)', show=False, logZ=True)

        cam.rebinned_cube = np.transpose(cam.rebinned_cube, (1, 0, 2, 3))

        if plot:
            grid(cam.rebinned_cube, show=True, title='get form photons')

        if cam.usesave:
            cam.save_instance()

    return cam
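Below is a small stand-alone sketch of the chunking pattern used in get_form_photons: the time axis of the intensity cube is processed max_steps frames at a time so the intermediate photon list never grows too large. The cube shape and max_steps are invented; in the real function the loop body is cam.get_photons -> cam.degrade_photons -> cam.rebin_list.

import numpy as np

fourcube = np.random.rand(10, 3, 32, 32)   # (time, wavelength, x, y), toy data
max_steps = 4                              # frames per chunk (stand-in for cam.max_chunk)
num_chunks = int(np.ceil(len(fourcube) / max_steps))

rebinned_cube = np.zeros_like(fourcube)
for chunk in range(num_chunks):
    sl = slice(chunk * max_steps, (chunk + 1) * max_steps)
    # placeholder for get_photons / degrade_photons / rebin_list on this chunk
    rebinned_cube[sl] = fourcube[sl]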
Beispiel #28
0
    def occulter(self, wf):

        n = int(proper.prop_get_gridsize(wf))
        ofst = 0  # no offset
        ramp_sign = 1  # sign of charge is positive
        ramp_oversamp = 11.  # vortex is oversampled for a better discretization

        # f_lens = tp.f_lens #conf['F_LENS']
        # diam = tp.diam#conf['DIAM']
        charge = 2  #conf['CHARGE']
        pixelsize = 5  #conf['PIXEL_SCALE']
        Debug_print = False  #conf['DEBUG_PRINT']

        coron_temp = os.path.join(iop.testdir, 'coron_maps/')
        if not os.path.exists(coron_temp):
            os.mkdir(coron_temp)

        if charge != 0:
            wavelength = proper.prop_get_wavelength(wf)
            gridsize = proper.prop_get_gridsize(wf)
            # pixelsize is presumably in mas; 1 mas ~ 4.85e-9 rad, so this is pixel scale / (lambda/D)
            beam_ratio = pixelsize * 4.85e-9 / (wavelength / tp.entrance_d)
            # dprint((wavelength,gridsize,beam_ratio))
            calib = f"{charge}_{int(beam_ratio * 100)}_{gridsize}"
            my_file = os.path.join(coron_temp, 'zz_perf_' + calib + '_r.fits')

            if os.path.isfile(my_file):
                if Debug_print:
                    print("Charge ", charge)
                vvc = self.readfield(
                    coron_temp,
                    'zz_vvc_' + calib)  # read the theoretical vortex field
                vvc = proper.prop_shift_center(vvc)
                scale_psf = wf._wfarr[0, 0]
                psf_num = self.readfield(coron_temp, 'zz_psf_' +
                                         calib)  # read the pre-vortex field
                psf0 = psf_num[0, 0]
                psf_num = psf_num / psf0 * scale_psf
                perf_num = self.readfield(
                    coron_temp,
                    'zz_perf_' + calib)  # read the perfect-result vortex field
                perf_num = perf_num / psf0 * scale_psf
                wf._wfarr = (
                    wf._wfarr - psf_num
                ) * vvc + perf_num  # the wavefront takes into account the real pupil with the perfect-result vortex field

            else:  # CAL==1: # create the vortex for a perfectly circular pupil
                if Debug_print:
                    dprint(f"Vortex Charge= {charge}")

                f_lens = 200.0 * tp.entrance_d
                wf1 = proper.prop_begin(tp.entrance_d, wavelength, gridsize,
                                        beam_ratio)
                proper.prop_circular_aperture(wf1, tp.entrance_d / 2)
                proper.prop_define_entrance(wf1)
                proper.prop_propagate(wf1, f_lens,
                                      'inizio')  # propagate wavefront
                proper.prop_lens(
                    wf1, f_lens,
                    'focusing lens vortex')  # propagate through a lens
                proper.prop_propagate(wf1, f_lens, 'VC')  # propagate wavefront

                self.writefield(coron_temp, 'zz_psf_' + calib,
                                wf1.wfarr)  # write the pre-vortex field
                nramp = int(n * ramp_oversamp)  # oversamp
                # create the vortex by creating a matrix (theta) representing the ramp (created by atan 2 gradually varying matrix, x and y)
                y1 = np.ones((nramp, ), dtype=int)  # np.int is deprecated/removed in recent NumPy
                y2 = np.arange(0, nramp,
                               1.) - (nramp / 2) - int(ramp_oversamp) / 2
                y = np.outer(y2, y1)
                x = np.transpose(y)
                theta = np.arctan2(y, x)
                x = 0
                y = 0
                vvc_tmp = np.exp(1j * (ofst + ramp_sign * charge * theta))
                theta = 0
                vvc_real_resampled = cv2.resize(
                    vvc_tmp.real, (0, 0),
                    fx=1 / ramp_oversamp,
                    fy=1 / ramp_oversamp,
                    interpolation=cv2.INTER_LINEAR
                )  # scale the pupil to the pupil size of the simulations
                vvc_imag_resampled = cv2.resize(
                    vvc_tmp.imag, (0, 0),
                    fx=1 / ramp_oversamp,
                    fy=1 / ramp_oversamp,
                    interpolation=cv2.INTER_LINEAR
                )  # scale the pupil to the pupil size of the simulations
                vvc = np.array(vvc_real_resampled, dtype=complex)
                vvc.imag = vvc_imag_resampled
                vvcphase = np.arctan2(vvc.imag,
                                      vvc.real)  # create the vortex phase
                vvc_complex = np.array(np.zeros((n, n)), dtype=complex)
                vvc_complex.imag = vvcphase
                vvc = np.exp(vvc_complex)
                vvc_tmp = 0.
                self.writefield(coron_temp, 'zz_vvc_' + calib,
                                vvc)  # write the theoretical vortex field

                proper.prop_multiply(wf1, vvc)
                proper.prop_propagate(wf1, f_lens, 'OAP2')
                proper.prop_lens(wf1, f_lens)
                proper.prop_propagate(wf1, f_lens, 'forward to Lyot Stop')
                proper.prop_circular_obscuration(
                    wf1, 1.,
                    NORM=True)  # null the amplitude inside the Lyot Stop
                proper.prop_propagate(wf1, -f_lens)  # back-propagation
                proper.prop_lens(wf1, -f_lens)
                proper.prop_propagate(wf1, -f_lens)
                self.writefield(
                    coron_temp, 'zz_perf_' + calib,
                    wf1.wfarr)  # write the perfect-result vortex field

                vvc = self.readfield(coron_temp, 'zz_vvc_' + calib)
                vvc = proper.prop_shift_center(vvc)
                scale_psf = wf._wfarr[0, 0]
                psf_num = self.readfield(coron_temp, 'zz_psf_' +
                                         calib)  # read the pre-vortex field
                psf0 = psf_num[0, 0]
                psf_num = psf_num / psf0 * scale_psf
                perf_num = self.readfield(
                    coron_temp,
                    'zz_perf_' + calib)  # read the perfect-result vortex field
                perf_num = perf_num / psf0 * scale_psf
                wf._wfarr = (
                    wf._wfarr - psf_num
                ) * vvc + perf_num  # the wavefront takes into account the real pupil with the perfect-result vortex field

        return wf
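The core of the vortex construction in occulter() is an oversampled azimuthal phase ramp, charge * arctan2(y, x), wrapped into a unit-amplitude complex mask. The sketch below reproduces just that step with invented grid parameters; the real code then resamples the ramp back to the simulation grid with cv2.resize before multiplying it into the focal-plane wavefront.

import numpy as np

n, ramp_oversamp, charge = 64, 11, 2
nramp = n * ramp_oversamp
coords = np.arange(nramp) - nramp / 2 - ramp_oversamp / 2
y, x = np.meshgrid(coords, coords, indexing='ij')
theta = np.arctan2(y, x)                 # azimuth angle of each oversampled pixel
vvc = np.exp(1j * charge * theta)        # vortex mask: unit amplitude, phase wraps charge times per turn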
Beispiel #29
0
    time_for_one_cycle = len(
        phase_1cycle) * cdip.phase_integration_time + cdip.null_time
    n_phase_cycles = full_simulation_time / time_for_one_cycle
    dprint(f"number of phase cycles = {n_phase_cycles}")
    if n_phase_cycles < 0.5:
        if cdip.n_probes > sp.numframes:
            warnings.warn(
                f"Number of timesteps in sp.numframes is less than number of CDI phases \n"
                f"not all phases will be used")
            phase_series = phase_1cycle[0:sp.numframes]
        else:
            warnings.warn(
                f"Total length of CDI integration time for all phase probes exceeds full simulation time \n"
                f"Not all phase probes will be used")
            phase_series = phase_1cycle[0:sp.numframes]
    elif 0.5 < n_phase_cycles < 1:
        phase_series[0:len(phase_1cycle)] = phase_1cycle
        dprint(f"phase_seris  = {phase_series}")
    else:
        n_full = np.floor(n_phase_cycles)
        raise NotImplementedError("Repeating the probe cycle more than once is not implemented yet")
        # TODO implement tiling phase_1cycle n_full times

    return phase_series


if __name__ == '__main__':
    dprint(f"Testing CDI probe")
    theta = cdip.phase_list[0]
    CDIprobe(theta, 0)
Beispiel #30
0
def deformable_mirror(wf,
                      WFS_map,
                      iter,
                      previous_output=None,
                      apodize=False,
                      plane_name='',
                      debug=False):
    """
    combine different DM actuator commands into single map to send to prop_dm

    prop_dm needs an input map of n_actuators x n_actuators in units of actuator command height. quick_ao will handle
    the conversion to actuator command height, and the CDI probe must be scaled in cdi.probe_amp in params in
    units of m. Each subroutine is also responsible for creating a map of n_actuators x n_actuators spacing. prop_dm
    handles the resampling of this map onto the wavefront, including the influence function. It's some wizardry that
    happens in C, and presumably it is taken care of so you don't have to worry about it.

    In the call to proper.prop_dm, we apply the flag tp.fit_dm, which switches between two 'modes' of proper's DM
    surface fitting. If FALSE, the DM is driven to the heights specified by dm_map, and the influence function will
    act on these heights to define the final surface shape applied to the DM, which may differ substantially from
    the initial heights specified by dm_map. If TRUE, proper will iterate applying the influence function to the
    input heights, and adjust the heights until the difference between the influenced-map and input map meets some
    proper-defined convergence criteria. Setting tp.fit_dm=TRUE will obviously slow down the code, but will (likely)
    more accurately represent a well-calibrated DM response function.

    Much of this code is copied from the example in the Proper manual, pg. 94.

    :param wf: single wavefront
    :param WFS_map: wavefront sensor map, should be in units of phase delay
    :param previous_output:
    :param iter: the current index of iteration (which timestep this is)
    :param plane_name: name of plane (should be 'woofer' or 'tweeter' for best functionality)
    :return: nothing is returned, but the probe map has been applied to the DM via proper.prop_dm. DM plane post DM
        application can be saved via the sp.save_list functionality
    """
    assert np.logical_xor(WFS_map is None, previous_output is None)

    # AO Actuator Count from DM Type
    if plane_name == 'tweeter' and hasattr(tp, 'act_tweeter'):
        nact = tp.act_tweeter
    elif plane_name == 'woofer' and hasattr(tp, 'act_woofer'):
        nact = tp.act_woofer
    else:
        nact = tp.ao_act

    # DM Coordinates
    nact_across_pupil = nact - 2  # number of full DM actuators across pupil (oversizing DM extent)
    # Location of the optical axis (center of the wavefront) on the DM, in actuator
    # units. The first actuator is centered on (0.0, 0.0). The 0.5 offset is a
    # parameter introduced/tuned by Rupert to remove weird errors (address this).
    # KD verified this needs to be here or else suffer weird errors 9/19
    # TODO address/remove the 0.5 in DM x,y coordinates
    dm_xc = nact / 2
    dm_yc = nact / 2

    ############################
    # Creating DM Surface Map
    ############################
    d_beam = 2 * proper.prop_get_beamradius(wf)  # beam diameter
    act_spacing = d_beam / nact_across_pupil  # actuator spacing [m]

    #######
    # AO
    #######
    if previous_output is not None and WFS_map is None:
        dm_map = update_dm(previous_output)
    else:
        dm_map = quick_ao(wf, nact, WFS_map[wf.iw])

    #########
    # Waffle
    #########
    if tp.satelite_speck['apply'] and plane_name != 'woofer':
        waffle = make_speckle_kxy(tp.satelite_speck['xloc'],
                                  tp.satelite_speck['yloc'],
                                  tp.satelite_speck['amp'],
                                  tp.satelite_speck['phase'])
        waffle += make_speckle_kxy(tp.satelite_speck['xloc'],
                                   -tp.satelite_speck['yloc'],
                                   tp.satelite_speck['amp'],
                                   tp.satelite_speck['phase'])
        dm_map += waffle

    #######
    # CDI
    ######
    if cdi.use_cdi and plane_name == cdi.which_DM:
        theta = cdi.phase_series[iter]
        if not np.isnan(theta):
            # dprint(f"Applying CDI probe, lambda = {wfo.wsamples[iw]*1e9:.2f} nm")
            cdi.save_tseries(iter, datetime.datetime.now())
            probe = config_probe(theta, nact, iw=wf.iw, ib=wf.ib, tstep=iter)
            dm_map = dm_map + probe  # Add Probe to DM map

    #########################
    # Applying Piston Error
    #########################
    if tp.piston_error:
        mean_dm_map = np.mean(np.abs(dm_map))  # currently unused
        var = 1e-4  # 1e-11; passed to np.random.normal as the standard deviation
        dm_map = dm_map + np.random.normal(0, var, dm_map.shape)

    #########################
    # proper.prop_dm
    #########################
    dmap = proper.prop_dm(wf, dm_map, dm_xc, dm_yc, act_spacing, FIT=tp.fit_dm)

    if debug and wf.iw == 0 and wf.ib == 0 and iter == 0:
        dprint(plane_name)
        check_sampling(wf,
                       iter,
                       plane_name + ' DM pupil plane',
                       getframeinfo(stack()[0][0]),
                       units='mm')

        quick2D(WFS_map[wf.iw],
                title=f"WFS map after masking",
                zlabel='unwrapped phase (rad)',
                vlim=[-3 * np.pi, 3 * np.pi])

        fig, ax = plt.subplots(1, 1)
        cax = ax.imshow(dm_map * 1e9, interpolation='none', origin='lower')
        plt.title(f'{plane_name} dm_map (actuator coordinates)')
        cb = plt.colorbar(cax)
        cb.set_label('nm')

        plt.show()

        post_ao = unwrap_phase(
            proper.prop_get_phase(wf)) * wf.lamda / (2 * np.pi)
        # quick2D(pre_ao_dist*1e9, title='unwrapped wavefront before DM', zlabel='nm', show=False)  # , vlim=(-0.5e-7,0.5e-7))
        # quick2D(np.abs(pre_ao_amp)**2, title='Pre-AO Intensity', show=False)#, vlim=(-0.5e-7,0.5e-7))
        # quick2D(dmap, title='the phase map prop_dm is applying', zlabel='distance (m)', show=False)#, vlim=(-0.5e-7,0.5e-7))
        # plt.figure()
        # plt.plot(pre_ao_dist[len(pre_ao_dist)//2], label=f'pre_ao 1D cut, row {len(pre_ao_dist)//2}')
        # plt.plot(2*dmap[len(dmap)//2], label=f'dmap 1D cut (x2), row {len(dmap)//2}')
        # plt.plot((pre_ao_dist + (2*dmap))[len(dmap)//2], label='difference')
        # plt.legend()
        # plt.xlim(sp.grid_size//2*np.array([1-sp.beam_ratio*1.1, 1+sp.beam_ratio*1.1]))
        # quick2D(pre_ao + (2*dmap), title='diff', zlabel='m', show=False, vlim=(-0.5e-7,0.5e-7))
        # quick2D(post_ao, title='unwrapped wavefront after DM', zlabel='m', show=True, vlim=(-0.5e-7,0.5e-7))
        # quick2D(np.abs(proper.prop_get_amplitude(wf))**2, title='wavefront after DM intensity', show=False)
        # quick2D(proper.prop_get_phase(wf), title='wavefront after DM in phase units', zlabel='Phase',
        #          show=True)  # colormap='sunlight',

    if apodize:
        hardmask_pupil(wf)

    return dmap
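To make the geometry in the 'DM Coordinates' block concrete, here is a short numeric sketch with an invented beam diameter: the dm_map handed to proper.prop_dm is nact x nact in actuator units, the optical axis sits at (nact/2, nact/2), and the actuator pitch in metres follows from oversizing the DM by one actuator on each side of the pupil.

nact = 49                                   # actuators across the DM (stand-in for tp.ao_act)
nact_across_pupil = nact - 2                # actuators actually spanning the beam
d_beam = 8.0e-3                             # beam diameter at the DM [m], invented value
act_spacing = d_beam / nact_across_pupil    # actuator pitch projected onto the beam [m]
dm_xc = dm_yc = nact / 2                    # optical-axis location in actuator units
print(f"{act_spacing * 1e6:.1f} um pitch, centre at actuator ({dm_xc}, {dm_yc})")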