Example 1
def create_output_dirs(name, outdir):
    """create ouput directory for pybdsm log files and output images"""

    if "/" in name:
        LOGGER.info(
            "Output directory part of out-name will overwrite outdir option")
        outdir = os.path.dirname(name)

    if not outdir.endswith("/"):
        if outdir.endswith(".pc"):
            outdir += "/"
        else:
            outdir += ".pc/"

    if os.path.isdir(outdir):
        LOGGER.info(
            "Output directory already exists from a previous run, will make a backup"
        )
        import glob
        import shutil

        # strip the trailing "/" so the glob matches the directory itself plus
        # any previous backups, and the backup lands beside (not inside) outdir
        base = outdir.rstrip("/")
        nb_runs = len(glob.glob(base + "*"))
        shutil.move(outdir, base + "-" + str(nb_runs - 1))

    os.mkdir(outdir)

    return outdir
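
A minimal usage sketch (paths hypothetical): a slash in the out-name overrides the outdir option, and the `.pc` suffix is enforced before the directory is created.

# "out/myrun" contains "/", so outdir is overridden to "out" and then
# normalised to "out.pc/"; an existing "out.pc/" is first rotated to
# "out.pc-0", "out.pc-1", ...
outdir = create_output_dirs("out/myrun", "ignored")
print(outdir)  # -> "out.pc/"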
Example 2
	def __antenna_worker(ind, loc):

		tind, find = ind

		tsel = slice(time_chunks[tind][0], time_chunks[tind][1])
		fsel = slice(freq_chunks[find][0], freq_chunks[find][1])

		times = time_col[tsel]
		nch = freq_chunks[find][1] - freq_chunks[find][0]

		Nt = float(len(np.unique(times)))
		fp = f[tsel, fsel, 0].squeeze()

		ant1p = ant1[tsel]
		ant2p = ant2[tsel]

		# shift to 1-based antenna indices so that flagged rows, zeroed by the
		# multiplication below, cannot be confused with antenna 0
		ant1p = 1 + np.repeat(ant1p[:, np.newaxis], nch, axis=1)
		ant2p = 1 + np.repeat(ant2p[:, np.newaxis], nch, axis=1)

		ant1p *= (fp == False)
		ant2p *= (fp == False)

		ant_counts = np.zeros((nch, n_ant))

		for fi in range(nch):

			c_ant1, count_1 = np.unique(ant1p[:, fi], return_counts=True)
			c_ant2, count_2 = np.unique(ant2p[:, fi], return_counts=True)

			for aa in range(1, n_ant + 1):
				try:
					caa1 = count_1[np.where(c_ant1 == aa)][0]
				except IndexError:
					caa1 = 0
				try:
					caa2 = count_2[np.where(c_ant2 == aa)][0]
				except IndexError:
					caa2 = 0

				ant_counts[fi, aa - 1] = float(caa1 + caa2)

		# antennas (1-based) that have a zero count in some channel
		ant_zeros = np.arange(1, n_ant + 1)[np.where(ant_counts == 0)[1]]

		if any(ant_zeros):
			LOGGER.debug("Completely flagged antennas:[{}]".format(", ".join('{}'.format(col) for col in ant_zeros)) + " for chunk T%dF%d"%(tind,find))

		with warnings.catch_warnings():
			warnings.simplefilter("ignore", category=RuntimeWarning)
			ant_counts = np.where(ant_counts == 0, np.nan, ant_counts) / Nt

			if dim_1:
				n_ants[loc] = ant_counts + 1
			else:
				n_ants[tind, find] = ant_counts + 1
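
A toy illustration of the masking trick used above (arrays invented): antenna indices are shifted to be 1-based so that multiplying by the negated flags zeroes flagged rows, letting 0 act as the "flagged" label in the per-channel counts.

import numpy as np

ant1p = np.array([0, 1, 2])            # antenna-1 index per row
fp = np.array([False, True, False])    # True = flagged
shifted = 1 + ant1p                    # 1-based: [1, 2, 3]
masked = shifted * (fp == False)       # flagged rows become 0 -> [1, 0, 3]
vals, counts = np.unique(masked, return_counts=True)
print(vals, counts)                    # [0 1 3] [1 1 1]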
Example 3
def define_time_chunks(timecol, size, scans, jump=1):
	"""
      	Construct the array indices for the different time chunks.

        Args:
            timecol (array): 
                measurement set time column.
            size (int): 
                chunk size.
            scans (array):   
                measurement scan numbers column.
           	jump (int, optional): 
                The magnitude of a jump has to be over this value to force a chunk boundary.             
        Returns:
            time_chunks:
                - list of chunks indices
	"""

	unique, tindex = np.unique(timecol, return_index=True)
	# a chunk boundary is forced wherever the scan number jumps by more than `jump`
	b = abs(np.roll(scans, 1) - scans) > jump
	bounds = np.append(np.where(b == True)[0], len(scans))

	# map each row's timestamp to its timeslot index
	rmap = {x: i for i, x in enumerate(unique)}
	indices = np.fromiter(map(rmap.__getitem__, timecol), int)

	time_chunks = []
	i = k = 0

	while i < len(unique):
		if b[tindex[i]]:
			k += 1
		ts = tindex[i]
		if (indices[ts] + size) < len(tindex):
			if tindex[i + size] < bounds[k]:
				te = tindex[i + size]
				i = i + size
			else:
				te = bounds[k]
				i = indices[te + 1]
			time_chunks.append((ts, te))
		else:
			te = bounds[-1]
			time_chunks.append((ts, te))
			break

	LOGGER.info("Found {:d} time chunks from {:d} unique timeslots.".format(len(time_chunks), len(unique)))

	return time_chunks
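
A minimal sketch on synthetic inputs (values invented), assuming numpy and the module's LOGGER are in scope: four timeslots with two rows each, and a scan-number jump of 2 between rows 3 and 4 that forces a chunk boundary.

import numpy as np

time_col = np.array([0., 0., 1., 1., 2., 2., 3., 3.])
scans = np.array([1, 1, 1, 1, 3, 3, 3, 3])  # jump of 2 exceeds jump=1

chunks = define_time_chunks(time_col, size=2, scans=scans, jump=1)
print(chunks)  # -> [(0, 4), (4, 8)]: row-index ranges, two timeslots each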
Example 4
def extract_from_db(dbfile, name="G:gain"):
	"""Extract from dbfile"""

	LOGGER.debug("Loading gains database {}".format(dbfile))

	try:
		dbdata = db.load(dbfile)
		gains = dbdata[name]

		gainscube = gains.get_cube()
		data = gainscube.data[-1]  # drop the direction axis (keep last direction)
	except Exception:
		# not a gains database; assume a plain .npy dump with the same layout
		data = np.load(dbfile)
		data = data[-1]  # drop the direction axis; assumes ntchunks is 1

	return data
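
A minimal sketch of the `.npy` fallback path (file name hypothetical), assuming `db.load` rejects a plain `.npy` file, which is what the `except` branch anticipates:

import numpy as np

# fake gains with shape (n_dir, n_time, n_freq, n_ant, 2, 2)
np.save("fake_gains.npy", np.ones((1, 10, 4, 7, 2, 2), dtype=np.complex128))

data = extract_from_db("fake_gains.npy")
print(data.shape)  # -> (10, 4, 7, 2, 2): the direction axis is dropped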
Example 5
def define_freq_chunks(size, nfreq):
	"""
		Contruct the array indices for the different frequency chunks

		Args:
			size (int):
				chunk size
			nfreq (int):
				number of frequencies

		Returns:
			freq_chunks:
				-list of chunks indices
	"""
	
	bounds = list(range(0,nfreq,size)) + [nfreq]

	freq_chunks = [(bounds[i], bounds[i+1]) for i in range(len(bounds)-1)]

	LOGGER.info("Found {:d} frequency chunks from {:d} frequency channels.".format(len(freq_chunks), nfreq))

	return freq_chunks
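
A quick usage example: ten channels split into blocks of four, with the remainder absorbed by the last chunk.

chunks = define_freq_chunks(size=4, nfreq=10)
print(chunks)  # -> [(0, 4), (4, 8), (8, 10)]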
Example 6
def main():
    """Main function."""
    LOGGER.info("Welcome to CubiInts")

    parser = create_parser()
    args = parser.parse_args()

    if args.verbose:
        for handler in LOGGER.handlers:
            handler.setLevel(logging.DEBUG)
    else:
        for handler in LOGGER.handlers:
            handler.setLevel(logging.INFO)

    LOGGER.info("started cubiints " + " ".join(sys.argv[1:]))

    if not args.usegains:

        outdir = create_output_dirs(args.name, args.outdir)

        ms_opts = {
            "DataCol": args.datacol,
            "ModelCol": args.modelcol,
            "FluxCol": args.fluxcol,
            "WeightCol": args.weightcol,
            "msname": args.ms
        }

        if args.ncpu:
            ncpu = args.ncpu
            from multiprocessing.pool import ThreadPool
            dask.config.set(pool=ThreadPool(ncpu))
        else:
            import multiprocessing
            ncpu = multiprocessing.cpu_count()

        LOGGER.info("Using %i threads" % ncpu)

        try:
            compute_interval_dask_index(ms_opts=ms_opts,
                                        SNR=args.snr,
                                        dvis=False,
                                        outdir=outdir,
                                        figname=os.path.basename(args.name) +
                                        "-interval",
                                        row_chunks=args.rowchunks,
                                        minbl=args.minbl,
                                        tchunk=args.tchunk,
                                        fchunk=args.fchunk,
                                        save_out=args.save_out,
                                        cubi_flags=args.cubi_flags,
                                        datachunk=args.datachunk)
        except Exception:
            extype, value, tb = sys.exc_info()
            traceback.print_exc()
            pdb.post_mortem(tb)

    else:

        if args.tint is None or args.fint is None:
            print(
                "options time-int and freq-int must be passed when usegains is selected"
            )
            parser.exit()
        if args.gaintable is None:
            print("A gaintable must be specified")
            parser.exit()
        tint = optimal_time_freq_interval_from_gains(args.gaintable,
                                                     args.ms,
                                                     args.Gname,
                                                     args.tint,
                                                     args.fint,
                                                     args.tchunk,
                                                     verbosity=args.verbose,
                                                     prefix=os.path.basename(
                                                         args.name))
        print("optimal interval time-int= {}".format(tint))
Example 7
def model_flux_per_scan_dask(time_chunks,
                             freq_chunks,
                             fluxcols,
                             w,
                             f,
                             filename="M1",
                             outdir="./soln-intervals",
                             indices=None):
    """compute the flux per interval scans"""

    # xds is the module-level dask-ms dataset opened in compute_interval_dask_index
    if len(fluxcols) == 1:
        m0 = getattr(xds[0], fluxcols[0]).data
        __sub_model = False
    else:
        m1 = getattr(xds[0], fluxcols[0]).data
        m0 = getattr(xds[0], fluxcols[1]).data
        __sub_model = True

    # apply flags and weights

    m0 *= (f == False)
    m0 *= w

    if __sub_model:
        # p*=(f==False) select based on m only
        m1 *= w

    LOGGER.debug("Done applying weights and flags")

    if indices is None:

        nt, nv = len(time_chunks), len(freq_chunks)

        flux = np.zeros((nt, nv))

        for tt, time_chunk in enumerate(time_chunks):
            for ff, freq_chunk in enumerate(freq_chunks):
                tsel = slice(time_chunk[0], time_chunk[1])
                fsel = slice(freq_chunk[0], freq_chunk[1])

                m0_diag = m0[tsel, fsel, :][..., [0, 3]]  # XX and YY only
                if __sub_model:
                    m1_diag = m1[tsel, fsel, :][..., [0, 3]]
                    model_abs = da.absolute(m1_diag[m0_diag != 0] -
                                            m0_diag[m0_diag != 0])
                else:
                    model_abs = da.absolute(m0_diag[m0_diag != 0])

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=RuntimeWarning)
                    flux[tt, ff] = np.mean(model_abs.compute())

            if tt % 6 == 0:
                LOGGER.info(
                    "Done computing model flux for %d/%d time chunks" %
                    (tt + 1, len(time_chunks)))

        LOGGER.info("Done computing model flux")

        np.save(outdir + "/" + filename + "flux.npy", flux)

        return flux

    else:

        flux = np.zeros(len(indices))

        for loc, index in enumerate(indices):
            tsel = slice(time_chunks[index[0]][0], time_chunks[index[0]][1])
            fsel = slice(freq_chunks[index[1]][0], freq_chunks[index[1]][1])

            m0_diag = m0[tsel, fsel, :][..., [0, 3]]  # XX and YY only
            if __sub_model:
                m1_diag = m1[tsel, fsel, :][..., [0, 3]]
                model_abs = da.absolute(m1_diag[m0_diag != 0] -
                                        m0_diag[m0_diag != 0])
            else:
                model_abs = da.absolute(m0_diag[m0_diag != 0])

            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                flux[loc] = np.mean(model_abs.compute())

        LOGGER.info("Done computing model flux")

        return flux
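
The repeated `[..., [0, 3]]` selections keep only the diagonal correlations (XX and YY for linear feeds), and the `!= 0` masks drop empty model entries before averaging. A toy numpy illustration (data invented):

import numpy as np

m = np.zeros((2, 3, 4), dtype=np.complex128)  # (row, chan, corr)
m[..., 0] = 1.5   # XX
m[..., 3] = 0.5   # YY
m[0, 0, 0] = 0.0  # an empty entry

diag = m[..., [0, 3]]                     # keep XX and YY
model_abs = np.absolute(diag[diag != 0])  # flatten and drop zeros
print(model_abs.mean())                   # mean |model| over valid entries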
Example 8
def compute_interval_dask_index(ms_opts={},
                                SNR=3,
                                dvis=False,
                                outdir="./soln-intervals",
                                figname="interval",
                                minbl=0,
                                row_chunks=4000,
                                freqslice=slice(None),
                                jump=1,
                                tchunk=64,
                                fchunk=128,
                                save_out=True,
                                cubi_flags=False,
                                datachunk=None):
    """replicate the compute interval using dask arrays"""

    t0 = time.time()

    t = table(ms_opts["msname"])

    f = build_flag_colunm(t,
                          minbl=minbl,
                          obvis=None,
                          freqslice=freqslice,
                          cubi_flags=cubi_flags)

    LOGGER.info("finished building flags")

    LOGGER.debug("Took {} seconds to complete".format(time.time() - t0))

    cell = t.getcell("DATA", rownr=1)
    nfreq = cell.shape[0]

    w = fetch(ms_opts["WeightCol"], subset=t, return_dask=True)
    LOGGER.info("read weight-column susscessful")

    t.close()
    LOGGER.info("Table Closed")

    cols = ms_opts["FluxCol"].split("-")

    columns = list(
        set([
            "ANTENNA1", "ANTENNA2", "TIME", "SCAN_NUMBER", ms_opts["DataCol"],
            ms_opts["ModelCol"]
        ]) | set(cols))

    LOGGER.info("Reading the columns: [{}] as a daskms".format(", ".join(
        '{}'.format(col) for col in columns)))

    global xds

    xds = xds_from_ms(ms_opts["msname"],
                      columns=columns,
                      chunks={"row": row_chunks})

    scans = getattr(xds[0], "SCAN_NUMBER").data.compute()
    time_col = getattr(xds[0], "TIME").data.compute()
    ant1 = getattr(xds[0], "ANTENNA1").data.compute()
    ant2 = getattr(xds[0], "ANTENNA2").data.compute()

    NUMBER_ANTENNAS = max(ant2) + 1

    time_chunks = define_time_chunks(time_col, tchunk, scans, jump=jump)

    freq_chunks = define_freq_chunks(fchunk, nfreq)

    time_f = time.time()

    if datachunk:
        indices = [
            tuple(np.array(datachunk.split("T")[1].split("F"), dtype=int))
        ]

    else:
        flags_ratio = get_flag_ratio(time_chunks, freq_chunks, f.compute())

        indices = [
            np.unravel_index(
                np.nanargmin(np.abs(flags_ratio - np.nanmedian(flags_ratio))),
                flags_ratio.shape)
        ]

    n_ants = get_mean_n_ant_tf(
        ant1,
        ant2,
        f.compute(),
        time_chunks,
        freq_chunks,
        time_col,
        indices=indices
    )  # a previous [2, ...] selection kept only the per-chunk min values (2-D output)

    n_ants[n_ants == 0] = np.nan

    LOGGER.debug("Took {} seconds to compute flag ratio and antennas".format(
        time.time() - time_f))

    if dvis:
        raise NotImplementedError(
            "Compute rms from visibilities only yet to be implemented")

    d = getattr(xds[0], ms_opts["DataCol"]).data
    p = getattr(xds[0], ms_opts["ModelCol"]).data

    # apply flags
    d *= (f == False)
    # p *= (f == False): selection is based on d only

    # apply weights
    d *= w
    p *= w

    LOGGER.info("Done applying weights and flags")

    _prefix = figname.split("interval")[0] + "T" + str(
        indices[0][0]) + "F" + str(indices[0][1]) + "-"

    time_m = time.time()
    flux = model_flux_per_scan_dask(time_chunks,
                                    freq_chunks,
                                    cols,
                                    w,
                                    f,
                                    filename=_prefix,
                                    outdir=outdir,
                                    indices=indices)
    LOGGER.debug("Took {} seconds to compute model".format(time.time() -
                                                           time_m))

    chan_rms = np.zeros((n_ants.shape[1], NUMBER_ANTENNAS))

    nv_nt = np.zeros_like(n_ants)

    grms_na = np.zeros_like(n_ants)

    time_c = time.time()

    for loc, index in enumerate(indices):

        tsel = slice(time_chunks[index[0]][0], time_chunks[index[0]][1])
        fsel = slice(freq_chunks[index[1]][0], freq_chunks[index[1]][1])

        ant1p = ant1[tsel]
        ant2p = ant2[tsel]

        # pdb.set_trace()

        for aa in range(NUMBER_ANTENNAS):
            # rows in which antenna aa participates; keep XX and YY only
            asel = (ant1p == aa) | (ant2p == aa)
            dps = d[tsel][asel][:, fsel, :][..., [0, 3]]
            pps = p[tsel][asel][:, fsel, :][..., [0, 3]]
            rps = dps - pps

            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=RuntimeWarning)
                tmp = rps.compute()
                tmp[tmp == 0] = np.nan
                chan_rms[:, aa] = np.nanstd(tmp, axis=(0, 2)) * np.sqrt(2)

            if aa % 6 == 0:
                LOGGER.debug("Done computing noise for {%d}/{%d} antennas" %
                             (aa + 1, NUMBER_ANTENNAS))

        nv_nt[loc], grms_na[loc] = get_interval(chan_rms,
                                                flux[loc],
                                                n_ants[loc],
                                                SNR=SNR)

    LOGGER.debug("Took {} seconds to compute intervals".format(time.time() -
                                                               time_c))

    # save products

    nv_nt[nv_nt == 0] = np.nan

    nvis = np.nanmax(nv_nt)
    chunks_size_ok = True

    if nvis < fchunk:
        f_int = nvis
        t_int = 1
    else:
        f_int = fchunk
        t_int = np.ceil(nvis / fchunk)
        if t_int > tchunk:
            chunks_size_ok = False

    LOGGER.info("Number visibilities per solution block is {}.".format(nvis))
    if chunks_size_ok:
        LOGGER.info(
            "Suggested solution intervals based on the chunk sizes: frequency interval = {} and time interval = {}."
            .format(f_int, t_int))
    else:
        LOGGER.info(
            "Suggested solution intervals frequency interval = {} and time interval = {} are larger than the chunk sizes. Consider increasing the chunk sizes and rerunning for a better suggestion."
            .format(f_int, t_int))

    if save_out:
        LOGGER.info("Computed statstics will be saved in the output folder.")

        np.save(outdir + "/" + _prefix + "num_antennas.npy", n_ants)
        np.save(outdir + "/" + _prefix + "chan_rms.npy", chan_rms)
        np.save(outdir + "/" + _prefix + "nv_nt.npy", nv_nt)
        np.save(outdir + "/" + _prefix + "grms.npy", grms_na)

        if datachunk is None:
            np.save(outdir + "/" + _prefix + "flags.npy", flags_ratio)

        imshow_stat(n_ants, outdir + "/" + _prefix + "nants.pdf")
        imshow_stat(chan_rms, outdir + "/" + _prefix + "chan_rms.pdf")
        imshow_stat(nv_nt, outdir + "/" + _prefix + "nv_nt.pdf")

        with np.errstate(divide='ignore', invalid='ignore'):
            imshow_stat(grms_na / 128.,
                        outdir + "/" + _prefix + "grms-128.pdf")
            imshow_stat(grms_na / nv_nt,
                        outdir + "/" + _prefix + "grms-var.pdf")

    LOGGER.info("Took {} seconds to complete".format(time.time() - t0))
Example 9
def build_flag_colunm(tt, minbl=100, obvis=None, freqslice=slice(None), row_chunks=4000, cubi_flags=False):
	"""Construct the a the initial flag column that will be use by Cubical
	when flagset is set to -cubical and min base line of 100"""

	uvw0 =  fetch("UVW", subset=tt)

	# TODO: Explain this and handle it better in the user options 

	try:
		bflagrow = fetch("BITFLAG_ROW", subset=tt)
		bflagcol = fetch("BITFLAG", subset=tt, freqslice=freqslice)
		bitf = tt.getcolkeyword("BITFLAG", "FLAGSET_legacy")
		cubif = tt.getcolkeyword("BITFLAG", "FLAGSET_cubical")
	except Exception:
		LOGGER.info("No BITFLAG column in MS, will default to FLAG/FLAG_ROW columns")
		bflagrow = fetch("FLAG_ROW", subset=tt)
		bflagcol = fetch("FLAG", subset=tt, freqslice=freqslice)

		try:
			bitf = tt.getcolkeyword("FLAG", "FLAGSET_legacy")
			cubif = tt.getcolkeyword("FLAG", "FLAGSET_cubical")
		except Exception:
			bitf = 1
			cubif = 2

	
	flag0 = np.zeros_like(bflagcol)

	# flag short baselines
	if minbl:
		uv2 = (uvw0[:, 0:2] ** 2).sum(1)
		flag0[uv2 < minbl**2] = 1
		del uvw0, uv2

	# Exceptionally add CubiCal flags
	if cubi_flags:
		apply_bit = bitf | cubif
	else:
		apply_bit = bitf

	#bitflag column
	flag0[(bflagcol & apply_bit) != 0] = True

	#bitflag row
	flag0[(bflagrow & apply_bit) != 0, :, :] = True

	# remove invalid visibilities (data exactly zero in the first correlation)
	if isinstance(obvis, np.ndarray):
		ax = obvis[..., (0, -1)] == 0
		flag0[ax[:, :, 0] == True] = 1
		del ax


	percent_flag = 100. * np.sum(flag0[:, :, 0]) / flag0[:, :, 0].size

	LOGGER.info("Flagged fraction: {:.2f}%".format(percent_flag))

	del bflagrow, bflagcol

	_, nfreq, ncorr = flag0.shape

	flag0 = da.from_array(flag0, chunks=(row_chunks, nfreq, ncorr))

	return flag0
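
A toy illustration of the bitmask logic (values invented): each named flagset occupies one bit, and a visibility counts as flagged if any selected bit is set.

import numpy as np

FLAGSET_legacy, FLAGSET_cubical = 1, 2  # one bit per flagset
bflag = np.array([0, 1, 2, 3])          # per-visibility bitflags

apply_bit = FLAGSET_legacy | FLAGSET_cubical  # = 3: honour both flagsets
print((bflag & apply_bit) != 0)               # [False  True  True  True]
print((bflag & FLAGSET_legacy) != 0)          # [False  True False  True]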
Example 10
def get_mean_n_ant_tf(ant1, ant2, f, time_chunks, freq_chunks, time_col, indices=None):
	"""
		Compute the mean number of antennas per time-frequency chunk

		Args:
			ant1 (1D array):
				antenna 1 values from measurement set
			ant2 (1D array):
				antenna 2 values from meausrement set
			f (3D array):
				flag column 
			time_chunks:
				list of time chunk indices
			freq_chunks:
				list of freq chunk indices
			time_col:
				time column from measurement set
			indices:
				tuple, use only these chunks


		returns
			n_ants (array):
				shape: 3 x len(time_chunks) x len(freq_chunks)
				3 for mean, min and max nmber of antennas
	
	"""

	n_ant = max(ant2) + 1
	
	nfreq = freq_chunks[0][1] - freq_chunks[0][0]

	if indices is None:
		tsize, fsize = len(time_chunks), len(freq_chunks)

		rows, cols = np.indices((tsize, fsize))
		indices = list(zip(rows.flatten(), cols.flatten()))

		# one (nfreq, n_ant) block per (time, freq) chunk
		n_ants = np.zeros((tsize, fsize, nfreq, n_ant))

		dim_1 = False

	else:
		n_ants = np.zeros((len(indices), nfreq, n_ant))

		dim_1 = True

	def __antenna_worker(ind, loc):

		tind, find = ind

		tsel = slice(time_chunks[tind][0], time_chunks[tind][1])
		fsel = slice(freq_chunks[find][0], freq_chunks[find][1])

		times = time_col[tsel]
		nch = freq_chunks[find][1] - freq_chunks[find][0]

		Nt = float(len(np.unique(times)))
		fp = f[tsel, fsel, 0].squeeze()

		ant1p = ant1[tsel]
		ant2p = ant2[tsel]

		# shift to 1-based antenna indices so that flagged rows, zeroed by the
		# multiplication below, cannot be confused with antenna 0
		ant1p = 1 + np.repeat(ant1p[:, np.newaxis], nch, axis=1)
		ant2p = 1 + np.repeat(ant2p[:, np.newaxis], nch, axis=1)

		ant1p *= (fp == False)
		ant2p *= (fp == False)

		ant_counts = np.zeros((nch, n_ant))

		for fi in range(nch):

			c_ant1, count_1 = np.unique(ant1p[:, fi], return_counts=True)
			c_ant2, count_2 = np.unique(ant2p[:, fi], return_counts=True)

			for aa in range(1, n_ant + 1):
				try:
					caa1 = count_1[np.where(c_ant1 == aa)][0]
				except IndexError:
					caa1 = 0
				try:
					caa2 = count_2[np.where(c_ant2 == aa)][0]
				except IndexError:
					caa2 = 0

				ant_counts[fi, aa - 1] = float(caa1 + caa2)

		# antennas (1-based) that have a zero count in some channel
		ant_zeros = np.arange(1, n_ant + 1)[np.where(ant_counts == 0)[1]]

		if any(ant_zeros):
			LOGGER.debug("Completely flagged antennas:[{}]".format(", ".join('{}'.format(col) for col in ant_zeros)) + " for chunk T%dF%d"%(tind,find))

		with warnings.catch_warnings():
			warnings.simplefilter("ignore", category=RuntimeWarning)
			ant_counts = np.where(ant_counts == 0, np.nan, ant_counts) / Nt

			if dim_1:
				n_ants[loc] = ant_counts + 1
			else:
				n_ants[tind, find] = ant_counts + 1

	for loc, index in enumerate(indices):
		__antenna_worker(index, loc)

		
	LOGGER.info("Done computing mean number of antennas")

	return n_ants
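
A minimal usage sketch on synthetic data (all arrays invented), assuming the module-level numpy, warnings and LOGGER are in place: three antennas, two timeslots, all three baselines present, nothing flagged.

import numpy as np

ant1 = np.array([0, 0, 1, 0, 0, 1])
ant2 = np.array([1, 2, 2, 1, 2, 2])
time_col = np.array([0., 0., 0., 1., 1., 1.])
f = np.zeros((6, 2, 1), dtype=bool)  # (row, chan, corr), all unflagged

n_ants = get_mean_n_ant_tf(ant1, ant2, f, [(0, 6)], [(0, 2)],
                           time_col, indices=[(0, 0)])
print(n_ants.shape)  # -> (1, 2, 3): (chunk, channel, antenna)
print(n_ants[0, 0])  # -> [3. 3. 3.]: 2 baselines per timeslot, plus one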
Example 11
def optimal_time_freq_interval_from_gains(dbfile, msname, name, tint, fint, tchunk, verbosity=False, prefix="G"):
	"""Search for the optimal time solution interval by minimising an Akaike
	information criterion over candidate intervals derived from the gains."""

	dbgains2 = extract_from_db(dbfile, name=name)
	_, nfreq, _, _, _ = dbgains2.shape

	dbgains = dbgains2

	try:
		dbjhj = extract_from_db(dbfile, name=name + '.err')
	except Exception:
		dbjhj = extract_from_db(dbfile[:-4] + "_err.npy")
	
	Nt, Nf, Na, _, _ = dbgains.shape

	# number of timeslots per chunk; previously derived from the per-scan
	# timeslot counts in the MS, now taken directly from the tchunk option
	ntc = tchunk

	tints = np.array(range(1, ntc + 1))
	fints = np.array(range(1, Nf + 1))

	gaic = np.zeros((len(fints), len(tints)))

	f_int = 1 #Nf for reasons to explain later
	gobs, jhjinv = g_and_jhjinv_from_soln(dbgains, dbjhj, Nt, Nf, tint, f_int)
	gobs = gobs[np.newaxis, np.newaxis, ...]
	jhjinv = jhjinv[np.newaxis, np.newaxis, ...]

	for i, fi in enumerate(fints):
		for j, ti in enumerate(tints):
			avg_gains = get_2D_avg_gains(gobs[:, :, :, fi-1:fi, ...], ti, 1)
			chisq, num_valid_eqs, n_valid_sols = compute_gains_chisq(
				gobs[:, :, :, fi-1:fi, ...], avg_gains, ti, fi,
				jhjinv[:, :, :, fi-1:fi, ...])

			gaic[i, j] = akaike_info_criterion(num_valid_eqs, n_valid_sols, chisq + 1e-12)

		LOGGER.debug("Done for frequency block {}/{}".format(i + 1, Nf))

	# intervals below the solver's own interval tint are not measurable;
	# overwrite them with the value at tint
	for i in range(tint):
		gaic[:, i] = gaic[:, tint]

	tmins = tints[np.argmin(gaic, axis=1)]
	ta = max(np.min(tmins), tint)

	LOGGER.info(f"Suggested optimal time interval is {ta}")

	fig, ax1 = plt.subplots()

	ax1.set_xlabel("Time-interval", size=30)

	lns = None

	mod_val = 2 if len(fints) > 8 else 1

	for i, fi in enumerate(fints):
		ln = ax1.plot(tints[ta//3:], gaic[i, ta//3:] / np.min(gaic[i, :]),
		              linestyle='-', label="Block = %d" % fi, linewidth=2)

		# keep every mod_val-th line for the legend
		if lns is None:
			lns = ln
		elif (i + 1) % mod_val == 0:
			lns += ln

	ax1.set_ylabel("AIC")
	ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))

	labs = [l.get_label() for l in lns]
	ax1.legend(lns, labs, loc='best', fontsize='small')
	
	fig.tight_layout()

	outname = dbfile[:-4] + "-" + prefix

	plt.savefig(outname+"_cubi_AIC_freq.pdf")
	
	plt.clf()
	plt.close()


	return ta
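
A hypothetical invocation (file and MS names invented), mirroring the `usegains` branch of `main()`:

tint = optimal_time_freq_interval_from_gains("gains.db", "obs.ms", "G:gain",
                                             tint=1, fint=1, tchunk=64,
                                             verbosity=True, prefix="run1")
print("optimal interval time-int= {}".format(tint))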