Example #1
def findFirstandLast(self):
    if len(self.currentData) == 0:
        # sentinels: +inf / 0 so later min/max comparisons always update them
        self.firstDate = np.inf
        self.lastDate = 0
    else:
        self.firstDate = self.currentData[0]
        self.lastDate = self.currentData[-1]
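A minimal sketch of why these sentinels work: any real value beats +inf in a min comparison and beats 0 in a max comparison, so the first real data point always replaces both.

import numpy as np

first, last = np.inf, 0            # "no data yet" sentinels
for date in [59100.2, 59099.8, 59101.5]:
    first = min(first, date)       # any real date beats +inf
    last = max(last, date)         # any positive date beats 0
print(first, last)                 # 59099.8 59101.5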
Example #2
def optimize_log_profile(z, v, dist_bank=None):
    """
    Optimize the velocity log-profile relation v = k * max(z / z0), with k a function of distance to bank and k_max.

    :param z: list of depths
    :param v: list of surface velocities
    :param dist_bank: list of distances to bank
    :return: dict, fitted parameters of log_profile {z0, k_max, s0, s1}
    """
    if dist_bank is None:
        # np.inf is a constant, not a callable: build the array with np.full
        dist_bank = np.full(len(v), np.inf)
    v = np.array(v)
    z = np.array(z)
    result = curve_fit(
        log_profile,
        (z, dist_bank),
        np.array(v),
        # bounds=([0.00001, 0.05, -20], [10, 2., 20]),
        # bounds=([0.05, -20, 0., 0.], [0.051, 20, 5, 100]),
        bounds=([0.005, -20, 0., 0.], [0.1, 20, 5, 100]),
        # p0=[0.05, 0, 0., 0.],
        # method="dogbox"
    )
    # unravel parameters
    z0, k_max, s0, s1 = result[0]
    return {"z0": z0, "k_max": k_max, "s0": s0, "s1": s1}
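A hedged usage sketch: log_profile below is a hypothetical stand-in for the project's real model function (same call signature, parameters z0, k_max, s0, s1), included only so the call runs end to end. Recent scipy versions pick a default p0 inside the given bounds when none is supplied.

import numpy as np
from scipy.optimize import curve_fit

def log_profile(x, z0, k_max, s0, s1):
    # hypothetical model: v = k * log(z / z0), with k shrinking near the bank
    z, dist_bank = x
    k = k_max * (1 - s0 * np.exp(-dist_bank / s1))
    return k * np.log(np.maximum(z / z0, 1e-6))

z = np.array([0.5, 1.0, 2.0, 4.0])
v = log_profile((z, np.full(len(z), np.inf)), z0=0.05, k_max=0.6, s0=0.0, s1=10.0)
print(optimize_log_profile(z, v))  # should recover z0 ~ 0.05 and k_max ~ 0.6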
Example #3
    def fit_stats(self, rvname, probs, lmd=_LMD_DEFAULT, x0=None):
        """ for intermediate nodes (nodes with parents) only"""
        if np.isinf(self.bins[-1]) and np.isinf(self.bins[0]):
            loc = self.bins[1:-1]
        elif np.isinf(self.bins[-1]):
            loc = self.bins[:-1]
        elif np.isinf(self.bins[0]):
            loc = self.bins[1:]
        else:
            loc = self.bins
        cdfdata = self.node_cdf(loc, probs, lmd=lmd)
        if x0 is None:
            x0 = self.node_stats(probs, lmd=lmd)
        if rvname.lower() == 'lognormal':

            def objfunc(x, loc, cdfdata):
                m = x[0]
                v = x[1]
                mu = np.log(m / np.sqrt(1 + v / m**2))
                sgm = np.sqrt(np.log(1 + v / m**2))
                return np.linalg.norm(
                    stats.lognorm.cdf(loc, sgm, scale=np.exp(mu)) - cdfdata)

            optres = minimize(objfunc,
                              x0,
                              args=(loc, cdfdata),
                              bounds=([0., np.inf], [0., np.inf]))
        else:
            print("unknown distribution name to fit cdf data to")
            sys.exit(1)
        return optres.x, optres
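The mean/variance to (mu, sigma) conversion inside objfunc is standard lognormal moment matching; a quick standalone check that it round-trips:

import numpy as np
from scipy import stats

m, v = 2.0, 0.5                                # target mean and variance
mu = np.log(m / np.sqrt(1 + v / m**2))
sgm = np.sqrt(np.log(1 + v / m**2))
dist = stats.lognorm(sgm, scale=np.exp(mu))
print(dist.mean(), dist.var())                 # ~2.0 and ~0.5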
Example #5
def initialize(self):
    self.weight = np.random.normal(0, 1 / np.sqrt(self.width),
                                   np.shape(self.weight))
    self.bias = bias_const * np.ones(np.shape(self.bias), dtype=float)
    # the paths after num_class are killed by making their weights zero
    # and their biases -infinity
    self.weight[self.num_class:self.width - 1, :, self.depth - 1] = np.zeros(
        (self.width - self.num_class, self.width, 1), dtype=float)
    self.bias[self.num_class:self.width - 1, self.depth - 1] = np.full(
        (self.width - self.num_class, 1), -np.inf)
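A small illustration of the masking trick in the comment above: a -inf bias drives a unit's pre-activation to -inf, which a softmax maps to exactly zero probability (standalone sketch, not the project's network code).

import numpy as np

logits = np.array([1.2, 0.4, -np.inf])   # third path killed by a -inf bias
p = np.exp(logits - logits.max())
p /= p.sum()
print(p)                                 # last entry is exactly 0.0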
Example #6
def median_index(list_of_nums):
    l = list(list_of_nums)
    l.sort()
    if len(l) % 2 != 0:
        middle_index = int((len(l) - 1) / 2)
        return middle_index
    elif len(l) % 2 == 0:
        # even length: return both indices that straddle the middle
        middle_index_1 = int(len(l) / 2)
        middle_index_2 = int(len(l) / 2) - 1
        return (middle_index_2, middle_index_1)
Example #7
def _log_like(params, radii, esd_model_func, esd_data, true_mass,
              fisher_matrix):
    if np.any(params < 0):
        return -np.inf

    esd_model = esd_model_func(radii, true_mass, params).flatten()
    esd_data = esd_data.flatten()

    return maszcal.likelihoods.log_gaussian_shape(radii * esd_model,
                                                  radii * esd_data,
                                                  fisher_matrix)
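Returning the constant -np.inf (not a call) is the conventional way to veto a parameter set in log-likelihood space: the implied likelihood is exp(-inf) = 0, so a sampler will never accept the move.

import numpy as np
print(np.exp(-np.inf))  # 0.0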
Example #8
def median(list_of_nums):
    l = list(list_of_nums)
    l.sort()
    if len(l) % 2 != 0:
        middle_index = int((len(l) - 1) / 2)
        return l[middle_index]
    elif len(l) % 2 == 0:
        middle_index_1 = int(len(l) / 2)
        middle_index_2 = int(len(l) / 2) - 1
        return_middle = (l[middle_index_1] + l[middle_index_2]) / 2
        return return_middle
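Both branches in action:

print(median([5, 1, 3]))     # odd length  -> 3
print(median([4, 1, 3, 2]))  # even length -> 2.5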
Example #9
def slope(x, y):
    # x and y are points, assumed here to be stored as (y, x) pairs
    dy = y[0] - x[0]
    dx = y[1] - x[1]
    return np.inf if dx == 0 else dy / dx
Example #10
File: glcm.py Project: casperfibaek/buteo
def GLCMFeaturesInvariant(
    glcm, features="all", homogeneityConstant=None, inverseDifferenceConstant=None
):
    if isinstance(features, list) and len(features) == 0:
        raise Exception("Not enough input arguments")
    elif isinstance(features, str) and features != "all":
        raise Exception("Invalid argument")
    else:
        if (glcm.shape[0] <= 1) or (glcm.shape[1] <= 1):
            raise Exception("The GLCM should be a 2-D or 3-D matrix.")
        else:
            if glcm.shape[0] != glcm.shape[1]:
                raise Exception(
                    "Each GLCM should be square with NumLevels rows and NumLevels cols"
                )

    checkHomogeneityConstant = lambda x: is_number(x) and 0 < x < np.inf
    checkInverseDifferenceConstant = lambda x: is_number(x) and 0 < x < np.inf

    if homogeneityConstant is None or not checkHomogeneityConstant(homogeneityConstant):
        homogeneityConstant = 1

    if inverseDifferenceConstant is None or not checkInverseDifferenceConstant(
        inverseDifferenceConstant
    ):
        inverseDifferenceConstant = 1

    # epsilon
    eps = 1e-10

    # Get size of GLCM
    nGrayLevels = glcm.shape[0]
    nglcm = glcm.shape[2]

    # Differentials
    dA = 1 / (nGrayLevels ** 2)
    dL = 1 / nGrayLevels

    dXplusY = 1 / (2 * nGrayLevels - 1)
    dXminusY = 1 / nGrayLevels
    dkdiag = 1 / nGrayLevels

    # Normalize each GLCM slice so it integrates to 1
    glcm = glcm / (np.sum(glcm, axis=(0, 1), keepdims=True) * dA)

    out = {}

    all_features = [
        "autoCorrelation",
        "clusterProminence",
        "clusterShade",
        "contrast",
        "correlation",
        "differenceAverage",
        "differenceEntropy",
        "differenceVariance",
        "dissimilarity",
        "energy",
        "entropy",
        "homogeneity",
        "informationMeasureOfCorrelation1",
        "informationMeasureOfCorrelation2",
        "inverseDifference",
        "maximalCorrelationCoefficient",
        "maximumProbability",
        "sumAverage",
        "sumEntropy",
        "sumOfSquaresVariance",
        "sumVariance",
    ]

    for feat in all_features:
        if str_in_list(feat, features):
            out[feat] = np.zeros(nglcm)

    class Feature_class:
        def __init__(self):
            for key in out.keys():
                self.__dict__[key] = True

        def __getattr__(self, name):
            # features that were not requested default to False
            return False

    use_features = Feature_class()

    glcmMean = np.zeros((nglcm, 1))
    uX = np.zeros((nglcm, 1))
    uY = np.zeros((nglcm, 1))
    sX = np.zeros((nglcm, 1))
    sY = np.zeros((nglcm, 1))

    # pX pY pXplusY pXminusY
    if (
        str_in_list("informationMeasureOfCorrelation1", features)
        or str_in_list("informationMeasureOfCorrelation2", features)
        or str_in_list("maximalCorrelationCoefficient", features)
    ):
        pX = np.zeros((nGrayLevels, nglcm))
        pY = np.zeros((nGrayLevels, nglcm))

    if (
        use_features.sumAverage
        or use_features.sumVariance
        or use_features.sumEntropy
    ):
        pXplusY = np.zeros(((nGrayLevels * 2 - 1), nglcm))

    if use_features.differenceEntropy or use_features.differenceVariance:
        pXminusY = np.zeros((nGrayLevels, nglcm))

    # HXY1 HXY2 HX HY
    if use_features.informationMeasureOfCorrelation1:
        HXY1 = np.zeros((nglcm, 1))
        HX = np.zeros((nglcm, 1))
        HY = np.zeros((nglcm, 1))

    if use_features.informationMeasureOfCorrelation2:
        HXY2 = np.zeros((nglcm, 1))

    # Create indices for vectorising code:
    sub = np.arange(1, nGrayLevels * nGrayLevels + 1)
    I, J = ind2sub(np.array([nGrayLevels, nGrayLevels]), sub)
    nI = I / nGrayLevels
    nJ = J / nGrayLevels
    if use_features.sumAverage or use_features.sumVariance or use_features.sumEntropy:
        # boolean masks selecting each anti-diagonal (constant i + j)
        sumLinInd = [None] * (2 * nGrayLevels - 1)
        for i in range(1, 2 * nGrayLevels):
            diagonal = i - nGrayLevels
            d = np.ones(nGrayLevels - np.abs(diagonal))
            sumLinInd[i - 1] = np.flipud(np.diag(d, diagonal)) != 0

    if (
        use_features.differenceAverage
        or use_features.differenceVariance
        or use_features.differenceEntropy
    ):
        # boolean masks selecting the pair of diagonals with constant |i - j|
        diffLinInd = [None] * nGrayLevels
        idx2 = np.arange(nGrayLevels)
        for i in idx2:
            d = np.ones(nGrayLevels - i)
            if i == 0:
                diffLinInd[i] = np.diag(d, 0) != 0
            else:
                diffLinInd[i] = (np.diag(d, i) + np.diag(d, -i)) != 0

    sumIndices = np.arange(2, 2 * nGrayLevels + 1)

    # Loop over all GLCMs
    for k in range(nglcm):
        currentGLCM = glcm[:, :, k]
        glcmMean[k] = np.mean(currentGLCM)
        # Flatten in column-major order: this assumes the project's ind2sub
        # helper mimics MATLAB's (1-based, column-major) linear indexing, so
        # that cg[m] pairs with I[m], J[m].
        cg = currentGLCM.ravel(order="F")

        # For symmetric GLCMs, uX = uY
        uX[k] = np.sum(nI * cg) * dA
        uY[k] = np.sum(nJ * cg) * dA
        sX[k] = np.sum((nI - uX[k]) ** 2 * cg) * dA
        sY[k] = np.sum((nJ - uY[k]) ** 2 * cg) * dA

        if (
            use_features.sumAverage
            or use_features.sumVariance
            or use_features.sumEntropy
        ):
            for i in sumIndices:
                pXplusY[i - 2, k] = np.sum(currentGLCM[sumLinInd[i - 2]]) * dkdiag

        if (
            use_features.differenceAverage
            or use_features.differenceVariance
            or use_features.differenceEntropy
        ):
            idx2 = np.arange(nGrayLevels)

            for i in idx2:
                pXminusY[i, k] = np.sum(currentGLCM[diffLinInd[i]]) * dkdiag

        if (
            use_features.informationMeasureOfCorrelation1
            or use_features.informationMeasureOfCorrelation2
            or use_features.maximalCorrelationCoefficient
        ):
            pX[:, k] = np.sum(currentGLCM, axis=1) * dL
            pY[:, k] = np.sum(currentGLCM, axis=0) * dL

        if use_features.informationMeasureOfCorrelation1:
            HX[k] = -np.nansum(pX[:, k] * np.log(pX[:, k])) * dL
            HY[k] = -np.nansum(pY[:, k] * np.log(pY[:, k])) * dL

            # I and J are 1-based, hence the -1 when indexing pX/pY
            HXY1[k] = -np.nansum(cg * np.log(pX[I - 1, k] * pY[J - 1, k])) * dA

        if use_features.informationMeasureOfCorrelation2:
            pIpJ = pX[I - 1, k] * pY[J - 1, k]
            HXY2[k] = -np.nansum(pIpJ * np.log(pIpJ)) * dA

        # Haralick features:
        if use_features.energy:
            out["energy"][k] = np.sum(cg ** 2) * dA

        if use_features.contrast:
            out["contrast"][k] = np.sum((nI - nJ) ** 2 * cg) * dA

        if use_features.autoCorrelation or use_features.correlation:
            autoCorrelation = np.sum(nI * nJ * cg) * dA

            if use_features.autoCorrelation:
                out["autoCorrelation"][k] = autoCorrelation

        if use_features.correlation:
            if sX[k] < eps or sY[k] < eps:
                # degenerate marginals: clamp the covariance to [-1, 1]
                out["correlation"][k] = np.clip(
                    autoCorrelation - uX[k] * uY[k], -1, 1
                )
            else:
                out["correlation"][k] = (
                    autoCorrelation - uX[k] * uY[k]
                ) / np.sqrt(sX[k] * sY[k])

        if use_features.sumOfSquaresVariance:
            out["sumOfSquaresVariance"][k] = np.sum(cg * (nI - uX[k]) ** 2) * dA

        if use_features.homogeneity:
            out["homogeneity"][k] = (
                np.sum(cg / (1 + homogeneityConstant * (nI - nJ) ** 2)) * dA
            )

        if use_features.sumAverage or use_features.sumVariance:
            sumAverage = (
                np.sum(
                    (2 * (sumIndices - 1)) / (2 * nGrayLevels - 1)
                    * pXplusY[sumIndices - 2, k]
                )
                * dXplusY
            )

            if use_features.sumAverage:
                out["sumAverage"][k] = sumAverage

        if use_features.sumVariance:
            out["sumVariance"][k] = (
                np.sum(
                    ((2 * (sumIndices - 1)) / (2 * nGrayLevels - 1) - sumAverage) ** 2
                    * pXplusY[sumIndices - 2, k]
                )
                * dXplusY
            )

        if use_features.sumEntropy:
            out["sumEntropy"][k] = (
                -np.nansum(
                    pXplusY[sumIndices - 2, k] * np.log(pXplusY[sumIndices - 2, k])
                )
                * dXplusY
            )

        if (
            use_features.entropy
            or use_features.informationMeasureOfCorrelation1
            or use_features.informationMeasureOfCorrelation2
        ):
            entropy = -np.nansum(cg * np.log(cg)) * dA

            if use_features.entropy:
                out["entropy"][k] = entropy

        if use_features.differenceAverage or use_features.differenceVariance:
            differenceAverage = (
                np.sum((idx2 + 1) / nGrayLevels * pXminusY[idx2, k]) * dXminusY
            )

            if use_features.differenceAverage:
                out["differenceAverage"][k] = differenceAverage

        if use_features.differenceVariance:
            out["differenceVariance"][k] = (
                np.sum(
                    ((idx2 + 1) / nGrayLevels - differenceAverage) ** 2
                    * pXminusY[idx2, k]
                )
                * dXminusY
            )

        if use_features.differenceEntropy:
            out["differenceEntropy"][k] = (
                -np.nansum(pXminusY[idx2, k] * np.log(pXminusY[idx2, k]))
                * dXminusY
            )

        if use_features.informationMeasureOfCorrelation1:
            infoMeasure1 = (entropy - HXY1[k]) / max(HX[k], HY[k])
            out["informationMeasureOfCorrelation1"][k] = infoMeasure1

        if use_features.informationMeasureOfCorrelation2:
            infoMeasure2 = np.sqrt(1 - np.exp(-2 * (HXY2[k] - entropy)))
            out["informationMeasureOfCorrelation2"][k] = infoMeasure2

        if use_features.maximalCorrelationCoefficient:
            # Correct by eps if the matrix has columns or rows that sum to zero.
            P = currentGLCM

            pX_ = pX[:, k]
            if np.any(pX_ < eps):
                pX_ = pX_ + eps
                pX_ = pX_ / (np.sum(pX_) * dL)

            pY_ = pY[:, k]
            if np.any(pY_ < eps):
                pY_ = pY_ + eps
                pY_ = pY_ / (np.sum(pY_) * dL)

            # Compute the Markov matrix
            Q = np.zeros((nGrayLevels, nGrayLevels))
            for i in range(nGrayLevels):
                Pi = P[i, :]
                pXi = pX_[i]
                for j in range(nGrayLevels):
                    Pj = P[j, :]
                    d = pXi * pY_
                    if np.any(d < eps):
                        print("Division by zero in the maximalCorrelationCoefficient!")
                    Q[i, j] = dA * np.sum(Pi * Pj / d)

            # Compute the second largest eigenvalue; the largest eigenvalue of
            # a Markov matrix is 1, so eigs is asked for the two of largest
            # magnitude and the smaller of the pair is kept.
            if np.any(np.isinf(Q)):
                e2 = np.nan
            else:
                E = eigs(Q, k=2, return_eigenvectors=False)
                # There may be a near-zero imaginary component here
                e2 = np.min(np.real(E))
            out["maximalCorrelationCoefficient"][k] = e2

        if use_features.dissimilarity:
            dissimilarity = np.sum(np.abs(nI - nJ) * cg) * dA
            out["dissimilarity"][k] = dissimilarity
        if use_features.clusterShade:
            out["clusterShade"][k] = (
                np.sum((nI + nJ - uX[k] - uY[k]) ** 3 * cg) * dA
            )
        if use_features.clusterProminence:
            out["clusterProminence"][k] = (
                np.sum((nI + nJ - uX[k] - uY[k]) ** 4 * cg) * dA
            )
        if use_features.maximumProbability:
            out["maximumProbability"][k] = np.amax(currentGLCM)
        if use_features.inverseDifference:
            out["inverseDifference"][k] = (
                np.sum(cg / (1 + inverseDifferenceConstant * np.abs(nI - nJ))) * dA
            )

    return out
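A hedged usage sketch. It assumes the module's helpers (is_number, str_in_list, ind2sub) are importable alongside the function, and it builds the input GLCM with scikit-image's graycomatrix purely for illustration; the buteo project may construct it differently.

import numpy as np
from skimage.feature import graycomatrix

img = (np.random.rand(64, 64) * 8).astype(np.uint8)    # image with 8 gray levels
g = graycomatrix(img, distances=[1], angles=[0, np.pi / 2], levels=8)
glcm = g[:, :, 0, :].astype(float)                     # (levels, levels, n_angles)
feats = GLCMFeaturesInvariant(glcm, features="all")
print(feats["contrast"], feats["homogeneity"])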
Example #11
def visible(mesh: Mesh.Mesh, r: np.ndarray) -> np.ndarray:
    rotated_vertices = r.dot(
        np.hstack([mesh.vertices,
                   np.ones([len(mesh.vertices), 1])]).T).T[:, :3]
    z = rotated_vertices[:, 2]
    uv = rotated_vertices[:, :2]
    uv = uv - np.min(uv, axis=0)
    uv = uv + 1
    uv = uv / np.max(uv) * 1000
    width = 1000
    height = 1000
    faces = np.array(mesh.tvi)
    v1 = faces[:, 0]
    v2 = faces[:, 1]
    v3 = faces[:, 2]
    nfaces = np.shape(faces)[0]
    x = np.c_[uv[v1, 0], uv[v2, 0], uv[v3, 0]]
    y = np.c_[uv[v1, 1], uv[v2, 1], uv[v3, 1]]
    minx = np.ceil(x.min(1))
    maxx = np.floor(x.max(1))
    miny = np.ceil(y.min(1))
    maxy = np.floor(y.max(1))

    del x, y

    minx = np.clip(minx, 0, width - 1).astype(int)
    maxx = np.clip(maxx, 0, width - 1).astype(int)
    miny = np.clip(miny, 0, height - 1).astype(int)
    maxy = np.clip(maxy, 0, height - 1).astype(int)

    [rows, cols] = np.meshgrid(np.linspace(1, 1000, num=1000),
                               np.linspace(1, 1000, num=1000))
    zbuffer = np.full((height, width), -np.inf)
    fbuffer = np.zeros((height, width), dtype=int)

    for i in range(nfaces):
        if minx[i] <= maxx[i] and miny[i] <= maxy[i]:
            px = rows[miny[i]:maxy[i], minx[i]:maxx[i]]
            py = cols[miny[i]:maxy[i], minx[i]:maxx[i]]
            px = px.ravel().astype(int)
            py = py.ravel().astype(int)

            e0 = uv[v1[i], :]
            # edge vectors of the triangle, used for barycentric coordinates
            e1 = uv[v2[i], :] - e0
            e2 = uv[v3[i], :] - e0

            det = e1[0] * e2[1] - e1[1] * e2[0]
            tmpx = px - e0[0]
            tmpy = py - e0[1]
            a = (tmpx * e2[1] - tmpy * e2[0]) / det
            b = (tmpy * e1[0] - tmpx * e1[1]) / det

            test = (a >= 0) & (b >= 0) & (a + b <= 1)

            if np.any(test):
                px = px[test]
                py = py[test]

                w2 = a[test]
                w3 = b[test]
                w1 = 1 - w3 - w2
                pz = z[v1[i]] * w1 + z[v2[i]] * w2 + z[v3[i]] * w3

                vis = pz > zbuffer[py, px]
                zbuffer[py[vis], px[vis]] = pz[vis]
                fbuffer[py[vis], px[vis]] = i
    test = fbuffer != 0
    f = np.unique(fbuffer[test])
    v = np.unique(np.r_[v1[f], v2[f], v3[f]])
    return v
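A minimal, hedged example of driving visible: Mesh is stubbed with a namespace exposing only the vertices and tvi (triangle vertex index) attributes the function touches, and r is a 4x4 homogeneous transform (identity here, i.e. no rotation).

import numpy as np
from types import SimpleNamespace

mesh = SimpleNamespace(
    vertices=np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]),
    tvi=[[0, 1, 2], [0, 1, 3], [1, 2, 3], [0, 2, 3]],
)
r = np.eye(4)
print(visible(mesh, r))  # indices of the vertices that survive the z-buffer test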
Example #12
def train(objects, desired_location):
    Q = np.full((2, len(objects), len(desired_location)), np.inf)
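np.inf itself is not callable; np.full builds the inf-initialised array. A quick shape check:

import numpy as np
Q = np.full((2, 3, 4), np.inf)
print(Q.shape, Q[0, 0, 0])  # (2, 3, 4) inf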
Example #13
def extract_data(oname, coords, obsname, T0, period, inst, SDSS,
                 comp_mags=None, myLoc='.', fnames=None,
                 lower_phase=-0.5, upper_phase=0.5,
                 no_calibration=False):
    '''
    Takes a set of *CAM observations (and data on the system), and produces a set of phase folded lightcurves.

    If we're in the SDSS field, each .log file needs a corresponding .coords file that contains the RA and Dec of each aperture:
        <CCD1 ap1 RA> <Dec>
        <CCD1 ap2 RA> <Dec>

        <CCD2 ap1 RA> <Dec>
        <CCD2 ap2 RA> <Dec>

        <CCD3 ap1 RA> <Dec>
        <CCD3 ap2 RA> <Dec>
        <CCD3 ap3 RA> <Dec>

    If not, I need a standard star reduction, and each .log file needs a corresponding .log reduction that uses the same parameters
    to ensure an accurate match. These should be specified in comp_fnames. If none are supplied, try searching for a


    Arguments:
    ----------
    oname: str
        Template for written files. Applied to

    coords: str
        RA, Dec of target. Must be in a format astropy can understand.

    obsname: str
        Observatory location. See astropy for a list of valid names

    T0: float
        Ephemeris data

    period: float
        Ephemeris data

    SDSS: bool, optional
        If True, I'll do an SDSS lookup for the comparison star magnitudes. If False, use a standard star to calibrate

    myLoc: str, optional
        Working directory. If not supplied, default to current working directory

    fnames: list, optional
        List of target reduction files. If not supplied, searches for log files

    Returns:
    --------
    written_files: list
        List of created .calib files.
    '''
    printer("\n\n# # # # # # # # # # # # # # # # # # # BEGIN BATCH CALIBRATION # # # # # # # # # # # # # # # # # # #\n\n")

    # Writing out
    lc_dir = os.path.join(myLoc, 'MCMC_LIGHTCURVES')
    try:
        os.mkdir(lc_dir)
    except FileExistsError:
        pass
    printer("Lightcurves will go in: {}".format(lc_dir))

    figs_dir = os.path.join(myLoc, 'MCMC_LIGHTCURVES', "FIGS")
    try:
        os.mkdir(figs_dir)
    except FileExistsError:
        pass
    printer("Figures will go in: {}".format(figs_dir))


    # Report the things we're working with
    printer("  Using these log files: ")
    for i, fname in enumerate(fnames):
        printer("    {:2d} - {}".format(i, fname))
    printer('  ')
    printer("  I'll write out to {}*\n".format(oname))

    #Correct to BMJD
    printer("  Correcting observations from MJD to BMJD (observed from '{}')".format(obsname))
    printer("  Phase folding data for a T0: {:}, period: {:}".format(T0, period))

    # Data masking stuff
    FLAG = np.uint32(0)
    for f in FLAGS_TO_IGNORE:
        FLAG = FLAG | f
    if FLAG:
        printer("  Ignoring bad data flags: {}".format(FLAGS_TO_IGNORE))
        printer("List of keys:")
        printer(hcam.FLAGS)

    # Where are we?
    try:
        observatory = coord.EarthLocation.of_site(obsname)
    except Exception:
        lat, lon = obsname.split(',')
        printer("  Attempting to get the earth observatory from latitude and longitude")
        observatory = coord.EarthLocation.from_geodetic(lat=lat, lon=lon)

    star_loc = coord.SkyCoord(
        coords,
        unit=(u.hourangle, u.deg), frame='icrs'
    )

    # I want to know what instrument I'm using, since each has a different number of cameras
    inst = inst.lower()
    if inst == 'uspec':
        nCCD = 1
        bands = ['???']
        c    = ['black']
    elif inst == 'ucam':
        nCCD = 3
        bands = ['r', 'g', 'u']
        c = ['red', 'green', 'blue']
    elif inst == 'hcam':
        nCCD = 5
        bands = ['u', 'g', 'r', 'i', 'z']
        c = ['blue', 'green', 'red', 'magenta', 'black']
    else:
        raise ValueError("Unknown instrument: {}".format(inst))

    printer("  I'm using the instrument {}, which has {} CCDS in the following order:".format(inst, nCCD))
    for n, col in zip(range(nCCD), c):
        printer("  -> CCD {}: plotted in {}".format(n+1, col))

    written_files = []


    #  Plotting #
    ADU_lightcurves = {fname: [] for fname in fnames}

    print("Making plotting area...", end='')
    plt.ion()
    fig, ax = plt.subplots(nCCD, figsize=[11.69, 8.27], sharex=True)
    # If we only have one CCD, the axes object still needs to be a list
    if nCCD == 1:
        ax = [ax]

    twinAx = []
    for i, a in enumerate(ax):
        a.set_ylabel('Flux, mJy')

        twinAx.append(a.twinx())
        twinAx[i].set_ylabel('Count Ratio')
        twinAx[i].yaxis.tick_right()

    ax[-1].set_xlabel('Phase, days')
    ax[0].set_title('Waiting for data...')
    fig.tight_layout()

    compFig, compAx = plt.subplots(nCCD, figsize=[11.69,8.27], sharex=True)

    # If we only have one CCD, the axes object still needs to be a list
    if nCCD == 1:
        compAx = [compAx]

    compFig.tight_layout()
    plt.show()
    print(" Done!")



    # I want a master pdf file with all the nights' lightcurves plotted
    pdfname = os.path.join(figs_dir, oname+"_all_nights.pdf")
    with PdfPages(pdfname) as pdf:
        for fname in fnames:
            printer("\n----------------------------------------------------------------\n----------------------------------------------------------------\n")
            printer("Calibrating lightcurves for {}".format(fname))
            printer("\n----------------------------------------------------------------\n----------------------------------------------------------------\n")

            print("CWD:  {}".format(os.getcwd()))
            print("File: {}".format(fname))
            data = hcam.hlog.Hlog.read(fname)
            if data == {}:
                data = hcam.hlog.Hlog.rulog(fname)
            if data == {}:
                data = hcam.hlog.Hlog.rfits(fname)
            if data == {}:
                raise Exception("Could not properly read in log file, {}".format(fname))

            printer("  Read the data file!")

            # Get the apertures of this data set
            aps = data.apnames
            CCDs = [str(i) for i in aps]
            CCDs = sorted(CCDs)
            if CCDs == []:
                printer("ERROR! No data in the file!")
            printer("  The observations have the following CCDs: {}".format([int(ccd) for ccd in CCDs]))

            printer("  Am I flux calibrating the data? {}".format(not no_calibration))
            if no_calibration:
                printer("\n!!! Not doing flux calibration! Setting reference magnitudes to correspond to a flux=1\n\n")

                # Reference stars is a dict, keyed by the CCD string
                reference_stars = {}
                for CCD in CCDs:
                    mags = []

                    # The comparison star fluxes get added together, so each should have an even share of 1 mJy.
                    individual_flx = 1.0 / len(aps[CCD][1:])
                    for ap in aps[CCD][1:]:
                        mags.append(sdss_flux2mag(individual_flx))
                    reference_stars[CCD] = np.array(mags)

                printer("'Unit' Reference stars have a magnitude of {:.2f}\n".format(sdss_flux2mag(1.0)))

            elif SDSS:
                printer("  Looking up SDSS magnitudes from the database")
                comparison_coord_files = comp_mags[fname]
                reference_stars = construct_reference(comparison_coord_files)

            else:
                reference_stars = {}
                comparisons = comp_mags[fname]
                bands = list(comparisons.keys())

                printer("  For each of these CCDs, I've been given comparison stars of the following magnitudes:")
                for i, (b, comps) in enumerate(comparisons.items()):
                    # I need to capture 'none' strings here, and store them as np.nans.
                    # Later, when I construct the comparison star, these apertures must be ignored!

                    comparison_list = []
                    for comp in comps:
                        try:
                            # Can I float? (we all float down here, georgie...)
                            comp = float(comp)
                            comparison_list.append(comp)
                        except ValueError:
                            # If I can't, ignore it.
                            comparison_list.append(np.nan)
                    # Who doesn't love vectorised calculations?
                    reference_stars[str(i+1)] = np.array(comparison_list)

                printer("  My comparison stars have the following apparent mags:")
                for b, mags in reference_stars.items():
                    printer("    - CCD{}, mags: {}".format(b, mags))
            printer("\n\n")

            for a in ax:
                a.clear()
                a.set_ylabel('Flux, mJy')
            ax[-1].set_xlabel('Phase, days')
            ax[0].set_title('Waiting for data...')

            # Loop through the CCDs.
            # # For each CCD, grab the target lightcurve, and the comparisons
            for CCD in CCDs:
                lightcurve_metadata = '# This is data from the file {} CCD {}\n'.format(fname, CCD)

                CCD_int = int(CCD) - 1
                printer("-> CCD {}".format(CCD))

                # Plot the comparison we construct
                compAx[CCD_int].clear()
                compAx[CCD_int].set_title("CCD {}, comparison star".format(CCD))
                compAx[CCD_int].set_ylabel("Counts per frame")

                # Get this frame's apertures
                ap = aps[CCD]
                printer("  This CCD has the apertures: {}".format(ap))
                # Check that there is more than one aperture -- i.e. if a comparison star exists
                if len(ap) <= 1:
                    printer("I can't do relative photometry with only one aperture!")
                    printer("!!! Bad log file, '{}' !!!".format(fname))
                    raise LookupError("Not enough apertures in the log file!", fname)


                # Check for nans in the log files.
                for a in ap:
                    star = data.tseries(CCD, a)
                    if np.any(np.isnan(star.y)):
                        printer("!!! Log file cannot contain nan values! File {}".format(fname))
                        raise ValueError("Log file cannot contain nan values!", fname)


                # Get some data on the """quality""" of the observations
                metadata = '#\n# Reduction info:\n\n'
                to_proc = data[CCD]

                ap_x = [header for header in
                    to_proc.dtype.fields if "x_" in header]
                ap_y = [header for header in
                    to_proc.dtype.fields if "y_" in header]
                ap_fwhm = [header for header in
                    to_proc.dtype.fields if "fwhm_" in header]

                for x_label, y_label, fwhm_label in zip(ap_x, ap_y, ap_fwhm):
                    x_pix_loc = to_proc[x_label].mean()
                    y_pix_loc = to_proc[y_label].mean()
                    fwhm_pix_loc = to_proc[fwhm_label].mean()

                    aperture_number = x_label.replace("x_", "")

                    metadata += '#   Aperture {}\n'.format(aperture_number)
                    metadata += "#     x location: {:.0f}\n".format(x_pix_loc)
                    metadata += "#     y location: {:.0f}\n".format(y_pix_loc)
                    metadata += "#     fwhm:       {:.2f}\n#\n#\n".format(fwhm_pix_loc)
                lightcurve_metadata += metadata


                # Grab the target data
                target = data.tseries(CCD, '1')


                # mags is a list of the relevant comparison star magnitudes.
                # For non-SDSS fields, this is the clipped mean magnitude of each object.
                mags = reference_stars[CCD]
                fluxs = sdss_mag2flux(mags)
                sumFlux = np.nansum(fluxs)
                sumMag = sdss_flux2mag(sumFlux)

                printer("  Comparison star mags: {}".format(mags))
                if no_calibration:
                    lightcurve_metadata += "# No flux calibration being done!!\n"
                    lightcurve_metadata += "# simulated a dummy comparison magnitude of 1.00 mJy\n"
                else:
                    lightcurve_metadata += "# Comparison star mags: {}\n".format(mags)


                # Add up the reference star fluxes
                N_comparisons = 0
                comparison = "Dummy initialiser ( ͡° ͜ʖ ͡°)"
                count_ratios = []
                for a, mag in zip(ap[1:], mags):
                    if np.isnan(mag):
                        printer("  The reference star in ap {} is being ignored!".format(a))
                    else:
                        N_comparisons += 1
                        new_comparison = data.tseries(CCD, a)
                        r = sdss_mag2flux(mag) / new_comparison.y.mean()
                        r_err = sdss_mag2flux(mag) / new_comparison.y.std()
                        try:
                            comparison = comparison + new_comparison
                            printer("  The reference star now includes data from aperture {}".format(a))
                        except:
                            comparison = data.tseries(CCD, a)
                            printer("  The comparison was initialised with aperture {}".format(a))
                        printer("    This star has mean count/frame of {:.3f}".format(new_comparison.y.mean()))
                        printer("    and has a flux/count of {:.3g} +/- {:.3g} mJy/count".format(r, r_err))

                printer("  The 'comparison star' I've constructed from {} apertures now has a mean count/frame of {:.3f}".format(N_comparisons, np.mean(comparison.y)))


                # If we have SDSS stars too bright, get their mags from flux calibrating those that arent
                if SDSS and np.any(np.isinf(mags)):
                    printer("\n\n  I have some comparisons that saturated SDSS! Inferring their magnitudes from fainter stars.")

                    lightcurve_metadata += "# Some comparison stars saturated the SDSS image.\n# Their magnitudes were inferred from fainter stars\n"

                    printer("  Collecting fainter stars...")
                    calibComp = None
                    for mag, a in zip(mags, ap[1:]):
                        if np.isinf(mag):
                            printer("    Skipping aperture {}, as it is saturated".format(a))
                        else:
                            if calibComp is None:
                                calibComp = data.tseries(CCD, a)
                            else:
                                calibComp += data.tseries(CCD, a)

                    if calibComp is None:
                        raise Exception("All comparison stars saturated SDSS! Pick at least one that doesn't!")
                    calibComp_counts = np.mean(calibComp.y)

                    printer("  My non-saturated SDSS stars have a mean count/frame of {:.3f}".format(calibComp_counts))
                    lightcurve_metadata += "# My non-saturated SDSS stars have a mean count/frame of {:.3f}\n".format(calibComp_counts)

                    printer("  My fluxes are {}".format([f for f in fluxs if not np.isinf(f)]))
                    printer("    with a sum flux of {:.3f} mJy".format(sumFlux))
                    printer("     and a sum mag of  {:.3f} mag".format(sumMag))

                    lightcurve_metadata += "# My fluxes are {}\n".format([f for f in fluxs if not np.isinf(f)])
                    lightcurve_metadata += "#   with a sum flux of {:.3f} mJy\n".format(sumFlux)
                    lightcurve_metadata += "#    and a sum mag of  {:.3f} mag\n".format(sumMag)

                    for i, (mag, a) in enumerate(zip(mags, ap[1:])):
                        if np.isinf(mag):
                            cnts = data.tseries(CCD, a)
                            meanCnts = np.mean(cnts.y)

                            if np.any(np.isnan(cnts.y)):
                                meanCnts = np.nanmean(cnts.y)

                                printer("The file {} has nan counts! That's weird, and you should fix that.".format(fname))
                                printer("I'll continue ignoring the nan, BUT FIX IT!")

                                lightcurve_metadata += "# The file {} has nan counts! That's weird, and you should fix that.\n".format(fname)
                                lightcurve_metadata += "# I'll continue ignoring the nan, BUT FIX IT!\n"


                            # Calibrated against known SDSS stars. Observed through the same air column since they're the same frame, so no ext. corr.
                            mag = sumMag - 2.5*np.log10(meanCnts/calibComp_counts)
                            mags[i] = mag

                            printer("    Star {} had no SDSS magnitude. Computed a magnitude of {:.3f} from an e- flux of {}".format(a, mag, meanCnts))
                            lightcurve_metadata += "# Star {} had no SDSS magnitude. Computed a magnitude of {:.3f} from an e- flux of {}\n".format(a, mag, meanCnts)

                    printer("\n")

                # # # # # # # # # # # # # # # # # # # #
                # # Conversion of target lightcurve # #
                # # # # # # # # # # # # # # # # # # # #

                # Get the non-saturated fluxes
                fluxs = sdss_mag2flux(mags)
                comparison_flux = np.nansum(fluxs)

                ratio = target / comparison # counts / counts - ratio between target and comp

                printer("\n\n  Correcting data to BMJD time...")
                ratio = tcorrect(ratio, star_loc, obsname)

                # If we're the first CCD, figure out what eclipse cycle we are
                if CCD == '1':
                    meantime = np.mean(ratio.t)
                    E = calc_E(meantime, T0, period)
                    printer("  The mean time of this eclipse is {:.3f}.".format(meantime))
                    printer("  From ephemeris data, I get an eclipse Number,")
                    printer("    E = ({:.3f} - [T0={:.3f}]) / [P={:.5f}]".format(meantime, T0, period))
                    printer("    E = {:.3f}".format(E))

                    E = np.rint(E)
                    # The above can be off if the eclipse isn't the minimum; increment/decrement until it's within bounds
                    while T0 + E*period < ratio.t[0]:
                        printer("    !!! Eclipse time not within these data! Incrementing E...")
                        E += 1
                    while T0 + E*period > ratio.t[-1]:
                        printer("    !!! Eclipse time not within these data! Decrementing E...")
                        E -= 1

                    printer("  I think that the eclipse spanning from {:.3f} to {:.3f} is cycle number {}".format(
                        ratio.t[0], ratio.t[-1], E)
                    )

                    eclTime = T0 + E*period
                    printer("  The eclipse is then at time {:.3f}".format(eclTime))
                    printer("")
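                    # Worked example with made-up numbers: T0 = 58000.0 and
                    # period = 0.15 d with a mean time of 58000.47 gives
                    # E = (58000.47 - 58000.0) / 0.15 = 3.13, which rounds to
                    # E = 3, i.e. an eclipse time of 58000.0 + 3 * 0.15 = 58000.45.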

                # slice out the data between phase -0.5 and 0.5
                printer("  Slicing out data between phase {} and {}".format(lower_phase, upper_phase))
                slice_time = (ratio.t - eclTime) / period
                slice_args = (slice_time < upper_phase)  *  (slice_time > lower_phase)

                ratio = hcam.hlog.Tseries(
                    slice_time[slice_args],
                    ratio.y[slice_args],
                    ratio.ye[slice_args],
                    ratio.mask[slice_args]
                    )

                # Bad data has error = -1
                mask = np.where(ratio.ye != -1)
                ratio = ratio[mask]

                meanRatio = np.mean(ratio.y)

                printer("  I sliced {} data points from the lightcurve about the eclipse.".format(len(ratio.t)))

                # Save the ratio for later
                ADU_lightcurves[fname].append(copy.deepcopy(ratio))

                # Convert the ratio from ADU to mJy
                printer("  Multiplying the target count flux / comparison count flux by the comparison flux, {:.3f}".format(comparison_flux))
                ratio = ratio * comparison_flux # Scale back up to actual flux.

                # Filter out flags I don't care about.
                ratio.mask = ratio.mask & (~ FLAG)

                #######################################################################################################
                ############ IGNORE ME I'M BORING AND HARD TO READ. WHY READ ANYTHING HARD? JUST TRUST ME. ############
                #######################################################################################################

                #  Reporting  #

                lightcurve_metadata += "# I calculated an eclipse time of {} BMJD, and phase-folded around that\n".format(eclTime)
                lightcurve_metadata += "# with a T0 of {}, and a period of {}, making this eclipse {}.\n".format(T0, period, E)
                lightcurve_metadata += "# I also sliced out the phase {} -> {}!!\n#\n#\n".format(lower_phase, upper_phase)

                printer("  Comparison star apparent SDSS magnitudes:")
                lightcurve_metadata += "# Comparison star apparent SDSS magnitudes:\n"

                for m, mag in enumerate(mags):
                    printer("    Star {} -> {:.3f} mag".format(m, mag))
                    lightcurve_metadata += "#   Star {} -> {:.3f} mag\n".format(m, mag)
                printer("")
                lightcurve_metadata += "#\n#\n#\n"

                printer("  Apparent fluxes of the comparison stars:")
                lightcurve_metadata += "# Apparent fluxes of the comparison stars:\n"

                for i, flux in enumerate(fluxs):
                    printer("    Star {} -> {:.3f} mJy".format(i, flux))
                    lightcurve_metadata += "#   Star {} -> {:.3f} mJy\n".format(i, flux)
                lightcurve_metadata += "#\n"

                printer('  Sum apparent Flux: {:.3f} mJy\n'.format(comparison_flux))

                printer("  Instrumental counts, summed per mean frame ({} frames) of {} comparison stars: {:.1f}".format(
                    len(comparison.y), N_comparisons, np.mean(comparison.y)
                ))

                printer("  Instrumental counts per mean frame ({} frames) of target: {:.1f}".format(
                    len(target.y), np.mean(target.y)
                ))
                printer("  Mean Target/comparison count ratio: {:.3f}".format(meanRatio))
                printer("  Mean target magnitude: {:.3f}".format(sdss_flux2mag(meanRatio * comparison_flux)))

                lightcurve_metadata += '# Sum apparent Flux: {:.3f} mJy\n#\n#\n'.format(comparison_flux)
                lightcurve_metadata += "# Instrumental summed counts per mean frame ({} frames) of {} comparison stars: {:.1f}\n".format(len(comparison.y), len(ap[1:]), np.mean(comparison.y))
                lightcurve_metadata += "# Instrumental counts per mean frame ({} frames) of target: {:.1f}\n#\n".format(len(target.y), np.mean(target.y))
                lightcurve_metadata += "# Mean Target/comparison count ratio: {:.3f}\n".format(meanRatio)
                lightcurve_metadata += "# Mean target magnitude: {:.3f}\n".format(sdss_flux2mag(meanRatio * comparison_flux))

                # Plotting management
                ax[CCD_int].clear()
                if CCD_int == 0:
                    title = os.path.split(fname)[1]

                    ax[0].set_title(title)
                    compAx[0].set_title("{}\nCCD {}, comparison stars, normalised.".format(title, CCD))

                ax[CCD_int].set_ylabel('Flux, mJy')

                # Plot the ratio
                ratio.mplot(ax[CCD_int], colour=c[CCD_int])

                # Scale the right side labels
                twinAx[CCD_int].set_ylim(np.array(ax[CCD_int].get_ylim()) / comparison_flux)
                # Draw
                fig.canvas.draw_idle()

                compMin =  9e99
                compMax = -9e99
                if len(ap) == 2:
                    # # Plot the mean count flux on the figure -- only used when single aperture, as not as useful as ratios
                    compAx[CCD_int].errorbar(comparison.t, comparison.y, yerr=comparison.ye,
                        label='Mean', color='black', linestyle='', marker='o', capsize=0)
                    try:
                        mean, _, _ = sigma_clipped_stats(comparison.y, maxiters=2, sigma=3)
                    except:
                        mean, _, _ = sigma_clipped_stats(comparison.y, iters=2, sigma=3)
                    compAx[CCD_int].axhline(mean, linestyle='--', color='black')
                    compMin = np.min(comparison.y)
                    compMax = np.max(comparison.y)
                else:
                    # Plot each combination of comparison star ratios, i.e. for 3 comparisons: 2/3, 2/4, 3/4
                    j = 0
                    for i, a in enumerate(ap[1:-1]):
                        first = data.tseries(CCD, a)
                        for b in ap[i+2:]:
                            printer("  -> Plotting ap {}/{}".format(a, b))
                            toPlot = first / data.tseries(CCD, b)

                            # Filter out flags I don't care about.
                            toPlot.mask = toPlot.mask & (~ FLAG)

                            # Apply the mask to the data
                            if np.any(toPlot.mask):
                                mask = np.where(toPlot.mask == 0)
                                printer("  -> {} masked data!".format(np.sum(toPlot.mask != 0)))
                                printer("\nMasked data:")
                                printer(toPlot.mask)
                                printer("\n\n")
                                printer("Flags:")
                                printer(hcam.FLAGS)

                                if np.all(toPlot.mask != 0):
                                    print("ALL DATA ARE MASKED! Stopping...")
                                    exit()

                                toPlot.t  = toPlot.t[mask]
                                toPlot.y  = toPlot.y[mask]
                                toPlot.ye = toPlot.ye[mask]
                                toPlot.mask = toPlot.mask[mask]

                            toPlot.y = toPlot.y / np.mean(toPlot.y)
                            toPlot.y = toPlot.y + (j / 5)
                            j += 1

                            # Get min and max axis limits
                            if np.max(toPlot.y) > compMax:
                                compMax = np.max(toPlot.y)
                            if np.min(toPlot.y) < compMin:
                                compMin = np.min(toPlot.y)

                            # Fit a straight line to the data. Deviations indicate bad comparisons
                            A,B = curve_fit(straight_line, toPlot.t, toPlot.y)[0]
                            fit_X = np.linspace(toPlot.t[0], toPlot.t[-1], 3)
                            fit_Y = straight_line(fit_X, A, B)

                            # iters is deprecated. Try the new version; if that fails, do the old version. yay, flexibility!
                            try:
                                mean, _, _ = sigma_clipped_stats(toPlot.y, maxiters=2, sigma=3)
                            except:
                                mean, _, _ = sigma_clipped_stats(toPlot.y, iters=2, sigma=3)
                            compAx[CCD_int].axhline(mean, linestyle='--', color='black')
                            compAx[CCD_int].plot(fit_X, fit_Y, color='red', linestyle=':')
                            compAx[CCD_int].scatter(toPlot.t, toPlot.y,
                                s=10,
                                label="Aperture {}/{} - grad: {:.2f}".format(a, b, A),
                                alpha=0.6
                            )
                            compFig.canvas.draw_idle()

                # Add in legend artist
                compAx[CCD_int].legend()

                pad = 0.05 * (compMax - compMin)
                compAx[CCD_int].set_ylim([compMin-pad, compMax+pad])
                compAx[CCD_int].set_xlim([comparison.t[0], comparison.t[-1]])

                # File handling stuff
                b = bands[CCD_int]
                while b == '???':
                    b = input("What band are these data?: ")
                    if b == '':
                        print("PLEASE ENTER A BAND NAME for:\n{}\n".format(fname))
                        b = "???"

                date = time.Time(eclTime, format='mjd')
                date = date.strftime("%Y-%m-%d@%Hh%Mm")

                filename = oname
                filename = "{}_{}_{}.calib".format(filename, date, b)

                filename = os.path.join(lc_dir, filename)

                # Saving data
                printer("  These data have {} masked points.".format(np.sum(ratio.mask != 0)))
                if np.sum(ratio.mask != 0):
                    printer("\n\n{}\n\n".format(ratio.mask))
                with open(filename, 'w') as f:
                    f.write(lightcurve_metadata)
                    f.write("# Phase, Flux, Err_Flux\n")
                    for t, y, ye, mask in zip(ratio.t, ratio.y, ratio.ye, ratio.mask):
                        if not mask:
                            f.write("{} {} {}\n".format(t, y, ye))

                written_files.append(filename)
                printer("  Wrote data to {}".format(filename))
                printer("  Finished CCD {}\n".format(CCD))

            ax[-1].set_xlabel('Phase, days')

            x_range = [min(ratio.t), max(ratio.t)]
            ax[0].set_xlim(x_range)

            x_range = [min(comparison.t), max(comparison.t)]
            compAx[0].set_xlim(x_range)

            plt.tight_layout()
            plt.subplots_adjust(hspace=0.0)

            fig.canvas.draw_idle()
            compFig.canvas.draw_idle()

            input("\n  Hit enter for next file\r")
            print()
            pdf.savefig(fig)
            pdf.savefig(compFig)
        plt.close(compFig)

        # Plot the ADU lightcurves for the user.
        for a in ax:
            a.clear()
            a.set_ylabel('Flux, ADU')
        ax[-1].set_xlabel('Phase, days')
        ax[0].set_title('Waiting for data...')

        for fname, lightcurves in ADU_lightcurves.items():
            for i, tseries in enumerate(lightcurves):
                print("{} // CCD {}".format(fname, i))
                flx = tseries.y
                phase = tseries.t

                ax[i].step(phase, flx, label=fname)

        for a in ax:
            a.legend()
        ax[0].set_title("ADU Lightcurves of all files")
        plt.tight_layout()
        fig.canvas.draw_idle()
        ADU_name = os.path.join(figs_dir, oname+'_ADU_lightcurves.pdf')
        fig.savefig(ADU_name)

        input("Hit enter to continue... ")

    printer("  ")
    printer("  Saved the plots to {}".format(pdfname))

    plt.close('all')
    plt.ioff()

    return written_files
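A hedged call sketch; every value below is made up purely to show the shape of the arguments (real coordinates, ephemeris, and reduction logs come from your own data):

written = extract_data(
    oname='my_system',
    coords='12:34:56.7 +01:23:45.6',
    obsname='lapalma',
    T0=58000.0,
    period=0.15,
    inst='ucam',
    SDSS=False,
    comp_mags={'run001.log': {'r': [12.3], 'g': [12.9], 'u': [13.5]}},
    myLoc='.',
    fnames=['run001.log'],
)
print(written)  # the .calib files that were written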