Code example #1
File: stars.py  Project: oknuutti/visnav-py
    def import_stars_hip():
        # I/239/hip_main
        Stars._create_stardb(Stars.STARDB_HIP)
        conn = sqlite3.connect(Stars.STARDB_HIP)
        cursor = conn.cursor()

        from astroquery.vizier import Vizier
        Vizier.ROW_LIMIT = -1

        cols = ["HIP", "HD", "_RA.icrs", "_DE.icrs", "Vmag", "B-V"]
        r = Vizier(catalog="I/239/hip_main", columns=cols,
                   row_limit=-1).query_constraints()[0]

        for i, row in enumerate(r):
            hip, hd, ra, dec, mag_v, b_v = [row[f] for f in cols]
            if np.any(list(map(np.ma.is_masked, (ra, dec, mag_v)))):
                continue
            hd = 'null' if np.ma.is_masked(hd) else hd
            mag_b = 'null' if np.ma.is_masked(b_v) or np.isnan(b_v) else b_v + mag_v
            x, y, z = tools.spherical2cartesian(math.radians(dec),
                                                math.radians(ra), 1)
            cursor.execute("""
                INSERT INTO deep_sky_objects (hip, hd, ra, dec, x, y, z, mag_v, mag_b)
                VALUES (%s, %s, %f, %f, %f, %f, %f, %f, %s)""" %
                           (hip, hd, ra, dec, x, y, z, mag_v, mag_b))
            if i % 100 == 0:
                conn.commit()
                tools.show_progress(len(r), i)

        conn.commit()
        conn.close()
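
The (x, y, z) unit vector is computed with the project's tools.spherical2cartesian(lat, lon, r) helper, which is not part of this excerpt. Assuming it follows the usual convention (declination as latitude, right ascension as longitude, both already in radians), it presumably reduces to a sketch like this:

import math

def spherical2cartesian(lat, lon, r):
    # hypothetical stand-in for tools.spherical2cartesian: point on a sphere
    # of radius r at the given declination (lat) and right ascension (lon)
    x = r * math.cos(lat) * math.cos(lon)
    y = r * math.cos(lat) * math.sin(lon)
    z = r * math.sin(lat)
    return x, y, z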
Code example #2
File: stars.py  Project: oknuutti/visnav-py
    def add_simbad_col():
        conn = sqlite3.connect(Stars.STARDB)
        cursor_r = conn.cursor()
        cursor_w = conn.cursor()

        # cursor_w.execute("alter table deep_sky_objects add column simbad char(20) default null")
        # conn.commit()

        N_tot = cursor_r.execute(
            "SELECT max(id) FROM deep_sky_objects WHERE 1").fetchone()[0]

        skip = 0
        result = cursor_r.execute(
            "select id, hip from deep_sky_objects where id >= %d" % skip)

        import time
        from astroquery.simbad import Simbad
        Simbad.add_votable_fields('typed_id')
        while 1:
            rows = result.fetchmany(1000)
            if rows is None or len(rows) == 0:
                break
            tools.show_progress(N_tot, rows[0][0] - 1)

            s = Simbad.query_objects(['HIP %d' % int(row[1]) for row in rows])
            time.sleep(2)

            values = []
            if s is not None:
                s.add_index('TYPED_ID')
                for row in rows:
                    sr = get(s, ('HIP %d' % int(row[1])).encode('utf-8'))
                    if sr is not None:
                        k = sr['MAIN_ID'].decode('utf-8')
                        values.append("(%d, '%s', 0,0,0,0,0,0)" %
                                      (row[0], k.replace("'", "''")))
            if len(values) > 0:
                cursor_w.execute("""
                    INSERT INTO deep_sky_objects (id, simbad, ra, dec, x, y, z, mag_v) VALUES """
                                 + ','.join(values) + """
                    ON CONFLICT(id) DO UPDATE SET simbad = excluded.simbad""")
                conn.commit()
        conn.close()
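
The INSERT ... ON CONFLICT(id) DO UPDATE statement above is assembled by string concatenation. A minimal sketch of the same upsert with parameterized queries (assuming SQLite 3.24+ for ON CONFLICT support; table and column names taken from the snippet above) could look like this:

import sqlite3

def upsert_simbad(conn, pairs):
    # pairs: iterable of (id, simbad_name) tuples; the remaining columns get
    # placeholder zeros, mirroring the snippet above
    cur = conn.cursor()
    cur.executemany("""
        INSERT INTO deep_sky_objects (id, simbad, ra, dec, x, y, z, mag_v)
        VALUES (?, ?, 0, 0, 0, 0, 0, 0)
        ON CONFLICT(id) DO UPDATE SET simbad = excluded.simbad""", pairs)
    conn.commit()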
Code example #3
File: analyze-log-v2.py  Project: oknuutti/visnav-py
                                len(predictor_idxs),
                                figsize=(8, 6))
        for r, axr in enumerate(axs):
            axr[0].get_shared_y_axes().join(*axs[r])
        for c, axc in enumerate(axs[0]):
            axc.get_shared_x_axes().join(*[axr[c] for axr in axs])

        count = len(predictor_idxs) * (len(targets) +
                                       1) * len(image_types) * len(algos)
        i = 0

        old_itype = None
        for (pj, pi), (ti, target), itype, algo in itertools.product(
                enumerate(predictor_idxs), enumerate(('fails', ) + targets),
                image_types, algos):
            tools.show_progress(count, i)
            ti -= 1
            i += 1
            try:
                X, Y, yc, labels = data[itype][algo]
            except TypeError:
                assert False, 'No log file found for: %s %s %s %s %s' % (
                    mission, postfix, itype, algo, sm_quality)

            # remove difficult samples on other dimensions so that effect of plotted dimension more clear
            I0 = tuple((X[:, k] >= EASY_LIMITS[itype][k][0],
                        X[:, k] <= EASY_LIMITS[itype][k][1])
                       for k in predictor_idxs if k != pi)
            I = np.logical_and.reduce(sum(I0, ()))
            if ti >= 0:
                if False:
Code example #4
def export(sm, dst_path, src_path=None, src_imgs=None, trg_shape=(224, 224), crop=False, debug=False,
           img_prefix="", title=""):

    trg_w, trg_h = trg_shape
    assert (src_path is not None) + (src_imgs is not None) == 1, 'give either src_path or src_imgs, not both'

    if debug:
        renderer = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=0)
        obj_idx = renderer.load_object(sm.asteroid.target_model_file,
                                       smooth=sm.asteroid.render_smooth_faces)
        algo = AlgorithmBase(sm, renderer, obj_idx)

    metadatafile = os.path.join(dst_path, 'dataset_all.txt')
    if not os.path.exists(metadatafile):
        with open(metadatafile, 'w') as f:
            f.write('\n'.join(['%s, camera centric coordinate frame used' % title,
                               'Image ID, ImageFile, Target Pose [X Y Z W P Q R], Sun Vector [X Y Z]', '', '']))

    files = list(os.listdir(src_path)) if src_imgs is None else src_imgs
    files = sorted(files)

    id = 0
    for i, fn in enumerate(files):
        if src_imgs is not None or re.search(r'(?<!far_)\d{4}\.png$', fn):
            c = 2 if src_imgs is None else 1
            tools.show_progress(len(files)//c, i//c)
            id += 1

            # read system state, write out as relative to s/c
            fname = os.path.basename(fn)
            if src_imgs is None:
                fn = os.path.join(src_path, fn)
            lbl_fn = re.sub(r'_%s(\d{4})' % img_prefix, r'_\1', fn[:-4]) + '.lbl'

            sm.load_state(lbl_fn)
            sm.swap_values_with_real_vals()

            if not crop:
                shutil.copy2(fn, os.path.join(dst_path, fname))
                if os.path.exists(fn[:-4] + '.d.exr'):
                    shutil.copy2(fn[:-4] + '.d.exr', os.path.join(dst_path, fname[:-4] + '.d.exr'))
                if os.path.exists(fn[:-4] + '.xyz.exr'):
                    shutil.copy2(fn[:-4] + '.xyz.exr', os.path.join(dst_path, fname[:-4] + '.xyz.exr'))
                if os.path.exists(fn[:-4] + '.s.exr'):
                    shutil.copy2(fn[:-4] + '.s.exr', os.path.join(dst_path, fname[:-4] + '.s.exr'))
                _write_metadata(metadatafile, id, fname, sm.get_system_scf())
                continue

            from visnav.algo.absnet import AbsoluteNavigationNN

            # read image, detect box, resize, adjust relative pose
            img = cv2.imread(fn, cv2.IMREAD_GRAYSCALE)
            assert img is not None, 'image file %s not found' % fn

            # detect target, get bounds
            x, y, w, h = ImageProc.single_object_bounds(img, threshold=AbsoluteNavigationNN.DEF_LUMINOSITY_THRESHOLD,
                                                        crop_marg=AbsoluteNavigationNN.DEF_CROP_MARGIN,
                                                        min_px=AbsoluteNavigationNN.DEF_MIN_PIXELS, debug=debug)
            if x is None:
                continue

            # write image metadata
            system_scf = sm.get_cropped_system_scf(x, y, w, h)
            _write_metadata(metadatafile, id, fname, system_scf)

            others, (depth, coords, px_size), k = [], [False] * 3, 1
            if os.path.exists(fn[:-4] + '.d.exr'):
                depth = True
                others.append(cv2.imread(fn[:-4] + '.d.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.xyz.exr'):
                coords = True
                others.append(cv2.imread(fn[:-4] + '.xyz.exr', cv2.IMREAD_UNCHANGED))
            if os.path.exists(fn[:-4] + '.s.exr'):
                px_size = True
                others.append(cv2.imread(fn[:-4] + '.s.exr', cv2.IMREAD_UNCHANGED))

            # crop & resize image, write it
            cropped = ImageProc.crop_and_zoom_image(img, x, y, w, h, None, (trg_w, trg_h), others=others)

            cv2.imwrite(os.path.join(dst_path, fname), cropped[0], [cv2.IMWRITE_PNG_COMPRESSION, 9])
            if depth:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.d.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if coords:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.xyz.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))
                k += 1
            if px_size:
                cv2.imwrite(os.path.join(dst_path, fname[:-4] + '.s.exr'), cropped[k],
                            (cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT))

            if debug:
                sc, dq = sm.cropped_system_tf(x, y, w, h)

                sm.spacecraft_pos = tools.q_times_v(SystemModel.sc2gl_q.conj(), sc)
                sm.rotate_spacecraft(dq)
                #sm.set_cropped_system_scf(x, y, w, h, sc_ast_lf_r, sc_ast_lf_q)

                if False:
                    sm.load_state(lbl_fn)
                    sm.swap_values_with_real_vals()
                    imgd = cv2.resize(img, (trg_h, trg_w))

                imge = algo.render(center=False, depth=False, shadows=True)
                h, w = imge.shape
                imge = cv2.resize(imge[:, (w - h)//2:(w - h)//2+h], cropped[0].shape)
                cv2.imshow('equal?', np.hstack((
                    cropped[0],
                    np.ones((cropped[0].shape[0], 1), dtype=cropped[0].dtype) * 255,
                    imge,
                )))
                cv2.waitKey()

                if i > 60:
                    quit()
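
The filename filter used above, (?<!far_)\d{4}\.png$, accepts files ending in a four-digit frame number but skips those where the digits are immediately preceded by far_. A quick illustration:

import re

pattern = r'(?<!far_)\d{4}\.png$'
assert re.search(pattern, 'cam1_0042.png')          # normal frame: accepted
assert re.search(pattern, 'far_view_0042.png')      # 'far_' not directly before the digits: accepted
assert not re.search(pattern, 'cam1_far_0042.png')  # 'far_' directly before the digits: skipped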
Code example #5
File: calibrate.py  Project: oknuutti/visnav-py
    def optimize(self, measures):
        opt_method = self.method
        cams = measures[0].frame.cam
        cn = len(cams)
        qn = [len(cams[i].qeff_coefs) for i in range(cn)]
        fn = {
            FRAME_GAIN_NONE: 0,
            FRAME_GAIN_SAME: 1,
            FRAME_GAIN_STATIC: 0,
            FRAME_GAIN_INDIVIDUAL: np.max([m.frame.id for m in measures]) + 1,
        }[FRAME_GAINS]
        gn = 1 if GENERAL_GAIN_ADJUSTMENT is not False else 0

        STAR_SATURATION_MODELING = StarFrame.STAR_SATURATION_MODELING != StarFrame.STAR_SATURATION_MODEL_IDEAL

        f_gains = np.ones(fn)
        for m in measures:
            if FRAME_GAINS == FRAME_GAIN_SAME:
                f_gains[0] = STAR_GAIN_ADJUSTMENT
            elif FRAME_GAINS == FRAME_GAIN_INDIVIDUAL:
                if m.obj_id[0] == 'moon':
                    f_gains[m.frame.id] = MOON_GAIN_ADJ
                else:
                    f_gains[m.frame.id] = (STAR_GAIN_ADJUSTMENT if m.frame.cam[0].emp_coef >= 1
                                           else STAR_GAIN_ADJUSTMENT_TN)

        def encode(cams, f_gains, gain_adj, psf_coef):
            # if len(psf_coef) == 3 and not StarFrame.STAR_SATURATION_MULTI_KERNEL:
            #     psf_coef = list(psf_coef)
            #     psf_coef[2] = np.log(psf_coef[1]/psf_coef[2])
            #     psf_coef[1] = np.log(psf_coef[0]/psf_coef[1])

            # parameterize cam spectral responsivity, frame specific exposure correction
            return (*[qec for c in cams for qec in c.qeff_coefs], *f_gains,
                    *((gain_adj, ) if gn else tuple()), *psf_coef)

        def decode(xr):
            x = np.abs(xr)
            off1 = len(STAR_PSF_SDS)
            off0 = off1 + (1 if gn else 0)

            if not FIXED_PSF_SDS:
                psf_coef = list(x[-off1:] if STAR_SATURATION_MODELING else (1, 0, 0))

            # if len(STAR_PSF_SDS) == 3 and not StarFrame.STAR_SATURATION_MULTI_KERNEL:
            #     psf_coef[1] = psf_coef[0] * np.exp(-psf_coef[1])
            #     psf_coef[2] = psf_coef[1] * np.exp(-psf_coef[2])

            k = 0
            qeff_coefss = []
            for i in range(cn):
                qeff_coefss.append(x[k:k + qn[i]])
                k += qn[i]

            return (qeff_coefss, x[k:len(x) - off0], (x[-off0] if gn else 1),
                    psf_coef)

        def cost_fun(x, measures, prior_x, return_details=False, plot=False):
            c_qeff_coefs, f_gains, gain_adj, psf_coef = decode(x)

            band = []
            obj_ids = []
            measured_du = []
            expected_du = []
            weights = []
            for m in measures:
                if FRAME_GAINS == FRAME_GAIN_SAME:
                    pre_sat_gain = f_gains[0]
                elif FRAME_GAINS == FRAME_GAIN_INDIVIDUAL:
                    pre_sat_gain = f_gains[m.frame.id]
                elif FRAME_GAINS == FRAME_GAIN_STATIC:
                    if m.obj_id[0] == 'moon':
                        pre_sat_gain = MOON_GAIN_ADJ
                    else:
                        pre_sat_gain = (STAR_GAIN_ADJUSTMENT if m.frame.cam[0].emp_coef >= 1
                                        else STAR_GAIN_ADJUSTMENT_TN)
                else:
                    pre_sat_gain = 1

                edu = m.expected_du(pre_sat_gain=pre_sat_gain,
                                    post_sat_gain=gain_adj,
                                    qeff_coefs=c_qeff_coefs,
                                    psf_coef=psf_coef)

                if return_details or (m.obj_id[0],
                                      m.cam_i) not in IGNORE_MEASURES:
                    expected_du.append(edu)
                    measured_du.append(m.du_count)
                    weights.append(m.weight)
                    band.append(m.cam_i)
                    obj_ids.append(m.obj_id)

            measured_du, expected_du, band = map(
                np.array, (measured_du, expected_du, band))

            if plot:
                plt.rcParams.update({'font.size': 16})
                fig, ax = plt.subplots(1, 1, figsize=[6.4, 4.8])
                sb, = ax.plot(expected_du[band == 0] * 1e-3,
                              measured_du[band == 0] * 1e-3, 'bx')
                sg, = ax.plot(expected_du[band == 1] * 1e-3,
                              measured_du[band == 1] * 1e-3, 'gx')
                sr, = ax.plot(expected_du[band == 2] * 1e-3,
                              measured_du[band == 2] * 1e-3, 'rx')
                line = np.linspace(0, np.max(expected_du))
                ax.plot(line * 1e-3, line * 1e-3, 'k--', linewidth=0.5)
                ax.set_xlabel('Expected [1000 DNs]')
                ax.set_ylabel('Measured [1000 DNs]')
                names = Stars.get_catalog_id(
                    np.unique(list(s[0] for s in obj_ids if s[0] != 'moon')),
                    'simbad')
                names['moon'] = 'Moon'
                labels = np.array([names[id[0]] for id in obj_ids])
                tools.hover_annotate(fig, ax, sb, labels[band == 0])
                tools.hover_annotate(fig, ax, sg, labels[band == 1])
                tools.hover_annotate(fig, ax, sr, labels[band == 2])
                plt.tight_layout()
                plt.show()

            _, _, gain_adj0, _ = decode(prior_x)
            #            err = tuple(tools.pseudo_huber_loss(STAR_CALIB_HUBER_COEF, (measured_du - expected_du) * 2 / (expected_du + measured_du)) * np.array(weights))
            err = tuple(
                tools.pseudo_huber_loss(
                    STAR_CALIB_HUBER_COEF,
                    np.log10(expected_du) - np.log10(measured_du)) *
                np.array(weights))

            n = 3 * len(c_qeff_coefs[0])

            lab_dp = tuple()
            if STAR_LAB_DATAPOINT_WEIGHT > 0:
                c, lam = m.frame.cam, 557.7e-9
                g = Camera.sample_qeff(c_qeff_coefs[1], c[1].lambda_min, c[1].lambda_max, lam)
                eps = 1e-10
                r_g = (Camera.sample_qeff(c_qeff_coefs[2], c[2].lambda_min, c[2].lambda_max, lam) + eps) / (g + eps)
                b_g = (Camera.sample_qeff(c_qeff_coefs[0], c[0].lambda_min, c[0].lambda_max, lam) + eps) / (g + eps)
                lab_dp = tuple(STAR_LAB_DATAPOINT_WEIGHT * (np.log10(r_g) - np.log10(np.array((0.26, 0.25, 0.24, 0.24))))**2) \
                       + tuple(STAR_LAB_DATAPOINT_WEIGHT * (np.log10(b_g) - np.log10(np.array((0.23, 0.24, 0.21, 0.22))))**2)

            prior = tuple(STAR_CALIB_PRIOR_WEIGHT ** 2 * (np.array(x[:n]) - np.array(prior_x[:n])) ** 2) \
                if STAR_CALIB_PRIOR_WEIGHT > 0 else tuple()

            err_tuple = lab_dp + err + prior
            return (err_tuple, measured_du, expected_du) if return_details else \
                    err_tuple if opt_method == 'leastsq' else \
                    np.sum(err_tuple)

        if STAR_SATURATION_MODELING:
            psf_coef = STAR_PSF_COEF_TN if cams[0].emp_coef < 1 else STAR_PSF_SDS
        else:
            psf_coef = tuple()

        x0b = encode(cams, f_gains, GENERAL_GAIN_ADJUSTMENT, psf_coef)
        prior_x = encode(get_bgr_cam(estimated=False), f_gains,
                         GENERAL_GAIN_ADJUSTMENT, psf_coef)

        if DEBUG_MEASURES:
            cost_fun(x0b, measures, x0b, plot=True)

        timer = tools.Stopwatch()
        timer.start()
        results = [None] * OPTIMIZER_START_N
        scores = [None] * OPTIMIZER_START_N

        for i in range(OPTIMIZER_START_N):
            tools.show_progress(OPTIMIZER_START_N, i)
            x0 = tuple(
                np.array(x0b) * (1 if OPTIMIZER_START_N == 1 else
                                 np.random.lognormal(0, 0.05, len(x0b))))

            if opt_method == 'leastsq':
                res = leastsq(cost_fun,
                              x0,
                              args=(measures, prior_x),
                              full_output=True,
                              **self.params)
                x, fval = res[0], np.sum(res[2]['fvec'])
            else:
                res = minimize(cost_fun,
                               x0,
                               args=(measures, prior_x),
                               method=opt_method,
                               **self.params)
                x, fval = res.x, res.fun

            results[i] = (x, x0)
            scores[i] = fval if fval > 0 else float('inf')
        timer.stop()

        if len(scores) > 0:
            best = np.argmin(scores)
            print('\nscores: %s' % sorted(scores))
            res, x0 = results[best]
            print('\nbest prior_x: %s' % (x0, ))
            print('best x:  %s' % (res, ))
            print('time: %.1fs' % timer.elapsed)
        else:
            res = x0b

        qeff_coefs, f_gains, gain_adj, psf_sd = decode(res)
        err, measured, expected = cost_fun(res,
                                           measures,
                                           x0b,
                                           return_details=True,
                                           plot=True)
        return qeff_coefs, f_gains, gain_adj, psf_sd, err, measured, expected
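
The residual weighting relies on tools.pseudo_huber_loss(delta, err), which is not shown in this excerpt. Presumably it is the standard pseudo-Huber function, roughly quadratic for small residuals and linear for large ones; a sketch under that assumption:

import numpy as np

def pseudo_huber_loss(delta, err):
    # standard pseudo-Huber: ~err**2/2 for |err| << delta, ~delta*|err| for |err| >> delta
    return delta ** 2 * (np.sqrt(1.0 + (np.asarray(err) / delta) ** 2) - 1.0)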
Code example #6
File: stars.py  Project: oknuutti/visnav-py
    def query_t_eff():
        from astroquery.vizier import Vizier
        v = Vizier(catalog="B/pastel/pastel",
                   columns=["ID", "Teff", "logg", "[Fe/H]"],
                   row_limit=-1)
        v2 = Vizier(catalog="J/A+A/525/A71/table2",
                    columns=["Name", "Teff", "log(g)", "[Fe/H]"],
                    row_limit=-1)
        v3 = Vizier(catalog="J/MNRAS/471/770/table2",
                    columns=["HIP", "Teff", "log(g)"],
                    row_limit=-1)

        conn = sqlite3.connect(Stars.STARDB)
        cursor_r = conn.cursor()
        cursor_w = conn.cursor()

        cond = "(t_eff is null OR log_g is null OR 1)"
        N_tot = cursor_r.execute("""
            SELECT max(id) FROM deep_sky_objects 
            WHERE %s
            """ % cond).fetchone()[0]

        skip = 37601
        f_id, f_hip, f_hd, f_sim, f_ra, f_dec, f_t, f_g, f_m, f_src = range(10)
        results = cursor_r.execute(
            """
            SELECT id, hip, hd, simbad, ra, dec, t_eff, log_g, fe_h, src
            FROM deep_sky_objects
            WHERE %s AND id >= ?
            ORDER BY id ASC
            """ % cond, (skip, ))

        r = v.query_constraints()[0]
        r.add_index('ID')

        N = 40
        while True:
            rows = results.fetchmany(N)
            if rows is None or len(rows) == 0:
                break
            tools.show_progress(N_tot, rows[0][f_id] - 1)

            ids = {
                row[f_id]: [i, row[f_src][:3] + '___']
                for i, row in enumerate(rows)
            }
            insert = {}
            for i, row in enumerate(rows):
                k = 'HIP %6d' % int(row[f_hip])
                if get(r, k) is None and row[f_hd]:
                    k = 'HD %6d' % int(row[f_hd])
                if get(r, k) is None and row[f_sim]:
                    k = row[f_sim]
                if get(r, k) is None and row[f_sim]:
                    k = row[f_sim] + ' A'
                dr = get(r, k)
                if dr is not None:
                    t_eff, log_g, fe_h = median(dr,
                                                ('Teff', 'logg', '__Fe_H_'),
                                                null='null')
                    src = row[f_src][0:3] + ''.join(
                        [('_' if v == 'null' else Stars.SOURCE_PASTEL)
                         for v in (t_eff, log_g, fe_h)])
                    insert[row[f_id]] = [t_eff, log_g, fe_h, src]
                    if '_' not in src[3:5]:
                        ids.pop(row[f_id])
                    else:
                        ids[row[f_id]][1] = src

            if len(ids) > 0:
                # try using other catalog
                r = v2.query_constraints(
                    Name='=,' + ','.join([('HD%06d' % int(rows[i][f_hd]))
                                          for i, s in ids.values()
                                          if rows[i][f_hd] is not None]))
                time.sleep(2)
                if len(r) > 0:
                    r = r[0]
                    r.add_index('Name')
                    for id, (i, src) in ids.copy().items():
                        dr = get(r, 'HD%06d' %
                                 int(rows[i][f_hd])) if rows[i][f_hd] else None
                        if dr is not None:
                            t_eff, log_g, fe_h = median(
                                dr, ('Teff', 'log_g_', '__Fe_H_'), null='null')
                            src = src[0:3] + ''.join(
                                [('_' if v == 'null' else Stars.SOURCE_WU)
                                 for v in (t_eff, log_g, fe_h)])
                            insert[id] = [t_eff, log_g, fe_h, src]
                            if '_' not in src[3:5]:
                                ids.pop(rows[i][f_id])
                            else:
                                ids[rows[i][f_id]][1] = src

            if len(ids) > 0:
                # try using other catalog
                r = v3.query_constraints(
                    HIP='=,' +
                    ','.join([str(rows[i][f_hip])
                              for i, s in ids.values()]))[0]
                r.add_index('HIP')
                for id, (i, src) in ids.copy().items():
                    dr = get(r, int(rows[i][f_hip]))
                    if dr is not None:
                        t_eff, log_g = median(dr, ('Teff', 'log_g_'),
                                              null='null')
                        src = src[0:3] + ''.join(
                            [('_' if v == 'null' else Stars.SOURCE_GAIA1)
                             for v in (t_eff, log_g)]) + src[5]
                        insert[id] = [
                            t_eff, log_g,
                            insert[id][2] if id in insert else 'null', src
                        ]
                        # if '_' not in src[3:5]:
                        #     ids.pop(rows[i][f_id])
                        # else:
                        #     ids[rows[i][f_id]][1] = src

            if len(insert) > 0:
                values = [
                    "(%d, %s, %s, %s, '%s', 0,0,0,0,0,0)" %
                    (id, t_eff, log_g, fe_h, src)
                    for id, (t_eff, log_g, fe_h, src) in insert.items()
                ]
                cursor_w.execute("""
                    INSERT INTO deep_sky_objects (id, t_eff, log_g, fe_h, src, ra, dec, x, y, z, mag_v) VALUES """
                                 + ','.join(values) + """
                    ON CONFLICT(id) DO UPDATE SET 
                        t_eff = excluded.t_eff, 
                        log_g = excluded.log_g, 
                        fe_h = excluded.fe_h,
                        src = excluded.src
                """)
                conn.commit()
        conn.close()
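
This example and example #2 both call module-level helpers get and median that are not included in these excerpts. Judging from how they are used (lookup by index key in an astropy Table, column-wise medians with a 'null' fallback), they presumably behave roughly like the hypothetical reconstructions below:

import numpy as np

def get(table, key):
    # fetch row(s) from an indexed astropy Table, returning None instead of raising
    try:
        return table.loc[key]
    except KeyError:
        return None

def median(rows, fields, null='null'):
    # per-column median over the matched rows, ignoring masked/NaN entries;
    # returns the given null marker when a column has no valid values
    out = []
    for f in fields:
        vals = np.atleast_1d(np.ma.filled(rows[f], np.nan)).astype(float)
        vals = vals[~np.isnan(vals)]
        out.append(float(np.median(vals)) if len(vals) else null)
    return out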
Code example #7
def main(mission_sc=False, simple=False):
    mpl.rcParams['font.size'] = FONT_SIZE
    mpl.rcParams['lines.markersize'] = MARKER_SIZE
    mpl.rcParams['lines.linewidth'] = LINE_WIDTH

    try:
        switches = {'--video'}
        args = sys.argv[1:]
        video = '--video' in args
        if video:
            args = [a for a in args if a not in switches]
        filename = args[0]
        span = tuple(map(int, args[1].split(':'))) if len(args) > 1 else None
        is_4km = '4km' in filename
        is_vo = '-vo-' in filename
        is_nac = '-nac-' in filename or '-1n-' in filename or '-2n-' in filename
    except:
        print('USAGE: %s <logfile>' % sys.argv[0])
        quit()

    raw = []
    with open(filename, newline='') as fh:
        reader = csv.reader(fh, delimiter='\t', quotechar='"')
        for i, row in enumerate(reader):
            if i == 0 and mission_sc:
                continue
            raw.append(row)
    data = np.array(raw).astype('double')
    if span is not None:
        data = data[span[0]:span[1], :]

    if mission_sc:
        plot_mission_sc(data, format=3 if 'results' in filename else 2)
        quit()

    use_d2 = ('-d2-' in filename.lower() or '-2n-' in filename.lower()
              or '-2w-' in filename.lower())
    id = filename.split('\\')[-1].split('-')[0]

    # time (1)
    time = data[:, 0] / 3600

    # real D1 pose (7)
    d1_loc = data[:, 1:4]
    d1_q = data[:, 4:8]

    # real D2 pose (7)          15
    d2_loc = data[:, 8:11]
    d2_q = data[:, 11:15]

    trg_loc = d2_loc if use_d2 else d1_loc
    trg_q = quaternion.as_quat_array(d2_q if use_d2 else d1_q)

    # real s/c pose (7)
    sc_loc = data[:, 15:18]
    sc_q = quaternion.as_quat_array(data[:, 18:22])

    # init s/c pose (7)         29
    isc_loc = data[:, 22:25]
    isc_q = data[:, 25:29]

    # landmark algo loc (3)
    # TODO: check that sc_q works, seems that static
    spl_loc = trg_loc - tools.q_times_mx(sc_q, data[:, 29:32])

    # landmark algo loc ok (1)  33
    spl_loc_ok = data[:, 32:33]
    spl_loc[np.logical_not(spl_loc_ok).flatten(), :] = np.nan

    # landmark algo ori (4)
    spl_q = data[:, 33:37]

    # landmark algo ori ok (1)  38
    spl_q_ok = data[:, 37:38]
    spl_q[np.logical_not(spl_q_ok).flatten(), :] = np.nan

    # laser algo loc (3)
    lsr_loc = trg_loc - tools.q_times_mx(sc_q, data[:, 38:41])

    # laser algo loc ok (1)     42
    lsr_loc_ok = data[:, 41:42]
    lsr_loc[np.logical_not(lsr_loc_ok).flatten(), :] = np.nan

    # nav filter loc (3)
    flt_loc = data[:, 42:45] if True else np.full_like(spl_loc, np.nan)

    # measurement log likelihood (1)
    meas_ll = data[:, 45:46]

    # delta-v spent (1)
    cum_delta_v = data[:, 46:47]

    # vo loc (3)
    vo_loc = trg_loc - tools.q_times_mx(sc_q, data[:, 47:50])

    # vo ori (4)
    vo_q = data[:, 50:54]

    # vo meas sds (6)
    vo_meas_sds = data[:, 54:60]

    # vo bias sds (6)
    vo_bias_sds = data[:, 60:66]

    # vo scale drift sd (1)
    vo_scale_drift_sd = data[:, 66:67]

    # vo ok
    vo_ok = data[:, 67:68]

    # phase angle
    phase_angle = data[:, 68:69]

    # vo scale (1)
    vo_scale = data[:, 69:70]

    # cnt location (3)
    cnt_loc = (trg_loc - tools.q_times_mx(sc_q, data[:, 70:73])
               if data.shape[1] >= 73 else np.full_like(spl_loc, np.nan))

    # sun-sc vect
    sun_v = data[:, 73:76] if data.shape[1] >= 76 else None

    # s/c-target distance
    distance = np.linalg.norm(sc_loc - trg_loc, axis=1)

    has_spl = not np.all(np.isnan(spl_loc))
    has_lsr = not np.all(np.isnan(lsr_loc))
    has_vo = not np.all(np.isnan(vo_loc))  # and False
    has_cnt = not np.all(np.isnan(cnt_loc))
    has_flt = False

    if use_d2:
        sun_v = sun_v if sun_v is not None else {
            'id2': np.array([-0.3067, -0.9427, -0.1315]),
            'id4': np.array([-0.5252, -0.8379, -0.1485]),
            'id5': np.array([0, -1, 0]),
        }[id]
        is_d2_ecl = d2_eclipses(sun_v, d1_loc, d2_loc)
        d2_ecl = get_intervals(time, is_d2_ecl)

        is_d1_bg, is_d1_fg = d2_when_d1_in_view(sc_loc, sc_q, d1_loc, d2_loc)
        d1_bg, d1_fg = get_intervals(time, is_d1_bg), get_intervals(time, is_d1_fg)

    if not video:
        cnt_max_dist = {
            True: {  # is_nac
                True: 1300,  # - use_d2
                False: 5800,  # - not use_d2
            },
            False: {  # not is_nac
                True: 225,  # - use_d2
                False: 1050,  # - not use_d2
            },
        }[is_nac][use_d2]
        cnt_loc[phase_angle.flatten() > 100 / 180 * np.pi, :] = np.nan
        cnt_loc[distance.flatten() < cnt_max_dist, :] = np.nan
        spl_loc[phase_angle.flatten() > 135 / 180 * np.pi, :] = np.nan

    incl_for_stats = (phase_angle < 100 / 180 * np.pi).flatten()  # phase angle less than 100 deg
    if use_d2:
        incl_for_stats = np.logical_and.reduce((
            incl_for_stats,
            np.logical_not(is_d1_fg),
            np.logical_not(is_d2_ecl),
        ))

    # calculate transformation to synodic frame, apply
    tr_sf = calc_transf(d1_loc, d2_loc)

    # for error calculation and plots
    tr_stf = calc_transf(sc_loc, d2_loc if use_d2 else d1_loc)
    c_lab = ('distance', 'along orbit', 'above orbit')

    if has_vo:
        vo_loc, vo_scale, vo_loc_bias, nkf_idxs, is_mm = vo_data_prep(
            vo_loc, vo_scale, vo_bias_sds, sc_loc, trg_loc)
        if False:
            # correct drifting scale
            vo_loc = (vo_loc - trg_loc) / vo_scale.reshape((-1, 1)) + trg_loc

    d1_loc_sf = apply_transf(tr_sf, d1_loc)
    d2_loc_sf = apply_transf(tr_sf, d2_loc)
    trg_loc_sf = d2_loc_sf if use_d2 else d1_loc_sf
    sc_loc_sf = apply_transf(tr_sf, sc_loc)
    isc_loc_sf = apply_transf(tr_sf, isc_loc)
    spl_loc_sf = apply_transf(tr_sf, spl_loc)
    lsr_loc_sf = apply_transf(tr_sf, lsr_loc)
    flt_loc_sf = apply_transf(tr_sf, flt_loc)
    vo_loc_sf = apply_transf(tr_sf, vo_loc)
    cnt_loc_sf = apply_transf(tr_sf, cnt_loc)

    d1_loc_stf = apply_transf(tr_stf, d1_loc)
    d2_loc_stf = apply_transf(tr_stf, d2_loc)
    trg_loc_stf = d2_loc_stf if use_d2 else d1_loc_stf
    sc_loc_stf = apply_transf(tr_stf, sc_loc)
    isc_loc_stf = apply_transf(tr_stf, isc_loc)
    spl_loc_stf = apply_transf(tr_stf, spl_loc)
    lsr_loc_stf = apply_transf(tr_stf, lsr_loc)
    flt_loc_stf = apply_transf(tr_stf, flt_loc)
    vo_loc_stf = apply_transf(tr_stf, vo_loc)
    cnt_loc_stf = apply_transf(tr_stf, cnt_loc)

    if has_vo:
        vo_loc_sf = apply_transf(tr_sf, vo_loc)
        vo_loc_stf = apply_transf(tr_stf, vo_loc)
        vo_loc_bias_stf = apply_transf(tr_stf, vo_loc_bias)
        #vo_loc_sf, _, vo_loc_bias_sf, _, _ = vo_data_prep(vo_loc_sf, vo_scale, vo_bias_sds, sc_loc_sf, trg_loc_sf)
        #vo_loc_stf, vo_scale, vo_loc_bias_stf, nkf_idxs, is_mm = vo_data_prep(vo_loc_stf, vo_scale, vo_bias_sds, sc_loc_stf, trg_loc_stf)

    if has_flt:
        flt_err_mean = tools.robust_mean(flt_loc_stf - sc_loc_stf, axis=0)
        flt_err_std = tools.robust_std(flt_loc_stf - sc_loc_stf, axis=0)
    if has_lsr:
        lsr_err_mean = tools.robust_mean((lsr_loc_stf - sc_loc_stf), axis=0)
        lsr_err_std = tools.robust_std((lsr_loc_stf - sc_loc_stf), axis=0)
    if has_cnt:
        cnt_err_mean = tools.robust_mean(
            (cnt_loc_stf - sc_loc_stf)[incl_for_stats, :], axis=0)
        cnt_err_std = tools.robust_std(
            (cnt_loc_stf - sc_loc_stf)[incl_for_stats, :], axis=0)
    if has_spl:
        spl_err_mean = tools.robust_mean(
            (spl_loc_stf - sc_loc_stf)[incl_for_stats, :], axis=0)
        spl_err_std = tools.robust_std(
            (spl_loc_stf - sc_loc_stf)[incl_for_stats, :], axis=0)
    if has_vo:
        vo_err_mean = tools.robust_mean(
            (vo_loc_stf - sc_loc_stf)[incl_for_stats, :], axis=0)
        vo_err_std = tools.robust_std(
            (vo_loc_stf - sc_loc_stf)[incl_for_stats, :], axis=0)

        # nkf_idxs need to include a nan value between vo resets, is_mm
        vo_delta_scale_mean = tools.robust_mean(
            np.diff(vo_scale[nkf_idxs])[np.logical_not(is_mm[1:])])
        vo_delta_scale_std = tools.robust_std(np.diff(
            vo_scale[nkf_idxs])[np.logical_not(is_mm[1:])],
                                              mean=0)
        vo_delta_bias_mean = tools.robust_mean(np.diff(
            vo_loc_bias_stf[nkf_idxs], axis=0)[np.logical_not(is_mm[1:]), :],
                                               axis=0)
        vo_delta_bias_std = tools.robust_std(np.diff(
            vo_loc_bias_stf[nkf_idxs], axis=0)[np.logical_not(is_mm[1:]), :],
                                             mean=0,
                                             axis=0)
        vo_mm_delta_scale_mean = tools.robust_mean(
            np.diff(vo_scale[nkf_idxs])[is_mm[1:]])
        vo_mm_delta_scale_std = tools.robust_std(np.diff(
            vo_scale[nkf_idxs])[is_mm[1:]],
                                                 mean=0)
        vo_mm_delta_bias_mean = tools.robust_mean(np.diff(
            vo_loc_bias_stf[nkf_idxs], axis=0)[is_mm[1:], :],
                                                  axis=0)
        vo_mm_delta_bias_std = tools.robust_std(np.diff(
            vo_loc_bias_stf[nkf_idxs], axis=0)[is_mm[1:], :],
                                                mean=0,
                                                axis=0)

    cutoff_time = time[0] + {
        'id1': time[-1],
        'id2': 1.5 * 73.125,
        'id3': time[-1],
        'id4': 4 * 11.91,
        'id5': 2 * 11.91,
    }[id]
    cutoff = np.argmax(time > cutoff_time)

    # normal plots
    if simple:
        fig2, axs = plt.subplots(4 + (0 if has_vo else 0),
                                 1,
                                 sharex=True,
                                 figsize=(8, 6))

        axs[0].plot(time, phase_angle / np.pi * 180, 'C0', label='phase angle')
        axs[0].set_ylabel('phase angle', color='C0')
        axs[0].tick_params(axis='y', labelcolor='C0')
        axs[0].set_ybound(0, 180)

        ax0b = axs[0].twinx()
        ax0b.plot(time, distance, 'C1', label='distance')
        ax0b.set_ylabel('distance', color='C1')
        ax0b.tick_params(axis='y', labelcolor='C1')

        axs[-1].set_xlabel('time [h]')
        for i, lab in enumerate(c_lab):
            axs[i + 1].set_ylabel(lab + ' error [m]')


#        for i, a in enumerate('real '+a for a in c_lab):
#            axs[i+1].plot(time, sc_loc_stf[:, i] - sc_loc_stf[:, i], label=a)

#        print('filter err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' % (*flt_err_mean, *flt_err_std))
#        for i, a in enumerate('filter '+a for a in c_lab):
#            axs[i+1].plot(time, flt_loc_stf[:, i] - sc_loc_stf[:, i], label=a)

        if id in ('id3', 'id5'):
            idx = np.isclose((time * 60 * 60 - 5 + 1e-10) % 60, 0)
        else:
            idx = np.ones(len(time), dtype=bool)

        if has_cnt:
            print('cnt err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' %
                  (*cnt_err_mean, *cnt_err_std))
            for i, a in enumerate(c_lab):
                axs[i + 1].plot(time[idx],
                                cnt_loc_stf[idx, i] - sc_loc_stf[idx, i],
                                'C0--',
                                label='CNT')

        if has_spl:
            print('spl err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' %
                  (*spl_err_mean, *spl_err_std))
            for i, a in enumerate(c_lab):
                axs[i + 1].plot(time[idx],
                                spl_loc_stf[idx, i] - sc_loc_stf[idx, i],
                                'C1--',
                                label='SPL')

        if has_lsr:
            print('lsr err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' %
                  (*lsr_err_mean, *lsr_err_std))
            for i, a in enumerate(c_lab):
                axs[i + 1].plot(time[idx],
                                lsr_loc_stf[idx, i] - sc_loc_stf[idx, i],
                                'C3:',
                                label='LSR')

        if has_vo:
            print('vo delta scale μ=%.2f, σ=%.2f' %
                  (vo_delta_scale_mean, vo_delta_scale_std))
            print('vo delta bias μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' %
                  (*vo_delta_bias_mean, *vo_delta_bias_std))
            print('vo mm delta scale μ=%.2f, σ=%.2f' %
                  (vo_mm_delta_scale_mean, vo_mm_delta_scale_std))
            print(
                'vo mm delta bias μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' %
                (*vo_mm_delta_bias_mean, *vo_mm_delta_bias_std))
            print('vo meas err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' %
                  (*vo_err_mean, *vo_err_std))

            if id == 'id5':
                idx4 = np.isclose((time * 60 * 60 - 5 + 1e-10) % 60, 0)
            else:
                idx4 = np.ones(len(time), dtype=bool)

            for i, a in enumerate(c_lab):
                axs[i + 1].plot(time[idx4],
                                vo_loc_stf[idx4, i] - sc_loc_stf[idx4, i],
                                'C2-.',
                                label='VO')
            # for i, a in enumerate('vo bias ' + a for a in c_lab):
            #     axs[i].plot(time[idx], vo_loc_bias_stf[idx, i], 'b-', label=a)
            # axs[-1].plot(time[idx], vo_scale[idx], 'b-', label='vo scale')

        bounded = True
        bad_pa = get_intervals(time, phase_angle > 135 / 180 * np.pi)

        if id == 'id1':
            pass
            #axs[i].set_ybound(-1000, 1000)
        elif id == 'id2':
            if bounded:
                axs[1].set_ybound(-400, 400)
                axs[2].set_ybound(-40, 40)
                axs[3].set_ybound(-40, 40)
        elif id == 'id3':
            pass
            #axs[i].set_ybound(-1000, 1000)
        elif id == 'id4':
            if bounded:
                axs[1].set_ybound(-20, 20)
                axs[2].set_ybound(-40, 40)
                axs[3].set_ybound(-40, 40)
        elif id == 'id5':
            if bounded:
                axs[1].set_ybound(-5, 5)
                axs[2].set_ybound(-10, 10)
                axs[3].set_ybound(-10, 10)

        for i in range(1, 4):
            axs[i].legend(loc='lower right')
            for s, e in bad_pa:
                axs[i].axvspan(s, e, facecolor='#f7aaaa', alpha=0.5)  # pink
            if use_d2:
                for s, e in d2_ecl:
                    axs[i].axvspan(s, e, facecolor='#b0f9ef',
                                   alpha=0.5)  # turq
                for s, e in d1_bg:
                    axs[i].axvspan(s, e, facecolor='#f8f9b0',
                                   alpha=0.5)  # green
                for s, e in d1_fg:
                    axs[i].axvspan(s, e, facecolor='#f5b0f9',
                                   alpha=0.5)  # purple
        if bounded:
            ax0b.set_xbound(time[0], cutoff_time)

    else:
        fig2, axs = plt.subplots(4 if cum_delta_v[-1] > 0 else 3,
                                 1,
                                 sharex=True,
                                 figsize=(8, 6))

        # # location errors
        # i = 0
        # for j, a in enumerate('real '+a for a in c_lab):
        #     axs[i].plot(time, sc_loc_stf[:, j], label=a)
        # axs[i].set_prop_cycle(None)
        # for j, a in enumerate('filter '+a for a in c_lab):
        #     axs[i].plot(time, flt_loc_stf[:, j], ':', label=a)
        #
        # axs[i].set_title('filter output\nerr μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' % (*flt_err_mean, *flt_err_std))
        # axs[i].legend(loc='lower right')
        #
        # # measurements
        # i += 1
        # for j, a in enumerate('real '+a for a in c_lab):
        #     axs[i].plot(time, sc_loc_stf[:, j], label=a)
        # axs[i].set_prop_cycle(None)
        #
        # if has_spl:
        #     for j, a in enumerate('spl '+a for a in c_lab):
        #         axs[i].plot(time, spl_loc_stf[:, j], 'C1--', label=a)
        #     axs[i].set_prop_cycle(None)
        #
        # if has_lsr:
        #     for j, a in enumerate('laser '+a for a in c_lab):
        #         axs[i].plot(time, lsr_loc_stf[:, j], 'r:', label=a)
        #     axs[i].set_prop_cycle(None)
        #
        # if has_vo:
        #     for j, a in enumerate('vo '+a for a in c_lab):
        #         axs[i].plot(time, vo_loc_stf[:, j], 'C2.-', label=a)
        #     axs[i].set_prop_cycle(None)
        #
        # axs[i].set_title('measurements'
        #                  + ('\nopt err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' % (*spl_err_mean, *spl_err_std) if has_spl else '')
        #                  + ('\nlsr err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' % (*lsr_err_mean, *lsr_err_std) if has_lsr else '')
        #                  + ('\nvo err μ=(%.2f, %.2f, %.2f), σ=(%.2f, %.2f, %.2f)' % (*vo_err_mean, *vo_err_std) if has_vo else '')
        #                  )
        # axs[i].legend(loc='lower right')

        # measurement likelihood
        i = 0  # the panels above are commented out, so start indexing at the first axis
        axs[i].plot(time, meas_ll)
        axs[i].set_title('measurement likelihood')

        # delta-v used
        if cum_delta_v[-1] > 0:
            i += 1
            axs[i].plot(time, cum_delta_v)
            axs[i].set_title('cumulative delta-v usage')

        axs[i].set_xlim(np.min(time), np.max(time))
    plt.tight_layout()

    # plot didymain & didymoon
    fig1, ax = plt.subplots(figsize=(7, 7))

    if video:
        framerate = 25
        dw, dh = fig1.canvas.get_width_height()
        writer = cv2.VideoWriter(filename[:-4] + '.avi',
                                 cv2.VideoWriter_fourcc(*'DIVX'), framerate,
                                 (dw * 2, dh * 2))

    try:
        skip = 2
        for c in range(skip, len(d1_loc_sf), skip):
            if video:
                tools.show_progress(len(d1_loc_sf) // skip, c // skip)
            else:
                c = cutoff or -1

            if is_vo:
                # for c in range(0, len(d1_loc_sf), 30):
                #     plot_orbit_sf(ax, d1_loc_sf, sc_loc_sf, vo_loc_sf, cutoff=c, idx1=1, static=False)
                plot_orbit_sf(ax,
                              d1_loc_sf,
                              sc_loc_sf,
                              vo_loc_sf,
                              cutoff=c,
                              static=not video)
            elif is_4km:
                plot_orbit_sf(
                    ax,
                    d1_loc,
                    d2_loc,
                    sc_loc,
                    flt_loc if has_flt else None,
                    spl_loc=spl_loc[idx, :] if id in ('id1', 'id2',
                                                      'id3') else None,
                    vo_loc=vo_loc[idx4, :]
                    if id in ('id1', 'id3') and has_vo else None,
                    synodic=False,
                    cutoff=c,
                    static=not video)
            else:
                plot_orbit_sf(
                    ax,
                    d1_loc_sf,
                    d2_loc_sf,
                    sc_loc_sf,
                    flt_loc_sf if has_flt else None,
                    spl_loc=spl_loc_sf[idx, :] if id in ('id4',
                                                         'id5') else None,
                    #vo_loc=vo_loc_sf[idx4, :] if id in ('id5',) else None,
                    synodic=True,
                    cutoff=c,
                    static=not video)
            if video:
                #plt.tight_layout()
                # plt.pause(0.05)
                # plt.waitforbuttonpress()
                mi = [m for m in (5760, 7593) if m < c]
                if len(mi) > 0:
                    ax.plot(spl_loc[mi, 0],
                            spl_loc[mi, 1],
                            'bv',
                            label='Maneuver',
                            fillstyle='none')
                errtxt = 'error [m]: x=%5.1f, y=%5.1f, z=%5.1f' % tuple(
                    spl_loc[c, :] - sc_loc[c, :])
                plt.text(2650,
                         9500,
                         errtxt,
                         family='monospace',
                         fontsize=12,
                         horizontalalignment='center')
                ax.set_xbound(-5200, 10500)
                ax.set_ybound(-7100, 8600)
                fig1.canvas.draw()
                img = np.frombuffer(fig1.canvas.tostring_argb(),
                                    dtype=np.uint8)
                img.shape = (dh * 3, dw * 3, 4)  # why need *3 ???
                # canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
                img = np.roll(img, 3, axis=2)
                img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGR)
                img = cv2.resize(img, (dw * 2, dh * 2))
                if False:
                    cv2.imshow('test', img)
                    cv2.waitKey()
                writer.write(img)
                ax.clear()
            else:
                plt.tight_layout()
                plt.show()
                break
    finally:
        if video:
            writer.release()
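
get_intervals(time, mask) is used above to turn boolean flags (eclipses, D1 in the fore/background, bad phase angles) into (start, end) spans for axvspan. It is not part of this excerpt; a minimal sketch of what it presumably does:

import numpy as np

def get_intervals(time, mask):
    # (start, end) time pairs for each contiguous run of True values in mask
    m = np.asarray(mask).flatten().astype(int)
    edges = np.diff(np.concatenate(([0], m, [0])))
    starts = np.where(edges == 1)[0]
    ends = np.where(edges == -1)[0] - 1
    return [(time[s], time[e]) for s, e in zip(starts, ends)]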
Code example #8
            img_files.append((i, file))

    img_files = [f for _, f in sorted(img_files, key=lambda x: x[0])]
    assert len(img_files) > 3, 'too few images found: %s' % (img_files, )

    img0 = cv2.imread(os.path.join(folder, img_files[0]), cv2.IMREAD_COLOR)
    sh, sw, sc = img0.shape
    codecs = ['DIVX', 'H264', 'MPEG', 'MJPG']
    writer = cv2.VideoWriter(target_file, cv2.VideoWriter_fourcc(*codecs[0]),
                             framerate, (dw, dh))
    imgs = []
    times = []
    try:
        for i, f in enumerate(img_files):
            if i % skip_mult == 0:
                tools.show_progress(
                    len(img_files) // skip_mult, i // skip_mult)
                img = cv2.imread(os.path.join(folder, f), cv2.IMREAD_COLOR)
                if sw != dw or sh != dh:
                    img = cv2.resize(img, (dw, dh),
                                     interpolation=cv2.INTER_AREA)
                if exposure:
                    # blend images to simulate blur due to long exposure times
                    timestr = f[0:17]
                    time = datetime.datetime.strptime(timestr,
                                                      '%Y-%m-%dT%H%M%S')
                    imgs.append(img)
                    times.append(time)
                    idxs = np.where(
                        np.array(times) > time -
                        datetime.timedelta(seconds=exposure))
                    if len(idxs) < np.ceil(exposure):