Example #1
def write_flat_table(hs):
    dbdir = hs.dirs.db_dir
    # Make flat table
    valid_cx = hs.get_valid_cxs()
    flat_table = make_flat_table(hs, valid_cx)
    flat_table_fpath = join(dbdir, 'flat_table.csv')
    # Write flat table
    print('[ld2] Writing flat table')
    helpers.write_to(flat_table_fpath, flat_table)
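
Every example on this page ultimately calls helpers.write_to(fpath, text). The helpers module itself is not shown here; a minimal sketch consistent with how it is used (the text-mode/overwrite behavior is an assumption) would be:

def write_to(fpath, to_write):
    # Minimal sketch of the assumed behavior of helpers.write_to:
    # overwrite fpath with the given text payload.
    with open(fpath, 'w') as file_:
        file_.write(to_write)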
Example #2
def __dump_text_report(allres, report_type=None):
    # Default to the rank-result report when no report type is given
    # (the original 'report_type' in vars() check could never fire,
    # since a parameter is always bound in the local namespace)
    if report_type is None:
        report_type = 'rankres_str'
    print('[rr2] Dumping textfile: ' + report_type)
    report_str = allres.__dict__[report_type]
    # Get directories
    result_dir = allres.hs.dirs.result_dir
    timestamp_dir = join(result_dir, 'timestamped_results')
    helpers.ensurepath(timestamp_dir)
    helpers.ensurepath(result_dir)
    # Write to timestamp and result dir
    timestamp = helpers.get_timestamp()
    csv_timestamp_fname = report_type + allres.title_suffix + timestamp + '.csv'
    csv_timestamp_fpath = join(timestamp_dir, csv_timestamp_fname)
    csv_fname = report_type + allres.title_suffix + '.csv'
    csv_fpath = join(result_dir, csv_fname)
    helpers.write_to(csv_fpath, report_str)
    helpers.write_to(csv_timestamp_fpath, report_str)
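
Example #2 also leans on helpers.ensurepath and helpers.get_timestamp, neither of which is defined on this page. A rough sketch of the assumed behavior:

import os
import time

def ensurepath(path_):
    # Assumed behavior: create the directory (and any parents) if missing.
    if not os.path.exists(path_):
        os.makedirs(path_)

def get_timestamp():
    # Assumed behavior: a filesystem-safe timestamp suffix for filenames.
    return time.strftime('_%Y-%m-%d_%H-%M-%S')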
Example #3
def write_to_wrapper(csv_fpath, csv_string):
    if exists(csv_fpath) and DIFF_CHECK:
        print('table already exists: %r' % csv_fpath)
        with open(csv_fpath) as file:
            csv_string2 = file.read()
            if csv_string2 == csv_string:
                print('No difference!')
            else:
                print('difference!')
                #print('--------')
                #print(csv_string2)
                #print('--------')
                #print(csv_string)
                #print('--------')
                #diff_str = diff_strings(csv_string2, csv_string)
                #print(diff_str)
                #print('--------')
    else:
        helpers.write_to(csv_fpath, csv_string)
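
The commented-out branch above refers to a diff_strings helper that is not shown. If one were needed, a plain difflib version along these lines would do (the name is taken from the comment; the unified-diff formatting is an assumption):

import difflib

def diff_strings(old, new):
    # Assumed sketch: unified diff of two multi-line strings.
    old_lines = old.splitlines(True)
    new_lines = new.splitlines(True)
    return ''.join(difflib.unified_diff(old_lines, new_lines))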
Example #4
def tune_flann(data, **kwargs):
    flann = pyflann.FLANN()
    #num_data = len(data)
    flann_atkwargs = dict(algorithm='autotuned',
                          target_precision=.01,
                          build_weight=0.01,
                          memory_weight=0.0,
                          sample_fraction=0.001)
    flann_atkwargs.update(kwargs)
    suffix = repr(flann_atkwargs)
    badchar_list = ',{}\': '
    for badchar in badchar_list:
        suffix = suffix.replace(badchar, '')
    print(flann_atkwargs)
    tuned_params = flann.build_index(data, **flann_atkwargs)
    helpers.myprint(tuned_params)
    out_file = 'flann_tuned' + suffix
    helpers.write_to(out_file, repr(tuned_params))
    flann.delete_index()
    return tuned_params
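
A hypothetical call, assuming pyflann is installed and the descriptors fit in memory; the array shape and the overridden target_precision below are made up for illustration:

import numpy as np

# Hypothetical invocation on random 128-dimensional descriptors;
# any keyword overrides are merged into the autotune parameters.
data = np.random.rand(10000, 128).astype(np.float32)
tuned_params = tune_flann(data, target_precision=0.9)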
Example #5
def write_csv_tables(hs):
    'Saves the tables to disk'
    print('[ld2] Writing csv tables')
    internal_dir = hs.dirs.internal_dir
    CREATE_BACKUP = True  # TODO: Should be a preference
    if CREATE_BACKUP:
        backup_csv_tables(hs, force_backup=True)
    # csv strings
    chip_table  = make_chip_csv(hs)
    image_table = make_image_csv(hs)
    name_table  = make_name_csv(hs)
    # csv filenames
    chip_table_fpath  = join(internal_dir, CHIP_TABLE_FNAME)
    name_table_fpath  = join(internal_dir, NAME_TABLE_FNAME)
    image_table_fpath = join(internal_dir, IMAGE_TABLE_FNAME)
    # write csv files
    helpers.write_to(chip_table_fpath, chip_table)
    helpers.write_to(name_table_fpath, name_table)
    helpers.write_to(image_table_fpath, image_table)
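
backup_csv_tables is not defined on this page. Under the assumption that it simply snapshots the existing tables before they are rewritten, a minimal sketch could be:

import glob
import os
import shutil
import time
from os.path import join

def backup_csv_tables(hs, force_backup=False):
    # Assumed sketch: copy every csv table in internal_dir into a
    # timestamped backup directory before the tables are overwritten.
    # force_backup is accepted only to match the call site above.
    internal_dir = hs.dirs.internal_dir
    backup_dir = join(internal_dir, 'backups')
    if not os.path.exists(backup_dir):
        os.makedirs(backup_dir)
    timestamp = time.strftime('_%Y-%m-%d_%H-%M-%S')
    for src in glob.glob(join(internal_dir, '*.csv')):
        fname, ext = os.path.splitext(os.path.basename(src))
        shutil.copy(src, join(backup_dir, fname + timestamp + ext))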
Example #6
def export_subdatabase(hs, gx_list, new_dbdir):
    # New database dirs
    new_imgdir = join(new_dbdir, ld2.RDIR_IMG)
    new_internal = join(new_dbdir, ld2.RDIR_INTERNAL)
    print('[scripts] Exporting into %r' % new_dbdir)

    # Ensure new database
    helpers.ensuredir(new_dbdir)
    helpers.ensuredir(new_imgdir)
    helpers.ensuredir(new_internal)

    gname_list = hs.gx2_gname(gx_list)
    src_gname_list = hs.gx2_gname(gx_list, full=True)
    dst_gname_list = [join(new_imgdir, gname) for gname in gname_list]

    copy_list = list(zip(src_gname_list, dst_gname_list))

    mark_progress, end_prog = helpers.progress_func(len(copy_list), lbl='Copy Images')
    for count, (src, dst) in enumerate(copy_list):
        shutil.copy(src, dst)
        mark_progress(count)
    end_prog()

    cx_list = [cx for cxs in hs.gx2_cxs(gx_list) for cx in cxs.tolist()]
    nx_list = np.unique(hs.tables.cx2_nx[cx_list])

    image_table = ld2.make_image_csv2(hs, gx_list)
    chip_table  = ld2.make_chip_csv2(hs, cx_list)
    name_table  = ld2.make_name_csv2(hs, nx_list)
    # csv filenames
    chip_table_fpath  = join(new_internal, ld2.CHIP_TABLE_FNAME)
    name_table_fpath  = join(new_internal, ld2.NAME_TABLE_FNAME)
    image_table_fpath = join(new_internal, ld2.IMAGE_TABLE_FNAME)
    # write csv files
    helpers.write_to(chip_table_fpath, chip_table)
    helpers.write_to(name_table_fpath, name_table)
    helpers.write_to(image_table_fpath, image_table)
    return locals()
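
A hypothetical way to call this, assuming hs is an already-loaded database handle; the image indexes and output path below are placeholders:

# Hypothetical usage: copy the first ten images, plus their chip and name
# rows, into a fresh database directory.
subdb_vars = export_subdatabase(hs, gx_list=list(range(10)),
                                new_dbdir='/path/to/new_subdb')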