Example #1
def run_groundtruth(params):
    output_directory = utilities.basename(params.bagfile) + "_groundtruth"
    groundtruth_command = (
        "rosrun localization_analysis make_groundtruth.py "
        + params.bagfile
        + " "
        + params.base_surf_map
        + " "
        + params.maps_directory
        + " "
        + params.loc_map
        + " "
        + params.config_path
        + " -o "
        + output_directory
        + " -w "
        + params.world
        + " -i "
        + params.image_topic
        + " -r "
        + params.robot_name
    )
    if not params.use_image_features:
        groundtruth_command += " --generate-image-features"

    output_file = utilities.basename(params.bagfile) + "_groundtruth.txt"
    utilities.run_command_and_save_output(groundtruth_command, output_file)
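
# A minimal usage sketch for the function above (hypothetical values; params
# is assumed to be an argparse-style namespace, as the attribute accesses
# above imply):
import argparse

example_params = argparse.Namespace(
    bagfile="/data/bags/run1.bag",
    base_surf_map="/data/maps/base.surf.map",
    maps_directory="/data/maps",
    loc_map="/data/maps/loc.map",
    config_path="/home/user/astrobee/src/astrobee/config",
    world="iss",
    image_topic="/mgt/img_sampler/nav_cam/image_record",
    robot_name="bumble",
    use_image_features=False,
)
run_groundtruth(example_params)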
Example #2
    if not os.path.isdir(args.maps_directory):
        print("Maps directory " + args.maps_directory + " does not exist.")
        sys.exit(1)
    if os.path.isdir(args.output_directory):
        print("Output directory " + args.output_directory + " already exists.")
        sys.exit(1)

    bagfile = os.path.abspath(args.bagfile)
    base_surf_map = os.path.abspath(args.base_surf_map)
    maps_directory = os.path.abspath(args.maps_directory)

    os.mkdir(args.output_directory)
    os.chdir(args.output_directory)

    map_name = args.map_name
    bag_prefix = utilities.basename(bagfile)
    if not args.map_name:
        map_name = bag_prefix + "_groundtruth"

    make_map.make_map(bagfile, map_name, args.world, args.robot_name,
                      base_surf_map, maps_directory)

    robot_config = "config/robots/" + args.robot_name + ".config"
    groundtruth_bag = map_name + ".bag"
    groundtruth_map_file = map_name + ".brisk.vocabdb.map"
    groundtruth_pdf = "groundtruth.pdf"
    groundtruth_csv = "groundtruth.csv"
    make_groundtruth_command = (
        "rosrun localization_analysis run_graph_bag_and_plot_results.py " +
        bagfile + " " + groundtruth_map_file + " " + args.config_path +
        " -i " + args.image_topic + " -r " + robot_config + " -w " +
        args.world +
        # NOTE: the original snippet was truncated after "-w "; the output
        # arguments below are assumptions that complete the command using the
        # variables defined above, following the pattern of Example #1.
        " -o " + groundtruth_bag +
        " --output-file " + groundtruth_pdf +
        " --output-csv-file " + groundtruth_csv
    )
    utilities.run_command_and_save_output(
        make_groundtruth_command, "make_groundtruth.txt"
    )
Example #3
def create_groundtruth(
    bagfile, base_surf_map, maps_directory, map_name, world, robot_name
):
    gt_images_dir = "gt_images_" + utilities.basename(bagfile)
    os.mkdir(gt_images_dir)
    gt_images = os.path.abspath(gt_images_dir)
    extract_images_command = (
        "rosrun localization_node extract_image_bag "
        + bagfile
        + " -use_timestamp_as_image_name -image_topic /mgt/img_sampler/nav_cam/image_record -output_directory "
        + gt_images
    )
    utilities.run_command_and_save_output(extract_images_command, "extract_images.txt")

    all_gt_images = os.path.join(gt_images, "*.jpg")
    select_images_command = (
        "rosrun sparse_mapping select_images -density_factor 1.4 " + all_gt_images
    )
    utilities.run_command_and_save_output(select_images_command, "select_images.txt")

    # Set environment variables
    home = os.path.expanduser("~")
    robot_config_file = os.path.join("config/robots", robot_name + ".config")
    astrobee_path = os.path.join(home, "astrobee/src/astrobee")
    os.environ["ASTROBEE_RESOURCE_DIR"] = os.path.join(astrobee_path, "resources")
    os.environ["ASTROBEE_CONFIG_DIR"] = os.path.join(astrobee_path, "config")
    os.environ["ASTROBEE_ROBOT"] = os.path.join(
        astrobee_path, "config/robots/bumble.config"
    )
    os.environ["ASTROBEE_WORLD"] = world

    # Build groundtruth
    groundtruth_map = map_name + ".map"
    build_map_command = (
        "rosrun sparse_mapping build_map "
        + all_gt_images
        + " -output_map "
        + groundtruth_map
        + " -feature_detection -feature_matching -track_building -incremental_ba -bundle_adjustment -histogram_equalization -num_subsequent_images 100"
    )
    utilities.run_command_and_save_output(build_map_command, "build_map.txt")

    # Merge with base map
    groundtruth_surf_map = map_name + ".surf.map"
    merge_map_command = (
        "rosrun sparse_mapping merge_maps "
        + base_surf_map
        + " "
        + groundtruth_map
        + " -output_map "
        + groundtruth_surf_map
        + " -num_image_overlaps_at_endpoints 100000000 -skip_bundle_adjustment"
    )
    utilities.run_command_and_save_output(merge_map_command, "merge_map.txt")

    # Symlink the maps directory since the conversion to a BRISK map needs
    # the image files to appear at the correct relative path
    os.symlink(maps_directory, "maps")
    maps_gt_images = os.path.join("maps", gt_images_dir)
    os.symlink(gt_images, maps_gt_images)

    # Convert SURF to BRISK map
    # Get full path to output file to avoid permission errors when running
    # command in maps directory
    rebuild_output_file = os.path.join(os.getcwd(), "rebuild_map_as_brisk_map.txt")
    groundtruth_brisk_map = map_name + ".brisk.map"
    shutil.copyfile(groundtruth_surf_map, groundtruth_brisk_map)
    groundtruth_brisk_map_full_path = os.path.abspath(groundtruth_brisk_map)
    gt_path = os.getcwd()
    os.chdir("maps")
    rebuild_map_command = (
        "rosrun sparse_mapping build_map -rebuild -histogram_equalization -output_map "
        + groundtruth_brisk_map_full_path
    )
    utilities.run_command_and_save_output(rebuild_map_command, rebuild_output_file)
    # Use gt_path since relative commands would now be wrt the maps directory symlink
    os.chdir(gt_path)

    # Create vocabdb
    groundtruth_brisk_vocabdb_map = map_name + ".brisk.vocabdb.map"
    shutil.copyfile(groundtruth_brisk_map, groundtruth_brisk_vocabdb_map)
    add_vocabdb_command = (
        "rosrun sparse_mapping build_map -vocab_db -output_map "
        + groundtruth_brisk_vocabdb_map
    )
    utilities.run_command_and_save_output(add_vocabdb_command, "build_vocabdb.txt")

    # Remove symlinks
    os.unlink(maps_gt_images)
    os.unlink("maps")
Example #4
def main() :
    """
    Get the command line arguments
    """
    p = optparse.OptionParser()
    p.add_option("-d", action="store_true", dest="debug")
    p.add_option("--debug", action="store_true", dest="debug")
    p.add_option("--config_file", action="store", dest="config_file")
    p.add_option("--test_parse", action="store_true", dest="test_parse")
    p.set_defaults(debug = False)

    opts, source_file_args = p.parse_args()

    try :
        # Config File is mandatory
        if not opts.config_file :
            raise ParseError("No Config file")
        #
        #    Upload the configs
        #
        Config(opts.config_file)
        pattern_spec = DTPOParseSpec(Config.config.get_pattern_file())
    except DTPOFileError as file_error:
        dtpo_alert(log_type = 'fatal', reason = file_error.message)
        raise SystemExit("FATAL ERROR - Failed to parse config file")
    except ParseError as parse_error :
        dtpo_alert('fatal', reason = parse_error.message)
        raise SystemExit("FATAL ERROR - Failed to parse pattern file")

    #
    #    Now iterate through the files
    #
    for source_file in source_file_args:
        dtpo_log('info', "Started processing -> %s", source_file)

        try :

            #  TODO - we're assuming PDF files here
            #  Check that the file name actually ends in 'pdf'; if not,
            #  rename it, as that saves trouble with DTPO later
            suffix = source_file[-3:]
            if suffix.lower() != 'pdf' :
                dtpo_log('debug', "Adding pdf suffix on to '%s'",
                         source_file)
                source_dir = Config.config.get_source_directory() + '/'
                os.rename(source_dir + source_file,
                          source_dir + source_file + '.pdf')
                source_file += '.pdf'
            #
            #    Convert the file to text if we can and then parse it
            #
            import_details = get_import_parameters(source_file, pattern_spec,
                                                   opts.test_parse)
            if opts.test_parse :
                import_details.print_import_details(source_file)
            else :
                execute_import(import_details)
                trash_file(source_file, import_details.get_document_name())
                dtpo_alert('info',
                           file_name = import_details.get_document_name(),
                           group_name = import_details.group)
        except DTPOFileError as file_error :
            #    We failed ... Leave the file be as there is a problem with it
            dtpo_log('error', "Import failed for '%s' - file not touched\n%s",
                basename(source_file), file_error.message)
            dtpo_alert('fatal', reason = file_error.message,
                       file_name = source_file)

        except ParseError as parse_error :
            #    We failed ... Move the file to the Orphan directory
            dtpo_log('error', "Import failed for '%s' - orphaning file\n%s",
                basename(source_file), parse_error.message)
            dtpo_alert('error', reason = parse_error.message,
                       file_name = source_file)
            orphan_file(source_file)
        except Exception as exception :
            #   Something horrible has happened
            dtpo_log('fatal', "System error for '%s'\n%s",
                     basename(source_file), str(exception))
            dtpo_alert('fatal', reason = str(exception),
                       file_name = source_file)

        dtpo_log('debug', "Finished processing -> %s", source_file)
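
# The snippet above relies on project-local helpers (Config, dtpo_log,
# dtpo_alert, get_import_parameters, ...) not shown here. As context, a
# minimal sketch of the two exception types it catches, both assumed to carry
# a .message attribute (illustrative only, not the project's definitions):
class DTPOFileError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.message = message


class ParseError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.message = message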
Example #5
def parse_source_file(text_extractor, pattern_spec) :
    """
        Now iterate through the file and see if we can find anything

        We're trying to avoid multiple parses of the file (we're looking for
        up to 3 patterns: pattern 1, pattern 2 & date).  The algorithm is
        currently not efficient as we have to parse the file twice, but
        given that most patterns will be found in the first few lines it's not
        that bad.  In addition it makes things considerably less complicated.
        Pattern 2 and date can "look back" and occur before pattern 1, so
        tracking that would make things harder to understand!
    """
    assert pattern_spec

    found_string1 = None
    found_string2 = None
    found_date = None
    line_number = 0
    pattern_number = None

    #
    #   Iterate through the file for the first time looking for the primary
    #   pattern
    file_array = text_extractor.get_file_contents_as_array()
    for line_number in range(0, len(file_array)-1) :
        found_string1, pattern_number = search_pattern_list(
            pattern_spec.string1_search_dict,
            file_array,
            line_number)
        if found_string1 :
            # See if it's a special
            if found_string1 == 'SOURCE_FILE' :
                found_string1 = basename(text_extractor.source_file)
            break

    if found_string1 :
        #   We got something - see if there is a pattern2 and date to look for
        string2_search_details = None
        date_search_details = None

        if pattern_number in pattern_spec.string2_search_dict :
            string2_search_details = \
                pattern_spec.string2_search_dict[pattern_number]
        if pattern_number in pattern_spec.date_search_dict :
            date_search_details = \
                pattern_spec.date_search_dict[pattern_number]

        line_number = 0
        found_string2 = None
        found_date = None

        #   Assuming there is something to look for do the search.
        #   Stop searching for each pattern once it has been found so that a
        #   later non-matching line cannot overwrite an earlier match.
        while line_number < len(file_array)-1 and (
            (string2_search_details is not None and found_string2 is None) or
            (date_search_details is not None and found_date is None)) :

            if string2_search_details is not None and found_string2 is None :
                found_string2 = search_pattern(
                    string2_search_details, file_array, line_number)
            if date_search_details is not None and found_date is None :
                found_date = search_pattern(
                    date_search_details, file_array, line_number)
            line_number += 1

    return DTPOImportParameters(pattern_spec = pattern_spec,
                                pattern_number = pattern_number,
                                source_file = text_extractor.source_file,
                                string1 = found_string1,
                                string2 = found_string2,
                                date_string = found_date)
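
# search_pattern and search_pattern_list are project helpers not shown in
# this snippet. A minimal sketch of what search_pattern might look like,
# assuming search_details carries a compiled regex whose first group is the
# value to capture (illustrative only):
def search_pattern(search_details, file_array, line_number):
    # Return the first captured group on a match, else None
    match = search_details.pattern.search(file_array[line_number])
    return match.group(1) if match else None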
Example #6
    def basename(self):
        return utilities.basename(f"{self.title} {' '.join(self.authors)}")
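
# Hypothetical usage sketch, assuming the enclosing class exposes title and
# authors attributes and that utilities.basename turns the combined string
# into a filesystem-safe name (illustrative only):
#     paper.title = "Sparse Mapping"
#     paper.authors = ["Smith", "Jones"]
#     paper.basename()  # a sanitized name built from title and authors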