def run_main(path, host, port, job_name, pipeline_id, referenceset_uuid, block=False, custom_options=None):
    """

    :param path: Path to SubreadSet XML will be imported (if it's not already been imported)
    :param host: SL Host
    :param port: SL Port
    :param job_name:  Job name
    :param pipeline_id:  Pipeline Id (e.g, pbsmrtpipe.pipelines.my_pipeline
    :param referenceset_uuid: UUID of Rset. This *must* already be imported
    :param block: To block and poll for the analysis job to complete

    :param custom_options: Dictionary of task options for the provided
    Pipeline in the form
    {"pbalign.task_options.concordant":True}


    :type custom_options: dict | None
    :rtype: int
    """

    # look up the reference set UUID from pbservice CLI util or
    # http://smrtlink-beta:8081/secondary-analysis/datasets/references
    # TODO. 1. Import SubreadSet if it's not already imported
    # TODO. 2. Check and see if the Job with the SubreadSet UUID was already submitted
    # TODO. 3. Add option to force a new submission to override (2)
    # TODO. 4. Enable custom pipeline options json file at the CLI

    # Sanity check: make sure the SubreadSet XML can be loaded locally
    sset = SubreadSet(path)
    log.info("Loaded SubreadSet {}".format(sset))

    sal = ServiceAccessLayer(host, port)
    # Sanity check: this will fail if the SMRT Link services are not reachable
    _ = sal.get_status()

    # Step 1. Import SubreadSet (and block) if it's not imported already
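    # (get_subreadset_by_id returns None when the UUID is not known to the services)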
    service_sset = sal.get_subreadset_by_id(sset.uuid)
    # TODO. Add check to see if Job was successful
    if service_sset is None:
        log.info("Running Import-DataSet job with {}".format(path))
        sset_import_job = sal.run_import_dataset_subread(path)
        log.info("Import-DataSet job {}".format(sset_import_job))
    else:
        log.info("Found already imported SubreadSet {}".format(service_sset))

    # Step 2. Check to see if a previous analysis job has already been run
    # Immediately exit if an analysis job is found
    analysis_job = get_job_by_subreadset_uuid_or_none(sal, sset.uuid)
    if analysis_job is not None:
        log.info("Found exiting job {} for SubreadSet {}".format(analysis_job, sset))
        return 0

    # Step 3. Create a new Analysis job with custom task options (if provided)
    task_options = {} if custom_options is None else custom_options

    # Get the already successfully imported DataSets from the services
    service_sset_d = sal.get_dataset_by_uuid(sset.uuid)
    service_rset_d = sal.get_dataset_by_uuid(referenceset_uuid)

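    # run_by_pipeline_template_id blocks and polls until the analysis job reaches a
    # terminal state; create_by_pipeline_template_id only submits the job and returns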
    f = sal.run_by_pipeline_template_id if block else sal.create_by_pipeline_template_id

    # The API takes the Int id of the DataSet
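    # The entry point ids ("eid_subread", "eid_ref_dataset") must match the entry
    # points declared by the pipeline template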
    epoints = (ServiceEntryPoint("eid_subread", FileTypes.DS_SUBREADS.file_type_id, service_sset_d['id']),
               ServiceEntryPoint("eid_ref_dataset", FileTypes.DS_REF.file_type_id, service_rset_d['id']))

    job = f(job_name, pipeline_id, epoints, task_options=task_options)

    log.info("Analysis Job {}".format(job))

    if block:
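        # In blocking mode the job has reached a terminal state; anything other than
        # SUCCESSFUL (e.g., FAILED) maps to a non-zero exit code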
        exit_code = 0 if job.state == JobStates.SUCCESSFUL else 1
    else:
        # the job is in the created state
        exit_code = 0

    return exit_code
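
# Example invocation (a minimal sketch; the path, host, port, job name, pipeline id and
# ReferenceSet UUID below are illustrative placeholders, and the task-option key is the
# one used in the docstring above):
#
#   exit_code = run_main(
#       "/path/to/my.subreadset.xml",
#       "smrtlink-beta", 8081,
#       "My Analysis Job",
#       "pbsmrtpipe.pipelines.my_pipeline",
#       referenceset_uuid="<referenceset-uuid>",
#       block=True,
#       custom_options={"pbalign.task_options.concordant": True},
#   )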