Example #1
0
def sammplesheet_exonuclease(lims, process_id, output_file):
    """Create manual pipetting samplesheet for Exonuclease protocol.

    Writes a tab-separated master-mix table: one row per reagent with the
    per-sample volume (from the process UDFs) and the total volume
    (sample count * per-sample volume * 1.25), followed by a TOTAL row.
    """
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # Per-sample reagent volumes, read from the process UDFs.
    reagent_names = ['EXO I', 'EXO III', 'Ampligase buffer 10X', 'H2O']
    data = [[name, process.udf[name]] for name in reagent_names]

    # Total volume per reagent for all samples plus 25% surplus.
    for row in data:
        row.append(sample_count * row[1] * 1.25)

    # Totals row (sums computed before the row itself is added).
    # NOTE(review): the per-sample total (column 2) does not carry the 25%
    # surplus even though the label suggests it does — confirm intent.
    data.append([
        'TOTAL (incl. 25% overmaat)',
        sum(row[1] for row in data),
        sum(row[2] for row in data),
    ])

    # Write the samplesheet: header, then one line per row.
    output_file.write('\tMaster Mix (ul)\t{0}\n'.format(sample_count))
    for name, per_sample, total in data:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            name, per_sample, total))
Example #2
0
def set_sequence_name(lims, process_id):
    """Rename every output analyte to its first sample's sequence name."""
    process = Process(lims, id=process_id)
    analytes = process.analytes()[0]
    for analyte in analytes:
        first_sample = analyte.samples[0]
        analyte.name = get_sequence_name(first_sample)
        analyte.put()
Example #3
0
def sammplesheet_pcr_exonuclease(lims, process_id, output_file):
    """Create manual pipetting samplesheet for PCR after Exonuclease protocol.

    Writes a tab-separated master-mix table: one row per reagent with the
    per-sample volume and the total volume (10% surplus included),
    followed by a TOTAL row.
    """
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # Per-sample reagent volumes, read from the process UDFs.
    reagent_names = [
        '2X iProof',
        'Illumina forward primer(100uM) MIP_OLD_BB_FOR',
        'H2O',
    ]
    data = [[name, process.udf[name]] for name in reagent_names]

    # Total volume per reagent for all samples plus 10% surplus.
    for row in data:
        row.append(sample_count * row[1] * 1.1)

    # Totals row (sums computed before the row itself is added); the
    # per-sample column also carries the 10% surplus here.
    data.append([
        'TOTAL (incl. 10% overmaat)',
        sum(row[1] for row in data) * 1.1,
        sum(row[2] for row in data),
    ])

    # Write the samplesheet: header, then one line per row.
    output_file.write('\tMaster Mix (ul)\t{0}\n'.format(sample_count))
    for name, per_sample, total in data:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            name, per_sample, total))
Example #4
0
def samplesheet_capture(lims, process_id, output_file):
    """Create manual pipetting samplesheet for capture protocol.

    Writes a tab-separated master-mix table: one row per reagent with the
    per-sample volume and the total volume (10% surplus included),
    followed by a final per-well volume row.
    """
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # Per-sample reagent volumes, read from the process UDFs.
    reagent_names = [
        'Ampligase Buffer 10X',
        'MIP pool werkoplossing',
        '*dNTP 0.25mM',
        'Hemo Klentaq 10U/ul',
        'Ampligase 100U/ul',
        'Water',
    ]
    data = [[name, process.udf[name]] for name in reagent_names]

    # Total volume per reagent for all samples plus 10% surplus.
    for row in data:
        row.append(sample_count * row[1] * 1.1)

    # Final volume row (sums computed before the row itself is added).
    data.append([
        'ul MM in elke well',
        sum(row[1] for row in data),
        sum(row[2] for row in data),
    ])

    # Write the samplesheet: header, then one line per row.
    output_file.write('Mastermix\t1\t{0}\n'.format(sample_count))
    for name, per_sample, total in data:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            name, per_sample, total))
Example #5
0
def copy_layout(lims, process_id):
    """Copy placement layout from previous steps."""
    process = Process(lims, id=process_id)
    used_placements = []

    # Find the last non-tube output container among the parent processes.
    parent_container = None
    for parent_process in process.parent_processes():
        if not parent_process:
            continue
        for container in parent_process.output_containers():
            if container.type != Containertype(lims, id='2'):  # skip tubes
                parent_container = container

    if parent_container:
        # Map sample name -> well placement in the parent container.
        parent_placements = {}
        for well in parent_container.placements:
            sample_name = parent_container.placements[well].samples[0].name
            parent_placements[sample_name] = well

        # Create a new container of the same type and mirror the layout,
        # assigning each well at most once.
        new_container = Container.create(lims, type=parent_container.type)
        placement_list = []
        for artifact in process.analytes()[0]:
            sample_name = artifact.samples[0].name
            if sample_name not in parent_placements:
                continue
            well = parent_placements[sample_name]
            if well not in used_placements:
                placement_list.append([artifact, (new_container, well)])
                used_placements.append(well)

        process.step.placements.set_placement_list(placement_list)
        process.step.placements.post()
Example #6
0
def storage_location(lims, process_id, output_file):
    """Generate storage location label file.

    Writes one tab-separated line per analyte: sample name, storage
    location and birth year, taken from the first sample's UDFs.
    """
    process = Process(lims, id=process_id)
    for artifact in process.analytes()[0]:
        sample = artifact.samples[0]
        location = sample.udf['Dx Opslaglocatie']
        output_file.write(
            '{sample}\t{storage_location}\t{birth_date}\n'.format(
                sample=sample.name,
                storage_location=location,
                birth_date=sample.udf['Dx Geboortejaar']))
def main(lims, pid, epp_logger):
    """Set concentration UDFs and QC flags from a 'Qubit Result File'.

    Reads the shared Qubit result file, writes 'Concentration' /
    'Conc. Units' UDFs and a PASSED/FAILED qc flag on each result file
    (when a minimum-concentration threshold is set on the process),
    then prints a summary to stderr.

    :param lims: LIMS connection object.
    :param pid: Process id of the current step.
    :param epp_logger: EPP logger (kept for interface parity).
    """
    process = Process(lims, id=pid)
    # List (not a lazy map object) so the parser can safely reuse it.
    sample_names = [analyte.name for analyte in process.analytes()[0]]
    target_files = process.result_files()
    file_handler = ReadResultFiles(process)
    files = file_handler.shared_files['Qubit Result File']
    qubit_result_file = file_handler.format_file(files,
                                                 name='Qubit Result File',
                                                 first_header='Sample',
                                                 find_keys=sample_names)
    missing_samples = 0
    low_conc = 0
    bad_formated = 0
    abstract = []
    udfs = dict(process.udf.items())
    # Bug fix: dict.has_key was removed in Python 3; use the `in` operator.
    if "Minimum required concentration (ng/ul)" in udfs:
        min_conc = udfs["Minimum required concentration (ng/ul)"]
    else:
        min_conc = None
        abstract.append("Set 'Minimum required concentration (ng/ul)' to get qc-flaggs based on this treshold!")
    for target_file in target_files:
        sample = target_file.samples[0].name
        if sample in qubit_result_file:
            sample_mesurements = qubit_result_file[sample]
            if "Sample Concentration" in sample_mesurements.keys():
                conc, unit = sample_mesurements["Sample Concentration"]
                if conc == 'Out Of Range':
                    target_file.qc_flag = "FAILED"
                elif conc.replace('.', '').isdigit():
                    conc = float(conc)
                    if unit == 'ng/mL':
                        # Normalize to ng/ul before comparing and storing.
                        conc = np.true_divide(conc, 1000)
                    if min_conc:
                        if conc < min_conc:
                            target_file.qc_flag = "FAILED"
                            low_conc += 1
                        else:
                            target_file.qc_flag = "PASSED"
                    target_file.udf['Concentration'] = conc
                    target_file.udf['Conc. Units'] = 'ng/ul'
                else:
                    bad_formated += 1
                set_field(target_file)
        else:
            missing_samples += 1

    if low_conc:
        abstract.append('{0}/{1} samples have low concentration.'.format(low_conc, len(target_files)))
    if missing_samples:
        abstract.append('{0}/{1} samples are missing in Qubit Result File.'.format(missing_samples, len(target_files)))
    if bad_formated:
        abstract.append('There are {0} badly formated samples in Qubit Result File. Please fix these to get proper results.'.format(bad_formated))

    # Bug fix: Python 3 print function (stderr is logged and shown in GUI).
    print(' '.join(abstract), file=sys.stderr)
Example #8
0
def old_main(lims, pid, epp_logger):
    """Legacy variant of main(): set concentration UDFs and QC flags.

    Identical to main() except the Qubit file parser accepts either
    'Test' or 'Sample' as the first header.

    :param lims: LIMS connection object.
    :param pid: Process id of the current step.
    :param epp_logger: EPP logger (kept for interface parity).
    """
    process = Process(lims, id=pid)
    # List (not a lazy map object) so the parser can safely reuse it.
    sample_names = [analyte.name for analyte in process.analytes()[0]]
    target_files = process.result_files()
    file_handler = ReadResultFiles(process)
    files = file_handler.shared_files['Qubit Result File']
    qubit_result_file = file_handler.format_file(files,
                                                 name='Qubit Result File',
                                                 first_header=['Test', 'Sample'],
                                                 find_keys=sample_names)
    missing_samples = 0
    low_conc = 0
    bad_formated = 0
    abstract = []
    udfs = dict(process.udf.items())
    # Bug fix: dict.has_key was removed in Python 3; use the `in` operator.
    if "Minimum required concentration (ng/ul)" in udfs:
        min_conc = udfs["Minimum required concentration (ng/ul)"]
    else:
        min_conc = None
        abstract.append("Set 'Minimum required concentration (ng/ul)' to get qc-flaggs based on this treshold!")
    for target_file in target_files:
        sample = target_file.samples[0].name
        if sample in qubit_result_file:
            sample_mesurements = qubit_result_file[sample]
            if "Sample Concentration" in sample_mesurements.keys():
                conc, unit = sample_mesurements["Sample Concentration"]
                if conc == 'Out Of Range':
                    target_file.qc_flag = "FAILED"
                elif conc.replace('.', '').isdigit():
                    conc = float(conc)
                    if unit == 'ng/mL':
                        # Normalize to ng/ul before comparing and storing.
                        conc = np.true_divide(conc, 1000)
                    if min_conc:
                        if conc < min_conc:
                            target_file.qc_flag = "FAILED"
                            low_conc += 1
                        else:
                            target_file.qc_flag = "PASSED"
                    target_file.udf['Concentration'] = conc
                    target_file.udf['Conc. Units'] = 'ng/ul'
                else:
                    bad_formated += 1
                set_field(target_file)
        else:
            missing_samples += 1

    if low_conc:
        abstract.append('{0}/{1} samples have low concentration.'.format(low_conc, len(target_files)))
    if missing_samples:
        abstract.append('{0}/{1} samples are missing in Qubit Result File.'.format(missing_samples, len(target_files)))
    if bad_formated:
        abstract.append('There are {0} badly formated samples in Qubit Result File. Please fix these to get proper results.'.format(bad_formated))

    # Bug fix: Python 3 print function (stderr is logged and shown in GUI).
    print(' '.join(abstract), file=sys.stderr)
Example #9
0
def main(lims, args, epp_logger):
    """Copy UDFs from each output analyte to its first sample.

    Changes are appended to the status changelog; a summary is printed
    to stderr (which is logged and shown in the GUI).
    """
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    correct_artifacts = 0
    incorrect_artifacts = 0
    no_updated = 0
    p = Process(lims, id=args.pid)
    artifacts, inf = p.analytes()

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    # Default destination UDF names to the source names; lengths must match.
    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error(
            "source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)

    for source_udf, dest_udf in zip(source_udfs, dest_udfs):
        with open(args.status_changelog, 'a') as changelog_f:
            for artifact in artifacts:
                if source_udf not in artifact.udf:
                    incorrect_artifacts += 1
                    logging.warning(("Found artifact for sample {0} with {1} "
                                     "undefined/blank, exiting").format(
                                         artifact.samples[0].name, source_udf))
                    continue
                correct_artifacts += 1
                copy_sesion = CopyField(artifact, artifact.samples[0],
                                        source_udf, dest_udf)
                if copy_sesion.copy_udf(changelog_f):
                    no_updated += 1

    if incorrect_artifacts == 0:
        warning = "no artifacts"
    else:
        warning = "WARNING: skipped {0} udfs(s)".format(incorrect_artifacts)

    abstract = ("Updated {ua} udf(s), out of {ca} in total, "
                "{warning} with incorrect udf info.").format(
                    ua=no_updated, ca=correct_artifacts, warning=warning)

    print(abstract,
          file=sys.stderr)  # stderr will be logged and printed in GUI
def main(lims, args, epp_logger):
    """Copy process-level UDFs to the projects of all analyte samples.

    Changes are appended to the status changelog; a summary is printed
    to stderr.
    """
    no_updated = 0
    incorrect_udfs = 0
    project_names = ''
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    s_elt = Process(lims, id=args.pid)
    analytes, inf = s_elt.analytes()

    # Collect the unique projects behind all samples of all analytes.
    d_elts = list({samp.project
                   for analyte in analytes
                   for samp in analyte.samples})

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    # Default destination UDF names to the source names; lengths must match.
    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error(
            "source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)

    for d_elt in d_elts:
        project_names = ' '.join([project_names, d_elt.name])
        for source_udf, dest_udf in zip(source_udfs, dest_udfs):
            with open(args.status_changelog, 'a') as changelog_f:
                if source_udf in s_elt.udf:
                    copy_sesion = CopyField(s_elt, d_elt, source_udf, dest_udf)
                    if copy_sesion.copy_udf(changelog_f):
                        no_updated += 1
                else:
                    logging.warning(
                        ("Udf: {1} in Process {0} is undefined/blank, exiting"
                         ).format(s_elt.id, source_udf))
                    incorrect_udfs += 1

    if incorrect_udfs > 0:
        warn = "Failed to update %s udf(s) due to missing/wrong source udf info." % incorrect_udfs
    else:
        warn = ''

    abstract = "Updated {up} udf(s). Handeled project(s): {pr} {w}".format(
        up=no_updated, pr=project_names, w=warn)
    print(abstract, file=sys.stderr)
def main(lims, args, epp_logger):
    """Copy process-level UDFs to the projects of all analyte samples.

    Changes are appended to the status changelog; a summary is printed
    to stderr.

    :param lims: LIMS connection object.
    :param args: Parsed arguments (pid, source_udf, dest_udf, status_changelog).
    :param epp_logger: EPP logger used to prepend the old changelog.
    """
    d_elts = []
    no_updated = 0
    incorrect_udfs = 0
    project_names = ''
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    s_elt = Process(lims, id=args.pid)
    analytes, inf = s_elt.analytes()

    # Collect the unique projects behind all samples of all analytes.
    for analyte in analytes:
        for samp in analyte.samples:
            d_elts.append(samp.project)
    d_elts = list(set(d_elts))

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    # Default destination UDF names to the source names; lengths must match.
    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error("source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)

    for d_elt in d_elts:
        project_names = ' '.join([project_names, d_elt.name])
        for i in range(len(source_udfs)):
            source_udf = source_udfs[i]
            dest_udf = dest_udfs[i]
            with open(args.status_changelog, 'a') as changelog_f:
                if source_udf in s_elt.udf:
                    copy_sesion = CopyField(s_elt, d_elt, source_udf, dest_udf)
                    test = copy_sesion.copy_udf(changelog_f)
                    if test:
                        no_updated = no_updated + 1
                else:
                    logging.warning(("Udf: {1} in Process {0} is undefined/blank, exiting").format(s_elt.id, source_udf))
                    incorrect_udfs = incorrect_udfs + 1

    if incorrect_udfs > 0:
        warn = "Failed to update %s udf(s) due to missing/wrong source udf info." % incorrect_udfs
    else:
        warn = ''

    d = {'up': no_updated,
         'ap': len(d_elts),
         'w': warn,
         'pr': project_names}

    abstract = ("Updated {up} udf(s). Handeled project(s): {pr} {w}").format(**d)
    # Bug fix: `print >> sys.stderr` is Python-2-only syntax; use the
    # print function as the sibling implementation of this script does.
    print(abstract, file=sys.stderr)
def main(lims, args, epp_logger):
    """Copy UDFs from each output analyte to its first sample.

    Changes are appended to the status changelog; a summary is printed
    to stderr (which is logged and shown in the GUI).

    :param lims: LIMS connection object.
    :param args: Parsed arguments (pid, source_udf, dest_udf, status_changelog).
    :param epp_logger: EPP logger used to prepend the old changelog.
    """
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    correct_artifacts = 0
    incorrect_artifacts = 0
    no_updated = 0
    p = Process(lims, id=args.pid)
    artifacts, inf = p.analytes()

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    # Default destination UDF names to the source names; lengths must match.
    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error("source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)
    for i in range(len(source_udfs)):
        source_udf = source_udfs[i]
        dest_udf = dest_udfs[i]
        with open(args.status_changelog, 'a') as changelog_f:
            for artifact in artifacts:
                if source_udf in artifact.udf:
                    correct_artifacts = correct_artifacts + 1
                    copy_sesion = CopyField(artifact, artifact.samples[0], source_udf, dest_udf)
                    test = copy_sesion.copy_udf(changelog_f)
                    if test:
                        no_updated = no_updated + 1
                else:
                    incorrect_artifacts = incorrect_artifacts + 1
                    logging.warning(("Found artifact for sample {0} with {1} "
                                   "undefined/blank, exiting").format(artifact.samples[0].name, source_udf))

    if incorrect_artifacts == 0:
        warning = "no artifacts"
    else:
        warning = "WARNING: skipped {0} udfs(s)".format(incorrect_artifacts)
    d = {'ua': no_updated,
         'ca': correct_artifacts,
         'ia': incorrect_artifacts,
         'warning': warning}

    abstract = ("Updated {ua} udf(s), out of {ca} in total, "
                "{warning} with incorrect udf info.").format(**d)

    # Bug fix: `print >> sys.stderr` is Python-2-only syntax; use the
    # print function as the sibling implementation of this script does.
    print(abstract, file=sys.stderr)  # stderr will be logged and printed in GUI
Example #13
0
def set_runid_name(lims, process_id):
    """Change artifact name to run id."""
    process = Process(lims, id=process_id)
    analyte = process.analytes()[0][0]
    input_artifact = process.all_inputs()[0]
    container_name = analyte.container.name

    # Find the sequencing process whose first analyte sits in the same
    # container. Assume one sequence process per input artifact.
    for sequence_process_type in config.sequence_process_types:
        matches = lims.get_processes(
            type=sequence_process_type, inputartifactlimsid=input_artifact.id)
        for sequence_process in matches:
            first_analyte = sequence_process.analytes()[0][0]
            if first_analyte.container.name == container_name:
                analyte.name = sequence_process.udf['Run ID']
                analyte.put()
Example #14
0
def finish_protocol_complete(lims, process_id):
    """Finish next step after current step is finished (Dx Mark protocol complete).

    Collects all analytes whose last workflow stage is 'Dx Mark protocol
    complete' and still QUEUED, starts that step via the REST API, marks
    every analyte complete and advances the step twice to finish it.

    :param lims: LIMS connection object.
    :param process_id: Id of the current process.
    """
    process = Process(lims, id=process_id)
    inputs = ''
    actions = []
    # Bug fix: step_uri was previously unbound (NameError at the format
    # call below) when no analyte matched; initialize and guard instead.
    step_uri = None

    # Check all analytes
    for analyte in process.analytes()[0]:
        analyte_workflow_stages_and_statuses = analyte.workflow_stages_and_statuses
        if analyte_workflow_stages_and_statuses[-1][2] == 'Dx Mark protocol complete' and analyte_workflow_stages_and_statuses[-1][1] == 'QUEUED':
            actions.append({'action': 'complete', 'artifact': analyte})
            step_uri = analyte_workflow_stages_and_statuses[-1][0].step.uri
            inputs += '<input uri="{0}" replicates="1"/>'.format(analyte.uri)

    # Nothing queued for the target stage: nothing to start or finish.
    if step_uri is None:
        return

    # Generate start step XML
    xml = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
            <tmp:step-creation xmlns:tmp="http://genologics.com/ri/step">
                <configuration uri="{0}"/>
                <inputs>
                    {1}
                </inputs>
            </tmp:step-creation>
    '''.format(step_uri, inputs)

    # Start step
    output = lims.post(
        uri=lims.get_uri('steps'),
        data=xml
    )

    # Get started step uri
    step_action_uri = output.find('actions').get('uri')
    step_actions = StepActions(lims, uri=step_action_uri)

    # Advance to next step screen
    step = step_actions.step
    step.advance()  # Next step

    # Mark everything complete
    step_actions.set_next_actions(actions)
    step_actions.put()

    # Finish step
    step.advance()
Example #15
0
def check_family(lims, process_id):
    """Flag samples whose barcode also occurs within their family.

    Samples without a family number get a 'not checked' marker instead.
    """
    process = Process(lims, id=process_id)
    for artifact in process.analytes()[0]:
        sample = artifact.samples[0]
        barcode = artifact.reagent_labels[0]

        if 'Dx Familienummer' not in sample.udf:
            # No family number available, so the barcode cannot be checked.
            artifact.udf['Dx monster met BC duplicaat'] = "Barcode niet gecontroleerd."
            artifact.put()
            continue

        query_udf = {'Dx Familienummer': sample.udf['Dx Familienummer']}
        for family_sample in lims.get_samples(udf=query_udf):
            if family_sample.id == sample.id:
                continue
            # Another family member with the same barcode in this process type?
            family_sample_artifacts = lims.get_artifacts(
                samplelimsid=family_sample.id,
                reagent_label=barcode,
                process_type=process.type.name)
            if family_sample_artifacts:
                artifact.udf['Dx monster met BC duplicaat'] = "{sample}".format(
                    sample=family_sample.name)
                artifact.put()
Example #16
0
def set_avg_q30(lims, process_id):
    """Calculate average % Bases >=Q30 as a yield-weighted mean of R1/R2."""
    process = Process(lims, id=process_id)
    artifact = process.analytes()[0][0]
    container_udf = artifact.container.udf

    required_udfs = (
        '% Bases >=Q30 R1', 'Yield PF (Gb) R1',
        '% Bases >=Q30 R2', 'Yield PF (Gb) R2',
    )
    if all(udf in container_udf for udf in required_udfs):
        r1_q30 = container_udf['% Bases >=Q30 R1']
        r1_yield = container_udf['Yield PF (Gb) R1']
        r2_q30 = container_udf['% Bases >=Q30 R2']
        r2_yield = container_udf['Yield PF (Gb) R2']

        # Weight each read's Q30 percentage by its yield.
        # NOTE(review): divides by r1_yield + r2_yield — raises
        # ZeroDivisionError if both yields are 0; confirm that cannot occur.
        total_yield = r1_yield + r2_yield
        average_q30 = (r1_q30 * r1_yield + r2_q30 * r2_yield) / total_yield

        artifact.udf['Dx Average % Bases >=Q30'] = average_q30
        artifact.put()
Example #17
0
def results(lims, process_id):
    """Upload magnis export to process.

    Parses the 'Magnis export file' XML attached to the process, stores
    labware lot numbers in process UDFs, verifies the sample input strip
    barcode, and sets the per-sample 'Lotnr check' flag based on the
    index strip number.

    :param lims: LIMS connection object.
    :param process_id: Id of the current process.
    """
    process = Process(lims, id=process_id)
    lot_error = False
    # Bug fix: previously unbound (NameError below) when the export had
    # no 'Index Strip' labware; a missing strip now fails the lot check.
    index_strip_number = None

    for output_file in process.result_files():
        if output_file.name == 'Magnis export file':
            magnis_xml_file = output_file.files[0]
            magnis_data = xmltodict.parse(lims.get_file_contents(magnis_xml_file.id))

            # Save lot numbers and check sample input strip barcode
            for labware in magnis_data['RunInfo']['LabwareInfos']['Labware']:
                if labware['@Name'] == 'Probe Input Strip':
                    process.udf['lot # SureSelect v7'] = labware['@LotNumber']
                elif labware['@Name'] == 'Reagent Plate':
                    process.udf['lot # Magnis Sureselect XT HS reagent plate'] = labware['@LotNumber']
                elif labware['@Name'] == 'Beads/Buffers Plate':
                    process.udf['lot # Magnis SureSelect XT Beads/Buffers Plate'] = labware['@LotNumber']
                elif labware['@Name'] == 'Index Strip':
                    process.udf['lot # Dual BC strip'] = labware['@LotNumber']
                    index_strip_number = int(labware['@IndexStrip'])
                elif labware['@Name'] == 'Reagent Strip':
                    process.udf['lot # BR Oligo strip (blockers)'] = labware['@LotNumber']
                elif (
                    labware['@Name'] == 'Sample Input Strip' and
                    process.udf['Barcode sample input strip'] != labware['@BarCode']
                ):
                    lot_error = True

    # Check sample reagents and fill Lotnr check flag
    for output in process.analytes()[0]:
        # Index number is encoded in positions 3-4 of the reagent label.
        label_index_number = int(output.reagent_labels[0][3:5])
        if lot_error or label_index_number != index_strip_number:
            output.udf['Lotnr check'] = False
        else:
            output.udf['Lotnr check'] = True
        output.put()
    process.put()
Example #18
0
def create_file(lims, process_id, output_file):
    """Write a merge overview file for samples flagged 'Dx Mergen'.

    One tab-separated line per merged sample: sequence name plus the
    merge-1/merge-2 sample and run names (blank when not set).
    """
    process = Process(lims, id=process_id)
    samples = process.analytes()[0][0].samples

    merge_udfs = (
        'Dx Merge 1 Samplenaam', 'Dx Merge 1 Runnaam',
        'Dx Merge 2 Samplenaam', 'Dx Merge 2 Runnaam',
    )

    output_file.write(
        'Sample\tMerge 1 Sample\tMerge 1 Sequencing Run\tMerge 2 Sample\tMerge 2 Sequencing Run\n'
    )

    for sample in samples:
        if 'Dx Mergen' in sample.udf and sample.udf['Dx Mergen']:
            # Missing merge UDFs become empty columns.
            sample_merge = [
                sample.udf[udf] if udf in sample.udf else ''
                for udf in merge_udfs
            ]
            output_file.write('{sample}\t{merge}\n'.format(
                sample=get_sequence_name(sample),
                merge='\t'.join(sample_merge)))
Example #19
0
def create_file(lims, process_id, output_file):
    """Create ped file.

    Groups non-Research samples by 'Dx Familienummer' into father /
    mother / children, then writes one PED line per person:
    family, name, paternal sample, maternal sample, sex, affection.
    """
    process = Process(lims, id=process_id)
    samples = process.analytes()[0][0].samples

    # family number -> {'father': {...}, 'mother': {...}, 'children': [...]}
    ped_families = {}

    for sample in samples:
        # Only samples with a family number and a non-Research reason.
        if 'Dx Familienummer' in sample.udf and sample.udf[
                'Dx Onderzoeksreden'] != 'Research':
            family = sample.udf['Dx Familienummer']
            sample_name = get_sequence_name(sample)
            ped_sample = {'name': sample_name}

            if family not in ped_families:
                ped_families[family] = {
                    'father': {},
                    'mother': {},
                    'children': []
                }

            # PED sex encoding: 1 = male, 2 = female, 0 = unknown.
            if sample.udf['Dx Geslacht'].lower() == 'man':
                ped_sample['sex'] = 1
            elif sample.udf['Dx Geslacht'].lower() == 'vrouw':
                ped_sample['sex'] = 2
            elif sample.udf['Dx Geslacht'].lower() == 'onbekend':
                ped_sample['sex'] = 0

            # Determine affection
            # PED affection encoding: 0 = unknown, 1 = unaffected, 2 = affected.
            ped_sample['affection'] = 0  # unkown
            if 'Bevestiging diagnose' in sample.udf['Dx Onderzoeksreden']:
                ped_sample['affection'] = 2  # affected
            elif 'Informativiteitstest' in sample.udf['Dx Onderzoeksreden']:
                ped_sample['affection'] = 1  # unaffected
            elif 'Partner onderzoek' in sample.udf['Dx Onderzoeksreden']:
                ped_sample['affection'] = 1  # unaffected

            # Determine family relationships
            # Parents ('Ouder') are assigned by sex; everyone else is a child.
            # NOTE(review): a later parent of the same sex overwrites an
            # earlier one — presumably at most one per family; confirm.
            if sample.udf['Dx Familie status'] == 'Ouder' and ped_sample[
                    'sex'] == 1:
                ped_families[family]['father'] = ped_sample
            elif sample.udf['Dx Familie status'] == 'Ouder' and ped_sample[
                    'sex'] == 2:
                ped_families[family]['mother'] = ped_sample
            else:
                ped_families[family]['children'].append(ped_sample)

    # Emit parents first (with '0' parent fields), then children linked
    # to whichever parents are present ('0' when a parent is missing).
    for family in ped_families:
        ped_family = ped_families[family]
        paternal_sample_name = '0'
        maternal_sample_name = '0'

        if ped_family['father']:
            paternal_sample_name = ped_family['father']['name']
            output_file.write(
                '{family}\t{name}\t{paternal_sample}\t{maternal_sample}\t{sex}\t{affection}\n'
                .format(
                    family=family,
                    name=ped_family['father']['name'],
                    paternal_sample='0',
                    maternal_sample='0',
                    sex=ped_family['father']['sex'],
                    affection=ped_family['father']['affection'],
                ))
        if ped_family['mother']:
            maternal_sample_name = ped_family['mother']['name']
            output_file.write(
                '{family}\t{name}\t{paternal_sample}\t{maternal_sample}\t{sex}\t{affection}\n'
                .format(
                    family=family,
                    name=ped_family['mother']['name'],
                    paternal_sample='0',
                    maternal_sample='0',
                    sex=ped_family['mother']['sex'],
                    affection=ped_family['mother']['affection'],
                ))
        for child_sample in ped_family['children']:
            output_file.write(
                '{family}\t{name}\t{paternal_sample}\t{maternal_sample}\t{sex}\t{affection}\n'
                .format(
                    family=family,
                    name=child_sample['name'],
                    paternal_sample=paternal_sample_name,
                    maternal_sample=maternal_sample_name,
                    sex=child_sample['sex'],
                    affection=child_sample['affection'],
                ))
Example #20
0
def samplesheet_normalise(lims, process_id, output_file):
    """Create Caliper samplesheet for normalising a 96 well plate.

    Collects approved concentration measurements (Qubit results overrule
    Tecan Spark results) for all samples in the process, computes the DNA
    and H2O pipetting volumes needed to reach the requested ng output at
    the requested end volume, and writes a tab-separated samplesheet.

    :param lims: genologics Lims connection.
    :param process_id: Clarity LIMS process id.
    :param output_file: writable file handle for the samplesheet.
    """
    output_file.write(
        'Monsternummer\tPlate_Id_input\tWell\tPlate_Id_output\tPipetteervolume DNA (ul)\tPipetteervolume H2O (ul)\n'
    )
    process = Process(lims, id=process_id)
    process_samples = [artifact.name for artifact in process.analytes()[0]]
    parent_processes = []
    parent_process_barcode_manual = 'None'
    parent_process_barcode_hamilton = 'None'

    # Find the purification parent processes and remember the input plate
    # barcode per purification route.
    for p in process.parent_processes():
        if p.type.name.startswith('Dx manueel gezuiverd placement'):
            for pp in p.parent_processes():
                parent_processes.append(pp)
            parent_process_barcode_manual = p.output_containers()[0].name
        elif p.type.name.startswith('Dx Hamilton'):
            parent_processes.append(p)
            parent_process_barcode_hamilton = p.output_containers()[0].name
        elif p.type.name.startswith('Dx Zuiveren gDNA manueel'):
            parent_processes.append(p)

    # The Hamilton barcode wins when both purification routes are present.
    if parent_process_barcode_hamilton != 'None':
        parent_process_barcode = parent_process_barcode_hamilton
    else:
        parent_process_barcode = parent_process_barcode_manual

    # Get all Qubit and Tecan Spark QC types
    qc_process_types = clarity_epp.export.utils.get_process_types(
        lims, ['Dx Qubit QC', 'Dx Tecan Spark 10M QC'])

    # Get all unique input artifact ids
    parent_processes = list(set(parent_processes))
    input_artifact_ids = []
    for p in parent_processes:
        for analyte in p.all_outputs():
            input_artifact_ids.append(analyte.id)
    input_artifact_ids = list(set(input_artifact_ids))

    # Get unique QC processes for input artifacts
    qc_processes = list(
        set(
            lims.get_processes(type=qc_process_types,
                               inputartifactlimsid=input_artifact_ids)))

    samples_measurements_qubit = {}
    sample_concentration = {}
    samples_measurements_tecan = {}
    filled_wells = []
    # Column-major 96 well order: A1..H1, A2..H2, ..., A12..H12.
    well_order = [
        '{0}{1}'.format(row, column)
        for column in range(1, 13)
        for row in 'ABCDEFGH'
    ]
    order = dict(zip(well_order, range(len(well_order))))
    last_filled_well = 0
    monsternummer = {}
    volume_DNA = {}
    volume_H2O = {}
    conc_measured = {}
    output_ng = float(process.udf['Output genormaliseerd gDNA'])
    conc = {}
    output_ul = process.udf['Eindvolume (ul) genormaliseerd gDNA']
    output_plate_barcode = process.output_containers()[0].name

    # Collect approved measurements per sample, split by QC machine.
    # Artifacts named with 'Tecan', 'check' or 'Label' are bookkeeping
    # outputs and are skipped.
    for qc_process in qc_processes:
        if 'Dx Qubit QC' in qc_process.type.name:
            samples_measurements = samples_measurements_qubit
        elif 'Dx Tecan Spark 10M QC' in qc_process.type.name:
            samples_measurements = samples_measurements_tecan
        else:
            continue
        for artifact in qc_process.all_outputs():
            sample = artifact.samples[0].name
            if sample in process_samples and not any(
                    keyword in artifact.name
                    for keyword in ['Tecan', 'check', 'Label']):
                if 'Dx Conc. goedgekeurde meting (ng/ul)' in artifact.udf:
                    measurement = artifact.udf[
                        'Dx Conc. goedgekeurde meting (ng/ul)']
                    samples_measurements.setdefault(sample, []).append(
                        measurement)
                elif sample not in sample_concentration:
                    # No approved measurement at all: flag as 'geen'.
                    sample_concentration[sample] = 'geen'

    # Average the measurements per sample; Qubit results overrule any
    # previously stored Tecan average.
    for qc_process in qc_processes:
        for artifact in qc_process.all_outputs():
            sample = artifact.samples[0].name
            if not any(keyword in artifact.name
                       for keyword in ['Tecan', 'check', 'Label']):
                # BUGFIX: 'machine' was previously only assigned inside the
                # if/elif below, so an artifact without an approved
                # measurement hit an unbound (or stale, from the previous
                # iteration) 'machine' value - raising UnboundLocalError or
                # attributing the sample to the wrong machine. Reset it for
                # every artifact.
                machine = None
                if ('Dx Tecan Spark 10M QC' in qc_process.type.name
                        and 'Dx Conc. goedgekeurde meting (ng/ul)' in artifact.udf):
                    machine = 'Tecan'
                elif ('Dx Qubit QC' in qc_process.type.name
                        and 'Dx Conc. goedgekeurde meting (ng/ul)' in artifact.udf):
                    machine = 'Qubit'

                if machine and (sample not in sample_concentration
                                or machine == 'Qubit'):
                    # BUGFIX: look the sample up only in the dict belonging
                    # to the detected machine; the old combined membership
                    # check could pass while the machine-specific lookup
                    # raised KeyError.
                    if machine == 'Tecan' and sample in samples_measurements_tecan:
                        sample_measurements = samples_measurements_tecan[sample]
                    elif machine == 'Qubit' and sample in samples_measurements_qubit:
                        sample_measurements = samples_measurements_qubit[sample]
                    else:
                        continue
                    sample_concentration[sample] = sum(
                        sample_measurements) / float(len(sample_measurements))

    # Determine the highest-order filled well on the output plate.
    for placement, artifact in process.output_containers()[0].placements.items(
    ):
        placement = ''.join(placement.split(':'))
        filled_wells.append(placement)
        if order[placement] > last_filled_well:
            last_filled_well = order[placement]

    # Pre-fill every well up to (excluding) the last filled well as empty;
    # actually filled wells are overwritten below. Direct index into
    # well_order replaces the old linear scan over the order dict.
    for index in range(last_filled_well):
        placement = well_order[index]
        monsternummer[placement] = 'Leeg'
        volume_DNA[placement] = 0
        volume_H2O[placement] = 0

    for placement, artifact in process.output_containers()[0].placements.items(
    ):
        sample = artifact.samples[0].name
        if sample in process_samples:
            placement = ''.join(placement.split(':'))
            monsternummer[placement] = sample
            conc_measured[placement] = sample_concentration[sample]
            if conc_measured[placement] != 'geen':
                # Cap the DNA pipetting volume at 100 ul by raising the
                # effective concentration for very dilute samples.
                if output_ng / conc_measured[placement] > 100:
                    conc[placement] = output_ng / 100
                else:
                    conc[placement] = conc_measured[placement]
                volume_DNA[placement] = int(round(output_ng / conc[placement]))
                volume_H2O[placement] = output_ul - volume_DNA[placement]

    for well in clarity_epp.export.utils.sort_96_well_plate(
            monsternummer.keys()):
        output_file.write(
            '{monsternummer}\t{plate_id_input}\t{position}\t{plate_id_output}\t{volume_DNA}\t{volume_H2O}\n'
            .format(monsternummer=monsternummer[well],
                    plate_id_input=parent_process_barcode,
                    position=well,
                    plate_id_output=output_plate_barcode,
                    volume_DNA=volume_DNA[well],
                    volume_H2O=volume_H2O[well]))