Example #1
def samplesheet_capture(lims, process_id, output_file):
    """Create manual pipetting samplesheet for capture protocol."""
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # All input parameters
    data = [
        ['Ampligase Buffer 10X', process.udf['Ampligase Buffer 10X']],
        ['MIP pool werkoplossing', process.udf['MIP pool werkoplossing']],
        ['*dNTP 0.25mM', process.udf['*dNTP 0.25mM']],
        ['Hemo Klentaq 10U/ul', process.udf['Hemo Klentaq 10U/ul']],
        ['Ampligase 100U/ul', process.udf['Ampligase 100U/ul']],
        ['Water', process.udf['Water']],
    ]

    # Calculate for sample count
    for i, item in enumerate(data):
        data[i].append(sample_count * item[1] * 1.1)

    # Calculate final volume
    data.append([
        'ul MM in elke well',
        sum([item[1] for item in data]),
        sum([item[2] for item in data]),
    ])

    # Write samplesheet
    output_file.write('Mastermix\t1\t{0}\n'.format(sample_count))
    for item in data:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            item[0], item[1], item[2]))
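As a usage note: a minimal sketch of how a function like samplesheet_capture is typically wired up as a standalone EPP script, assuming the genologics package with BASEURI/USERNAME/PASSWORD configured (the argument flags and __main__ wrapper are illustrative, not from the original source):

import argparse

from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import Lims

if __name__ == '__main__':
    # Illustrative flags; the real scripts may name their arguments differently.
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pid', help='LIMS process id')
    parser.add_argument('-o', '--output', help='samplesheet output path')
    args = parser.parse_args()

    lims = Lims(BASEURI, USERNAME, PASSWORD)  # credentials from genologics config
    with open(args.output, 'w') as output_file:
        samplesheet_capture(lims, args.pid, output_file)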
Example #2
def sammplesheet_exonuclease(lims, process_id, output_file):
    """Create manual pipetting samplesheet for Exonuclease protocol"""
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # All input parameters
    data = [
        ['EXO I', process.udf['EXO I']],
        ['EXO III', process.udf['EXO III']],
        ['Ampligase buffer 10X', process.udf['Ampligase buffer 10X']],
        ['H2O', process.udf['H2O']],
    ]

    # Calculate for sample count
    for i, item in enumerate(data):
        data[i].append(sample_count * item[1] * 1.25)

    # Calculate total
    data.append([
        'TOTAL (incl. 25% overmaat)',
        sum([item[1] for item in data]),
        sum([item[2] for item in data]),
    ])

    # Write samplesheet
    output_file.write('\tMaster Mix (ul)\t{0}\n'.format(sample_count))
    for item in data:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            item[0], item[1], item[2]))
Example #3
def main(lims, args):
    currentStep = Process(lims, id=args.pid)
    driver_file_out = None
    driver = []
    ar_driver = {}
    valid_cols = set()
    for output in currentStep.all_outputs():
        if output.name == "Driver File":
            driver_file_out = output
        elif output.output_type == "ResultFile":
            location_ar = output.location[1].split(":")
            valid_cols.add(location_ar[0])
            #idx = (ord(location_ar[0])-65)*12 + int(location_ar[1])-1
            ar_driver[output.location[1].replace(":",
                                                 "")] = output.samples[0].name

    col_idx = -1
    for column in sorted(list(valid_cols)):
        col_idx += 1
        for i in xrange(1, 13):
            location = "{}{}".format(column, i)
            driver.append((col_idx * 12 + i, location,
                           ar_driver.get(location,
                                         "ladder" if i == 12 else "")))

    with open("frag_an_driver.csv", "w") as f:
        for line in driver:
            f.write("{0},{1},{2}\n".format(line[0], line[1], line[2]))

    lims.upload_new_file(driver_file_out, "frag_an_driver.csv")
Example #4
def copy_layout(lims, process_id):
    """Copy placement layout from previous steps."""
    process = Process(lims, id=process_id)
    used_placements = []
    # Get parent container layout
    parent_container = None
    for parent_process in process.parent_processes():
        if parent_process:
            for container in parent_process.output_containers():
                if container.type != Containertype(lims, id='2'):  # skip tubes
                    parent_container = container

    if parent_container:
        parent_placements = {}
        for placement in parent_container.placements:
            sample = parent_container.placements[placement].samples[0].name
            parent_placements[sample] = placement

        # Create new container and copy layout
        new_container = Container.create(lims, type=parent_container.type)
        placement_list = []
        for artifact in process.analytes()[0]:
            sample_name = artifact.samples[0].name
            if sample_name in parent_placements:
                placement = parent_placements[sample_name]
                if placement not in used_placements:
                    placement_list.append([artifact, (new_container, placement)])
                    used_placements.append(placement)

        process.step.placements.set_placement_list(placement_list)
        process.step.placements.post()
Example #5
def main(lims, args, epp_logger):
    pro = Process(lims, id=args.pid)
    source_udf = 'Reference genome'
    destination_udf = 'Reference Genome'

    artifacts = pro.all_inputs(unique=True)
    projects = all_projects_for_artifacts(artifacts)

    correct_projects, incorrect_udf = check_udf_is_defined(
        projects, source_udf)
    correct_samples = filter_samples(artifacts, correct_projects)

    session = Session(pro, source_udf, destination_udf)
    session.copy_main(correct_samples)

    if len(incorrect_udf) == 0:
        warning = "no projects"
    else:
        warning = "WARNING: skipped {0} project(s)".format(len(incorrect_udf))

    d = {'cs': len(correct_samples), 'warning': warning}

    abstract = (
        "Updated {cs} sample(s), {warning} with incorrect udf info.").format(
            **d)

    print >> sys.stderr, abstract  # stderr will be logged and printed in GUI
Example #7
def main(lims, pid, epp_logger):
    process = Process(lims, id=pid)
    target_files = dict((r.samples[0].name, r) for r in process.result_files())
    file_handler = ReadResultFiles(process)
    QiT = QuantitConc(process, file_handler)
    QiT.fit_model()
    QiT.prepare_result_files_dict()
    if QiT.model and 'Linearity of standards' in QiT.udfs.keys():
        R2 = QiT.model[0]
        if R2 >= QiT.udfs['Linearity of standards']:
            QiT.abstract.insert(0, "R2 = {0}. Standards OK.".format(R2))
            if QiT.result_files:
                for sample, target_file in target_files.items():
                    rel_fluor_int = QiT.get_and_set_fluor_int(target_file)
                    QiT.calc_and_set_conc(target_file, rel_fluor_int)
                QiT.abstract.append("Concentrations uploaded for {0} "
                                    "samples.".format(QiT.no_samps))
            else:
                QiT.abstract.append("Upload input file(s) for samples.")
        else:
            QiT.abstract.insert(
                0, "R2 = {0}. Problem with standards! Redo "
                "measurement!".format(R2))
    else:
        QiT.missing_udfs.append('Linearity of standards')
    if QiT.missing_samps:
        QiT.abstract.append("The following samples are missing in Quant-iT "
                            "result File 1 or 2: {0}.".format(', '.join(
                                QiT.missing_samps)))
    if QiT.missing_udfs:
        QiT.abstract.append("Are all of the following udfs set? : {0}".format(
            ', '.join(QiT.missing_udfs)))
    print >> sys.stderr, ' '.join(QiT.abstract)
Example #8
def samplesheet_pool_samples(lims, process_id, output_file):
    """Create manual pipetting samplesheet for pooling samples."""
    process = Process(lims, id=process_id)

    # print header
    output_file.write('Sample\tContainer\tWell\tPool\n')

    # Get all input artifact and store per container
    input_containers = {}
    for input_artifact in process.all_inputs(resolve=True):
        container = input_artifact.location[0].name
        well = ''.join(input_artifact.location[1].split(':'))

        if container not in input_containers:
            input_containers[container] = {}

        input_containers[container][well] = input_artifact

    # print pool scheme per input artifact
    # sort on container and well
    for input_container in sorted(input_containers.keys()):
        input_artifacts = input_containers[input_container]
        for well in clarity_epp.export.utils.sort_96_well_plate(
                input_artifacts.keys()):
            output_file.write('{sample}\t{container}\t{well}\t{pool}\n'.format(
                sample=input_artifacts[well].name,
                container=input_artifacts[well].location[0].name,
                well=well,
                pool=process.outputs_per_input(input_artifacts[well].id,
                                               Analyte=True)[0].name))
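The sort_96_well_plate helper from clarity_epp.export.utils is not shown in these examples. A minimal stand-in, assuming it orders wells column-first (A1, B1, ..., H1, A2, ...) as the samplesheet layout suggests; the real implementation may differ:

def sort_96_well_plate(wells):
    # Sort well ids like 'A1'..'H12' by column number first, then row letter.
    return sorted(wells, key=lambda well: (int(well[1:]), well[0]))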
Example #9
def sequencing_run(lims, email_settings, process_id):
    process = Process(lims, id=process_id)
    artifact = process.all_inputs()[0]

    subject = "LIMS QC Controle - {0}".format(artifact.name)

    message = "Sequencing Run: {0}\n".format(artifact.name)
    message += "Technician: {0}\n".format(process.technician.name)
    message += "LIMS Next Action: {0}\n\n".format(
        process.step.actions.next_actions[0]['action'])

    message += "UDF - Conversie rapport OK?: {0}\n".format(
        process.udf['Conversie rapport OK?'])
    if 'Fouten registratie (uitleg)' in process.udf:
        message += "UDF - Fouten registratie (uitleg): {0}\n".format(
            process.udf['Fouten registratie (uitleg)'])
    if 'Fouten registratie (oorzaak)' in process.udf:
        message += "UDF - Fouten registratie (oorzaak): {0}\n".format(
            process.udf['Fouten registratie (oorzaak)'])

    if process.step.actions.escalation:
        message += "\nManager Review LIMS:\n"
        message += "{0}: {1}\n".format(
            process.step.actions.escalation['author'].name,
            process.step.actions.escalation['request'])
        message += "{0}: {1}\n".format(
            process.step.actions.escalation['reviewer'].name,
            process.step.actions.escalation['answer'])

    send_email(email_settings['server'], email_settings['from'],
               email_settings['to_sequencing_run_complete'], subject, message)
Example #10
def main(args, lims, epp_logger):
    p = Process(lims, id=args.pid)
    lines = []
    cs = []
    if args.container_id:
        cs = p.output_containers()
        for c in cs:
            logging.info('Constructing barcode for container {0}.'.format(
                c.id))
            lines += makeContainerBarcode(c.id, copies=1)
    if args.container_name:
        cs = p.output_containers()
        for c in cs:
            logging.info('Constructing name label for container {0}.'.format(
                c.id))
            lines += makeContainerNameBarcode(c.name, copies=1)
    if args.operator_and_date:
        op = p.technician.name
        date = str(datetime.date.today())
        if cs:  # list of containers
            copies = len(cs)
        else:
            copies = args.copies
        lines += makeOperatorAndDateBarcode(op, date, copies=copies)
    if args.process_name:
        pn = p.type.name
        if cs:  # list of containers
            copies = len(cs)
        else:
            copies = args.copies
        lines += makeProcessNameBarcode(pn, copies=copies)
    if not (args.container_id or args.container_name or args.operator_and_date
            or args.process_name):
        logging.info('No recognized label type given, exiting.')
        sys.exit(-1)
    if not args.use_printer:
        logging.info('Writing to stdout.')
        epp_logger.saved_stdout.write('\n'.join(lines) + '\n')
    elif lines:  # Avoid printing empty files
        lp_args = ["lp"]
        if args.hostname:
            #remove that when all the calls to this script have been updated
            if args.hostname == 'homer.scilifelab.se:631':
                args.hostname = 'homer2.scilifelab.se:631'
            lp_args += ["-h", args.hostname]
        if args.destination:
            lp_args += ["-d", args.destination]
        lp_args.append("-")  # lp accepts stdin if '-' is given as filename
        logging.info('Ready to call lp for printing.')
        sp = subprocess.Popen(lp_args,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        sp.stdin.write(str('\n'.join(lines)))
        logging.info('lp command is called for printing.')
        stdout, stderr = sp.communicate()  # Will wait for sp to finish
        logging.info('lp stdout: {0}'.format(stdout))
        logging.info('lp stderr: {0}'.format(stderr))
        logging.info('lp command finished')
        sp.stdin.close()
Example #11
def main(lims, args, epp_logger):
    p = Process(lims, id=args.pid)
    udf_check = "Conc. Units"
    value_check = "ng/ul"
    concentration_udf = "Concentration"
    size_udf = "Size (bp)"

    if args.aggregate:
        artifacts = p.all_inputs(unique=True)
    else:
        all_artifacts = p.all_outputs(unique=True)
        artifacts = filter(lambda a: a.output_type == "ResultFile", all_artifacts)

    correct_artifacts, no_concentration = check_udf_is_defined(artifacts, concentration_udf)
    correct_artifacts, no_size = check_udf_is_defined(correct_artifacts, size_udf)
    correct_artifacts, wrong_value = check_udf_has_value(correct_artifacts, udf_check, value_check)

    apply_calculations(lims, correct_artifacts, concentration_udf, size_udf, udf_check, epp_logger)

    d = {"ca": len(correct_artifacts), "ia": len(wrong_value) + len(no_size) + len(no_concentration)}

    abstract = (
        "Updated {ca} artifact(s), skipped {ia} artifact(s) with " "wrong and/or blank values for some udfs."
    ).format(**d)

    print >>sys.stderr, abstract  # stderr will be logged and printed in GUI
Example #13
def set_sequence_name(lims, process_id):
    """Change artifact name to sequnece name."""
    process = Process(lims, id=process_id)
    for artifact in process.analytes()[0]:
        sample = artifact.samples[0]
        artifact.name = get_sequence_name(sample)
        artifact.put()
Example #14
def samplesheet_dilute_library_pool(lims, process_id, output_file):
    """Create manual pipetting samplesheet for sequencing pools."""
    output_file.write('Sample\tContainer\tWell\tul Sample\tul EB\n')
    process = Process(lims, id=process_id)

    output = []  # save pool data to list, to be able to sort on pool number.
    nM_pool = process.udf['Dx Pool verdunning (nM)']
    output_ul = process.udf['Eindvolume (ul)']

    for input in process.all_inputs():
        search_number = re.search(r'Pool #(\d+)_', input.name)
        if search_number:
            input_number = int(search_number.group(1))
        else:
            input_number = 0
        qc_artifact = input.input_artifact_list()[0]

        size = float(qc_artifact.udf['Dx Fragmentlengte (bp)'])
        concentration = float(
            qc_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])

        nM_dna = (concentration * 1000 * (1 / 660.0) * (1 / size)) * 1000
        ul_sample = (nM_pool / nM_dna) * output_ul
        ul_EB = output_ul - ul_sample

        line = '{pool_name}\t{container}\t{well}\t{ul_sample:.2f}\t{ul_EB:.2f}\n'.format(
            pool_name=input.name,
            container=input.location[0].name,
            well=input.location[1],
            ul_sample=ul_sample,
            ul_EB=ul_EB)
        output.append((input_number, line))

    for number, line in sorted(output):
        output_file.write(line)
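The nM_dna line converts a concentration in ng/ul to nM using an average weight of 660 g/mol per base pair: nM = ng/ul * 1e6 / (660 * size). A quick check of the arithmetic with illustrative numbers (not from a real run):

size = 500.0          # Dx Fragmentlengte (bp)
concentration = 20.0  # Dx Concentratie fluorescentie (ng/ul)
nM_pool = 2.0         # Dx Pool verdunning (nM)
output_ul = 100.0     # Eindvolume (ul)

nM_dna = (concentration * 1000 * (1 / 660.0) * (1 / size)) * 1000  # ~60.6 nM
ul_sample = (nM_pool / nM_dna) * output_ul                         # ~3.3 ul sample
ul_EB = output_ul - ul_sample                                      # ~96.7 ul EB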
Example #15
def main(lims, pid, epp_logger):
    process = Process(lims, id=pid)
    QiT = QuantitQC(process)
    QiT.assign_QC_flag()
    if QiT.flour_int_missing:
        QiT.abstract.append("Fluorescence intensity is missing for {0} "
                            "samples.".format(QiT.flour_int_missing))
    if QiT.missing_udfs:
        QiT.abstract.append("Could not set QC flags. Some of the following "
                            "required udfs seems to be missing: {0}.".format(
                                QiT.missing_udfs))
    else:
        QiT.abstract.append("{0} out of {1} samples failed "
                            "QC.".format(QiT.no_failed,
                                         len(process.result_files())))
    if QiT.saturated:
        QiT.abstract.append("{0} samples had saturated fluorescence "
                            "intensity.".format(QiT.saturated))
    if QiT.hig_CV_fract:
        QiT.abstract.append("{0} samples had high %CV.".format(
            QiT.hig_CV_fract))
    if QiT.low_conc:
        QiT.abstract.append("{0} samples had low concentration.".format(
            QiT.low_conc))
    if QiT.conc_missing:
        QiT.abstract.append("Concentration is missing for {0} "
                            "sample(s).".format(QiT.conc_missing))
    QiT.abstract = list(set(QiT.abstract))
    print >> sys.stderr, ' '.join(QiT.abstract)
Example #16
def sammplesheet_pcr_exonuclease(lims, process_id, output_file):
    """Create manual pipetting samplesheet for PCR after Exonuclease protocol"""
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # All input parameters
    data = [
        ['2X iProof', process.udf['2X iProof']],
        [
            'Illumina forward primer(100uM) MIP_OLD_BB_FOR',
            process.udf['Illumina forward primer(100uM) MIP_OLD_BB_FOR']
        ],
        ['H2O', process.udf['H2O']],
    ]

    # Calculate for sample count
    for i, item in enumerate(data):
        data[i].append(sample_count * item[1] * 1.1)

    # Calculate total
    data.append([
        'TOTAL (incl. 10% overmaat)',
        sum([item[1] for item in data]) * 1.1,
        sum([item[2] for item in data]),
    ])

    # Write samplesheet
    output_file.write('\tMaster Mix (ul)\t{0}\n'.format(sample_count))
    for item in data:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            item[0], item[1], item[2]))
Example #17
def main(lims, args, epp_logger):
    p = Process(lims, id=args.pid)
    udf_factor1 = 'Concentration (ng/ul)'
    result_udf = 'Concentration nM'
    udf_factor2 = 'Size (bp)'

    if args.aggregate:
        artifacts = p.all_inputs(unique=True)
    else:
        all_artifacts = p.all_outputs(unique=True)
        artifacts = filter(lambda a: a.output_type == "Analyte", all_artifacts)

    correct_artifacts, wrong_factor1 = check_udf_is_defined(artifacts, udf_factor1)
    correct_artifacts, wrong_factor2 = check_udf_is_defined(correct_artifacts, udf_factor2)

    f = open(args.res, "a")

    if correct_artifacts:
        apply_calculations(lims, correct_artifacts, udf_factor1,
                           udf_factor2, result_udf, epp_logger, f)
    
    f.close()


    d = {'ca': len(correct_artifacts),
         'ia': len(wrong_factor1)+ len(wrong_factor2) }

    abstract = ("Updated {ca} artifact(s), skipped {ia} artifact(s) with "
                "wrong and/or blank values for some udfs.").format(**d)

    print >> sys.stderr, abstract # stderr will be logged and printed in GUI
Example #18
def results(lims, process_id):
    """Upload bioanalyzer results to artifacts."""
    process = Process(lims, id=process_id)
    sample_measurements = {}

    # Parse File
    for output in process.all_outputs(unique=True):
        if output.name == 'Bioanalyzer Output':
            bioanalyzer_result_file = output.files[0]

            for line in lims.get_file_contents(
                    bioanalyzer_result_file.id).split('\n'):
                if line.startswith('Sample Name'):
                    sample = line.rstrip().split(',')[1]
                elif line.startswith('Region 1'):
                    line = re.sub(
                        r'"([0-9]+),([0-9\.]+)"', r'\1\2', line
                    )  # Remove thousands separator (,) and quotes ("")
                    size = line.rstrip().split(',')[5]
                    sample_measurements[sample] = int(size)

    # Set UDF
    for artifact in process.all_outputs():
        if artifact.name in sample_measurements:
            artifact.udf['Dx Fragmentlengte (bp)'] = sample_measurements[
                artifact.name]
            artifact.put()
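The re.sub call is needed because the Bioanalyzer CSV wraps comma-grouped numbers in quotes, so a value like "1,050" would otherwise split into two fields. An illustrative round trip on a made-up line:

import re

line = 'Region 1,x,x,x,x,"1,050"'
print(re.sub(r'"([0-9]+),([0-9\.]+)"', r'\1\2', line))  # Region 1,x,x,x,x,1050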
Example #19
def main(lims, args, epp_logger):
    p = Process(lims, id=args.pid)
    udf_check = "Conc. Units"
    value_check = "ng/ul"
    udf_factor1 = "Concentration"
    udf_factor2 = "Volume (ul)"
    result_udf = "Amount (ng)"

    if args.aggregate:
        artifacts = p.all_inputs(unique=True)
    else:
        all_artifacts = p.all_outputs(unique=True)
        artifacts = filter(lambda a: a.output_type == "ResultFile", all_artifacts)

    correct_artifacts, wrong_factor1 = check_udf_is_defined(artifacts, udf_factor1)
    correct_artifacts, wrong_factor2 = check_udf_is_defined(correct_artifacts, udf_factor2)

    correct_artifacts, wrong_value = check_udf_has_value(correct_artifacts, udf_check, value_check)

    if correct_artifacts:
        apply_calculations(lims, correct_artifacts, udf_factor1, "*", udf_factor2, result_udf, epp_logger, p)

    d = {"ca": len(correct_artifacts), "ia": len(wrong_factor1) + len(wrong_factor2) + len(wrong_value)}

    abstract = (
        "Updated {ca} artifact(s), skipped {ia} artifact(s) with " "wrong and/or blank values for some udfs."
    ).format(**d)

    print >>sys.stderr, abstract  # stderr will be logged and printed in GUI
Example #22
def results(lims, process_id):
    """Upload tapestation results to artifacts."""
    process = Process(lims, id=process_id)
    sample_size_measurements = {}
    sample_concentration_measurements = {}

    # Parse File
    for output in process.all_outputs(unique=True):
        if output.name == 'TapeStation Output':
            tapestation_result_file = output.files[0]
            for line in lims.get_file_contents(
                    tapestation_result_file.id).split('\n'):
                if line.startswith('FileName'):
                    header = line.split(',')
                    if 'Size [bp]' in header:  # Tapestation compact peak table
                        size_index = header.index('Size [bp]')
                        concentration_index = None
                    else:  # Tapestation compact region table
                        size_index = header.index('Average Size [bp]')
                        try:
                            concentration_index = header.index(
                                u'Conc. [pg/\xb5l]')  # micro sign
                            concentration_correction = 1000  # Used to transform pg/ul to ng/ul
                        except ValueError:
                            concentration_index = header.index(
                                u'Conc. [ng/\xb5l]')  # micro sign
                            concentration_correction = 1
                    sample_index = header.index('Sample Description')

                elif line:
                    data = line.split(',')
                    sample = data[sample_index]
                    if sample != 'Ladder':
                        if data[size_index]:
                            size = int(data[size_index])
                            sample_size_measurements[sample] = size
                        if concentration_index and data[concentration_index]:
                            # Correct concentration
                            concentration = float(data[concentration_index]
                                                  ) / concentration_correction
                            sample_concentration_measurements[
                                sample] = concentration

    # Set UDF
    for artifact in process.all_outputs():
        if artifact.name not in [
                'TapeStation Output', 'TapeStation Samplesheet',
                'TapeStation Sampleplots PDF'
        ]:
            sample_name = artifact.name.split('_')[0]
            if sample_name in sample_size_measurements:
                artifact.udf[
                    'Dx Fragmentlengte (bp)'] = sample_size_measurements[
                        sample_name]
            if sample_name in sample_concentration_measurements:
                artifact.udf[
                    'Dx Concentratie fluorescentie (ng/ul)'] = sample_concentration_measurements[
                        sample_name]
            artifact.put()
Example #23
def main(lims, args):
    p = Process(lims, id=args.pid)
    log = []
    datamap = {}
    wsname = None
    username = "******".format(p.technician.first_name,
                                p.technician.last_name)
    user_email = p.technician.email
    for art in p.all_inputs():
        if len(art.samples) != 1:
            log.append(
                "Warning : artifact {0} has more than one sample".format(
                    art.id))
        for sample in art.samples:
            # take care of lambda DNA
            if sample.project:
                if sample.project.id not in datamap:
                    datamap[sample.project.id] = [sample.name]
                else:
                    datamap[sample.project.id].append(sample.name)

    for art in p.all_outputs():
        try:
            wsname = art.location[0].name
            break
        except:
            pass

    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    for pid in datamap:
        pj = Project(lims, id=pid)
        if len(datamap[pid]) > 1:
            rnt = "{0} samples planned for {1}".format(len(datamap[pid]),
                                                       wsname)
        else:
            rnt = "{0} sample planned for {1}".format(len(datamap[pid]),
                                                      wsname)

        running_note = {
            "note": rnt,
            "user": username,
            "email": user_email,
            "category": "Workset"
        }
        write_note_to_couch(pid, now, running_note, lims.get_uri())
        log.append(
            "Updated project {0} : {1}, {2} samples in this workset".format(
                pid, pj.name, len(datamap[pid])))

    with open("EPP_Notes.log", "w") as flog:
        flog.write("\n".join(log))
    for out in p.all_outputs():
        #attach the log file
        if out.name == "RNotes Log":
            attach_file(os.path.join(os.getcwd(), "EPP_Notes.log"), out)

    sys.stderr.write("Updated {0} projects successfully".format(
        len(list(datamap.keys()))))
Example #25
def main(lims, args):
    process = Process(lims, id=args.pid)
    artifacts = process.all_outputs()
    updated_arts = 0
    for art in artifacts:
        updated_arts += get_buffer(art, args.process_types)

    print >> sys.stderr, 'Updated ' + str(
        updated_arts) + ' samples with volume from Buffer step.'
Example #26
def main(lims, args):
    process = Process(lims, id=args.p)
    duplicates = get_duplicate_samples(process.all_inputs())

    if duplicates:
        sys.exit('Samples: ' + ', '.join(duplicates) +
                 ' appeared more than once in this step.')
    else:
        print >> sys.stderr, 'No duplicated samples!'
Example #27
def storage_location(lims, process_id, output_file):
    """Generate storage location label file."""
    process = Process(lims, id=process_id)
    for artifact in process.analytes()[0]:
        storage_location = artifact.samples[0].udf['Dx Opslaglocatie']
        output_file.write(
            '{sample}\t{storage_location}\t{birth_date}\n'.format(
                sample=artifact.samples[0].name,
                storage_location=storage_location,
                birth_date=artifact.samples[0].udf['Dx Geboortejaar']))
Example #28
    def get(self, workset):
        self.set_header("Content-type", "application/json")
        p = Process(self.lims, id=workset)
        p.get(force=True)
        # Sorted running notes, by date
        workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
        sorted_workset_notes = OrderedDict()
        for k, v in sorted(workset_notes.items(), key=lambda t: t[0], reverse=True):
            sorted_workset_notes[k] = v
        self.write(sorted_workset_notes)
Example #29
    def __init__(self, lims, pid, inp_is_source):
        self.lims = lims
        self.source_is_input_artifact = inp_is_source
        self.process = Process(lims, id=pid)
        self.input_output_maps = self.process.input_output_maps
        self.artifacts = {}
        self.failed_arts = False
        self.passed_arts = 0
        self.all_arts = 0
        self.result_file = None
Example #31
def main(args):
    log = []
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    process = Process(lims, id=args.pid)
    for io in process.input_output_maps:
        if io[1]['output-generation-type'] != 'PerInput':
            continue
        try:
            starting_amount = obtain_amount(io[0]['uri'])
        except Exception as e:
            log.append(str(e))
            starting_amount = 0

        log.append("Starting amount of {} : {} ng".format(
            io[0]['uri'].samples[0].name, starting_amount))
        current_amount = starting_amount
        #preps
        preps = lims.get_processes(
            inputartifactlimsid=io[0]['uri'].id,
            type=["Setup Workset/Plate", "Amount confirmation QC"])
        for pro in preps:
            if pro.id == args.pid:
                continue  # skip the current step
            for prepio in pro.input_output_maps:
                if prepio[1]['output-generation-type'] == 'PerInput' and prepio[
                        0]['uri'].id == io[0]['uri'].id:
                    if "Amount taken (ng)" in prepio[1][
                            'uri'].udf:  #should always be true
                        prep_amount = prepio[1]['uri'].udf["Amount taken (ng)"]
                        log.append(
                            "Removing {} ng for prep {} for sample {}".format(
                                prep_amount, pro.id,
                                io[0]['uri'].samples[0].name))
                        current_amount = current_amount - prep_amount
                    else:
                        log.append(
                            "No Amount Taken found for prep {} of sample {}".
                            format(pro.id, io[0]['uri'].samples[0].name))

        if current_amount < 0:
            log.append(
                "Estimated amount for sample {} is {}, correcting to zero".
                format(io[0]['uri'].samples[0].name, current_amount))
            current_amount = 0

        update_output_values(io[0]['uri'], io[1]['uri'], current_amount)

        with open("amount_check_log.txt", "w") as f:
            f.write("\n".join(log))

        for out in process.all_outputs():
            if out.name == "QC Assignment Log File":
                for f in out.files:
                    lims.request_session.delete(f.uri)
                lims.upload_new_file(out, "amount_check_log.txt")
Example #32
def unpooling(lims, process_id):
    """Unpool samples after sequencing."""
    process = Process(lims, id=process_id)

    pool_artifact = process.all_inputs()[0]
    pool_artifact_parent_process = pool_artifact.parent_process
    pool_artifact_demux = lims.get(pool_artifact.uri + '/demux')

    run_id = pool_artifact.name  # Assume run id is set as pool name using placement/artifact/set_runid_name
    sample_artifacts = []  # sample artifacts before pooling
    sample_projects = {}

    for artifact in pool_artifact_parent_process.result_files():
        if (artifact.name == 'SampleSheet csv'
                or artifact.name == 'Sample Sheet') and artifact.files:
            file_id = artifact.files[0].id
            sample_sheet = lims.get_file_contents(id=file_id)
            project_index = None
            sample_index = None
            for line in sample_sheet.split('\n'):
                data = line.rstrip().split(',')

                if 'Sample_Project' in data and 'Sample_ID' in data:
                    project_index = data.index('Sample_Project')
                    sample_index = data.index('Sample_ID')
                elif project_index and len(data) >= project_index:
                    sample_projects[data[sample_index]] = data[project_index]

    for node in pool_artifact_demux.getiterator('artifact'):
        if node.find('samples'):
            if len(node.find('samples').findall('sample')) == 1:
                sample_artifact = Artifact(lims, uri=node.attrib['uri'])
                sample = sample_artifact.samples[0]  # 1 sample per artifact.

                # Get sample sequencing run and project from samplesheet
                sample_artifact.udf['Dx Sequencing Run ID'] = run_id
                if 'Sample Type' in sample.udf and 'library' in sample.udf[
                        'Sample Type']:  # Use sample.name for external (clarity_portal) samples
                    sample_artifact.udf[
                        'Dx Sequencing Run Project'] = sample_projects[
                            sample.name]
                else:  # Use sample_artifact.name for Dx samples (upload via Helix)
                    sample_artifact.udf[
                        'Dx Sequencing Run Project'] = sample_projects[
                            sample_artifact.name]
                sample_artifact.put()

                if sample_artifact.samples[
                        0].project and sample_artifact.samples[0].project.udf[
                            'Application'] == 'DX':  # Only move DX production samples to post sequencing workflow
                    sample_artifacts.append(sample_artifact)

    lims.route_artifacts(sample_artifacts,
                         workflow_uri=Workflow(
                             lims, id=config.post_sequencing_workflow).uri)
Example #33
def main(lims, pid, epp_logger):
    process = Process(lims, id=pid)
    sample_names = map(lambda a: a.name, process.analytes()[0])
    target_files = process.result_files()
    file_handler = ReadResultFiles(process)
    files = file_handler.shared_files['Qubit Result File']
    qubit_result_file = file_handler.format_file(files, 
                                                 name = 'Qubit Result File',
                                                 first_header = 'Sample',
                                                 find_keys = sample_names)
    missing_samples = 0
    low_conc = 0
    bad_formated = 0
    abstract = []
    udfs = dict(process.udf.items())
    if udfs.has_key("Minimum required concentration (ng/ul)"):
        min_conc = udfs["Minimum required concentration (ng/ul)"]
    else:
        min_conc = None
        abstract.append("Set 'Minimum required concentration (ng/ul)' to get qc-flaggs based on this treshold!")
    for target_file in target_files:
        sample = target_file.samples[0].name
        if qubit_result_file.has_key(sample):
            sample_mesurements = qubit_result_file[sample]
            if "Sample Concentration" in sample_mesurements.keys():
                conc, unit = sample_mesurements["Sample Concentration"]
                if conc == 'Out Of Range':
                    target_file.qc_flag = "FAILED"
                elif conc.replace('.','').isdigit():
                    conc = float(conc)
                    if unit == 'ng/mL':
                        conc = np.true_divide(conc, 1000)
                    if min_conc:
                        if conc < min_conc:
                            target_file.qc_flag = "FAILED"
                            low_conc +=1
                        else:
                            target_file.qc_flag = "PASSED"
                    target_file.udf['Concentration'] = conc
                    target_file.udf['Conc. Units'] = 'ng/ul'
                else:
                    bad_formated += 1
                set_field(target_file)
        else:
            missing_samples += 1

    if low_conc:
        abstract.append('{0}/{1} samples have low concentration.'.format(low_conc, len(target_files)))
    if missing_samples:
        abstract.append('{0}/{1} samples are missing in Qubit Result File.'.format(missing_samples, len(target_files)))
    if bad_formated:
        abstract.append('There are {0} badly formatted samples in Qubit Result File. Please fix these to get proper results.'.format(bad_formated))

    print >> sys.stderr, ' '.join(abstract)
Example #34
def samplesheet_normalization(lims, process_id, output_file):
    """Create manual pipetting samplesheet for normalizing (MIP) samples."""
    output_file.write(
        'Sample\tConcentration (ng/ul)\tVolume sample (ul)\tVolume water (ul)\tOutput (ng)\tIndampen\n'
    )
    process = Process(lims, id=process_id)

    # Find all QC process types
    qc_process_types = clarity_epp.export.utils.get_process_types(
        lims, ['Dx Qubit QC', 'Dx Tecan Spark 10M QC'])

    for input_artifact in process.all_inputs(resolve=True):
        artifact = process.outputs_per_input(
            input_artifact.id,
            Analyte=True)[0]  # assume one artifact per input
        sample = input_artifact.samples[
            0]  # assume one sample per input artifact

        # Find last qc process for artifact
        qc_process = sorted(
            lims.get_processes(type=qc_process_types,
                               inputartifactlimsid=input_artifact.id),
            key=lambda process: int(process.id.split('-')[-1]))[-1]

        # Find concentration measurement
        for qc_artifact in qc_process.outputs_per_input(input_artifact.id):
            if qc_artifact.name.split(' ')[0] == artifact.name:
                concentration = float(
                    qc_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])

        final_volume = float(artifact.udf['Dx Eindvolume (ul)'])
        input_ng = float(artifact.udf['Dx Input (ng)'])
        if 'Dx pipetteervolume (ul)' in artifact.udf:
            input_ng = concentration * float(
                artifact.udf['Dx pipetteervolume (ul)'])
        sample_volume = input_ng / concentration
        water_volume = final_volume - sample_volume
        evaporate = 'N'

        if sample_volume < 0.5:
            sample_volume = 0.5
            water_volume = final_volume - sample_volume
        elif sample_volume > final_volume:
            evaporate = 'J'
            water_volume = 0

        output_file.write(
            '{sample}\t{concentration:.1f}\t{sample_volume:.1f}\t{water_volume:.1f}\t{output:.1f}\t{evaporate}\n'
            .format(sample=sample.name,
                    concentration=concentration,
                    sample_volume=sample_volume,
                    water_volume=water_volume,
                    output=input_ng,
                    evaporate=evaporate))
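The volume logic reduces to sample_volume = input_ng / concentration, with water topping up to the final volume, a 0.5 ul minimum pipetting volume, and an evaporation flag ('J') when the required sample volume exceeds the final volume. A worked check with illustrative numbers:

concentration = 50.0  # ng/ul
final_volume = 10.0   # Dx Eindvolume (ul)
input_ng = 100.0      # Dx Input (ng)

sample_volume = input_ng / concentration     # 2.0 ul sample
water_volume = final_volume - sample_volume  # 8.0 ul water
# With concentration = 500.0 the 0.2 ul sample would be clamped to 0.5 ul;
# with input_ng = 600.0 the 12 ul needed exceeds 10 ul, so evaporate = 'J'.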
Example #35
def old_main(lims, pid, epp_logger):
    process = Process(lims, id=pid)
    sample_names = map(lambda a: a.name, process.analytes()[0])
    target_files = process.result_files()
    file_handler = ReadResultFiles(process)
    files = file_handler.shared_files['Qubit Result File']
    qubit_result_file = file_handler.format_file(files, 
                                                 name = 'Qubit Result File',
                                                 first_header = ['Test','Sample'],
                                                 find_keys = sample_names)
    missing_samples = 0
    low_conc = 0
    bad_formated = 0
    abstract = []
    udfs = dict(process.udf.items())
    if udfs.has_key("Minimum required concentration (ng/ul)"):
        min_conc = udfs["Minimum required concentration (ng/ul)"]
    else:
        min_conc = None
        abstract.append("Set 'Minimum required concentration (ng/ul)' to get qc-flaggs based on this treshold!")
    for target_file in target_files:
        sample = target_file.samples[0].name
        if qubit_result_file.has_key(sample):
            sample_mesurements = qubit_result_file[sample]
            if "Sample Concentration" in sample_mesurements.keys():
                conc, unit = sample_mesurements["Sample Concentration"]
                if conc == 'Out Of Range':
                    target_file.qc_flag = "FAILED"
                elif conc.replace('.','').isdigit():
                    conc = float(conc)
                    if unit == 'ng/mL':
                        conc = np.true_divide(conc, 1000)
                    if min_conc:
                        if conc < min_conc:
                            target_file.qc_flag = "FAILED"
                            low_conc +=1
                        else:
                            target_file.qc_flag = "PASSED"
                    target_file.udf['Concentration'] = conc
                    target_file.udf['Conc. Units'] = 'ng/ul'
                else:
                    bad_formated += 1
                set_field(target_file)
        else:
            missing_samples += 1

    if low_conc:
        abstract.append('{0}/{1} samples have low concentration.'.format(low_conc, len(target_files)))
    if missing_samples:
        abstract.append('{0}/{1} samples are missing in Qubit Result File.'.format(missing_samples, len(target_files)))
    if bad_formated:
        abstract.append('There are {0} badly formatted samples in Qubit Result File. Please fix these to get proper results.'.format(bad_formated))

    print >> sys.stderr, ' '.join(abstract)
Example #36
def samplesheet(lims, process_id, output_file):
    """Create Tapestation samplesheet."""
    process = Process(lims, id=process_id)
    well_plate = {}

    for placement, artifact in process.output_containers(
    )[0].placements.iteritems():
        placement = ''.join(placement.split(':'))
        well_plate[placement] = artifact.name.split('_')[0]

    for well in clarity_epp.export.utils.sort_96_well_plate(well_plate.keys()):
        output_file.write('{sample}\n'.format(sample=well_plate[well]))
Example #37
def main(lims, args, epp_logger):
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    correct_artifacts = 0
    incorrect_artifacts = 0
    no_updated = 0
    p = Process(lims, id=args.pid)
    artifacts, inf = p.analytes()

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error(
            "source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)
    for i in range(len(source_udfs)):
        source_udf = source_udfs[i]
        dest_udf = dest_udfs[i]
        with open(args.status_changelog, 'a') as changelog_f:
            for artifact in artifacts:
                if source_udf in artifact.udf:
                    correct_artifacts = correct_artifacts + 1
                    copy_sesion = CopyField(artifact, artifact.samples[0],
                                            source_udf, dest_udf)
                    test = copy_sesion.copy_udf(changelog_f)
                    if test:
                        no_updated = no_updated + 1
                else:
                    incorrect_artifacts = incorrect_artifacts + 1
                    logging.warning(("Found artifact for sample {0} with {1} "
                                     "undefined/blank, exiting").format(
                                         artifact.samples[0].name, source_udf))

    if incorrect_artifacts == 0:
        warning = "no artifacts"
    else:
        warning = "WARNING: skipped {0} udfs(s)".format(incorrect_artifacts)
    d = {
        'ua': no_updated,
        'ca': correct_artifacts,
        'ia': incorrect_artifacts,
        'warning': warning
    }

    abstract = ("Updated {ua} udf(s), out of {ca} in total, "
                "{warning} with incorrect udf info.").format(**d)

    print(abstract,
          file=sys.stderr)  # stderr will be logged and printed in GUI
Example #38
def main(lims, args, epp_logger):
    d_elts = []
    no_updated = 0
    incorrect_udfs = 0
    project_names = ''
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    s_elt = Process(lims, id=args.pid)
    analytes, inf = s_elt.analytes()

    for analyte in analytes:
        for samp in analyte.samples:
            d_elts.append(samp.project)
    d_elts = list(set(d_elts))

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error(
            "source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)

    for d_elt in d_elts:
        project_names = ' '.join([project_names, d_elt.name])
        for i in range(len(source_udfs)):
            source_udf = source_udfs[i]
            dest_udf = dest_udfs[i]
            with open(args.status_changelog, 'a') as changelog_f:
                if source_udf in s_elt.udf:
                    copy_sesion = CopyField(s_elt, d_elt, source_udf, dest_udf)
                    test = copy_sesion.copy_udf(changelog_f)
                    if test:
                        no_updated = no_updated + 1
                else:
                    logging.warning(
                        ("Udf: {1} in Process {0} is undefined/blank, exiting"
                         ).format(s_elt.id, source_udf))
                    incorrect_udfs = incorrect_udfs + 1

    if incorrect_udfs > 0:
        warn = "Failed to update %s udf(s) due to missing/wrong source udf info." % incorrect_udfs
    else:
        warn = ''

    d = {'up': no_updated, 'ap': len(d_elts), 'w': warn, 'pr': project_names}

    abstract = ("Updated {up} udf(s). Handeled project(s): {pr} {w}").format(
        **d)
    print(abstract, file=sys.stderr)
Example #40
    def get(self, lims_step):
        self.set_header("Content-type", "application/json")
        p = Process(self.lims, id=lims_step)
        p.get(force=True)

        links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}

        #Sort by descending date, then hopefully have deviations on top
        sorted_links = OrderedDict()
        for k, v in sorted(links.items(), key=lambda t: t[0], reverse=True):
            sorted_links[k] = v
        sorted_links = OrderedDict(sorted(sorted_links.items(), key=lambda k : k[1]['type']))
        self.write(sorted_links)
Example #41
def main(lims, args):
    p = Process(lims, id=args.pid)
    log = []
    datamap = {}
    wsname = None
    username = "******".format(p.technician.first_name, p.technician.last_name)
    user_email = p.technician.email
    for art in p.all_inputs():
        if len(art.samples) != 1:
            log.append("Warning : artifact {0} has more than one sample".format(art.id))
        for sample in art.samples:
            # take care of lambda DNA
            if sample.project:
                if sample.project.id not in datamap:
                    datamap[sample.project.id] = [sample.name]
                else:
                    datamap[sample.project.id].append(sample.name)

    for art in p.all_outputs():
        try:
            wsname = art.location[0].name
            break
        except:
            pass

    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    for pid in datamap:
        pj = Project(lims, id=pid)
        running_notes = json.loads(pj.udf['Running Notes'])
        if len(datamap[pid]) > 1:
            rnt = "{0} samples planned for {1}".format(len(datamap[pid]), wsname)
        else:
            rnt = "{0} sample planned for {1}".format(len(datamap[pid]), wsname)

        running_notes[now] = {"note": rnt, "user": username, "email": user_email, "category": "Workset"}

        pj.udf['Running Notes'] = json.dumps(running_notes)
        pj.put()
        log.append("Updated project {0} : {1}, {2} samples in this workset".format(pid, pj.name, len(datamap[pid])))

    with open("EPP_Notes.log", "w") as flog:
        flog.write("\n".join(log))
    for out in p.all_outputs():
        # attach the log file
        if out.name == "RNotes Log":
            attach_file(os.path.join(os.getcwd(), "EPP_Notes.log"), out)

    sys.stderr.write("Updated {0} projects successfully".format(len(datamap.keys())))
Exemplo n.º 42
0
def main(lims, args, epp_logger):
    d_elts = []
    no_updated = 0
    incorrect_udfs = 0
    project_names = ''
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    s_elt = Process(lims,id = args.pid)
    analytes, inf = s_elt.analytes()

    for analyte in analytes:
        for samp in analyte.samples:
            d_elts.append(samp.project)
    d_elts = list(set(d_elts))

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error("source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)
    
    for d_elt in d_elts:
        project_names = ' '.join([project_names, d_elt.name])
        for i in range(len(source_udfs)):
            source_udf = source_udfs[i]
            dest_udf = dest_udfs[i]
            with open(args.status_changelog, 'a') as changelog_f:
                if source_udf in s_elt.udf:
                    copy_sesion = CopyField(s_elt, d_elt, source_udf, dest_udf)
                    test = copy_sesion.copy_udf(changelog_f)
                    if test:
                        no_updated = no_updated + 1
                else:
                    logging.warning(("Udf: {1} in Process {0} is undefined/blank, exiting").format(s_elt.id, source_udf))
                    incorrect_udfs = incorrect_udfs + 1

    if incorrect_udfs > 0:
        warn = "Failed to update %s udf(s) due to missing/wrong source udf info." %incorrect_udfs
    else:
        warn = ''

    d = {'up': no_updated,
         'ap': len(d_elts),
         'w' : warn,
         'pr': project_names}

    abstract = ("Updated {up} udf(s). Handeled project(s): {pr} {w}").format(**d)
    print >> sys.stderr, abstract
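CopyField is imported from the genologics EPP utilities. The contract this example (and nº 44 below) relies on is small: construct it with a source entity, a destination entity and the two UDF names, then copy_udf() writes the value across, optionally appending to a changelog file, and returns True only when the destination actually changed. A simplified sketch of that contract, not the library's implementation:

class CopyField(object):
    def __init__(self, s_elt, d_elt, s_field, d_field=None):
        self.s_elt = s_elt
        self.d_elt = d_elt
        self.s_field = s_field
        self.d_field = d_field if d_field else s_field

    def copy_udf(self, changelog_f=None):
        new = self.s_elt.udf[self.s_field]
        old = self.d_elt.udf[self.d_field] if self.d_field in self.d_elt.udf else None
        if new == old:
            return False  # nothing to update
        if changelog_f:
            changelog_f.write("{0}: {1} -> {2}\n".format(self.d_elt.id, old, new))
        self.d_elt.udf[self.d_field] = new
        self.d_elt.put()
        return True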
Exemplo n.º 43
0
def main(lims, args):
    process = Process(lims, id=args.pid)
    # Read in the run recipe file
    content = None
    file_name = None
    for outart in process.all_outputs():
        if outart.type == 'ResultFile' and outart.name == 'Run Recipe':
            try:
                fid = outart.files[0].id
                file_name = outart.files[0].original_location
                content = lims.get_file_contents(id=fid).read()
            except Exception:
                raise RuntimeError("Cannot access the run recipe file.")
            break

    if content is None:
        sys.exit("No 'Run Recipe' output found on this step.")

    with open("/srv/mfs/NovaSeq_data/gls_recipe_novaseq/{}".format(file_name), 'w') as sf:
        sf.write(content)
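All the main(lims, args) functions in this listing follow the same EPP entry-point convention: Clarity triggers the script with a --pid flag holding the Process LIMS id, and a Lims instance is built from the genologics config. A typical wrapper, assuming only that convention (individual scripts add their own flags such as --source_udf or --status_changelog):

from argparse import ArgumentParser

from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import Lims

if __name__ == "__main__":
    parser = ArgumentParser(description="EPP script entry point")
    # Clarity passes the LIMS id of the step that triggered the script
    parser.add_argument('--pid', required=True, help='LIMS id of the current Process')
    args = parser.parse_args()

    lims = Lims(BASEURI, USERNAME, PASSWORD)
    lims.check_version()
    main(lims, args)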
Exemplo n.º 44
0
def main(lims, args, epp_logger):
    source_udfs = args.source_udf
    dest_udfs = args.dest_udf
    correct_artifacts = 0
    incorrect_artifacts = 0
    no_updated = 0
    p = Process(lims,id = args.pid)
    artifacts, inf = p.analytes()

    if args.status_changelog:
        epp_logger.prepend_old_log(args.status_changelog)

    if not dest_udfs:
        dest_udfs = source_udfs
    elif len(dest_udfs) != len(source_udfs):
        logging.error("source_udfs and dest_udfs lists of arguments are uneven.")
        sys.exit(-1)
    for i in range(len(source_udfs)):
        source_udf = source_udfs[i]
        dest_udf = dest_udfs[i]
        with open(args.status_changelog, 'a') as changelog_f:
            for artifact in artifacts:
                if source_udf in artifact.udf:
                    correct_artifacts = correct_artifacts +1
                    copy_sesion = CopyField(artifact, artifact.samples[0], source_udf, dest_udf)
                    test = copy_sesion.copy_udf(changelog_f)
                    if test:
                        no_updated = no_updated + 1
                else:
                    incorrect_artifacts = incorrect_artifacts + 1
                    logging.warning(("Found artifact for sample {0} with {1} "
                                   "undefined/blank, exiting").format(artifact.samples[0].name, source_udf))

    if incorrect_artifacts == 0:
        warning = "no artifacts"
    else:
        warning = "WARNING: skipped {0} udfs(s)".format(incorrect_artifacts)
    d = {'ua': no_updated,
         'ca': correct_artifacts,
         'ia': incorrect_artifacts,
         'warning' : warning}

    abstract = ("Updated {ua} udf(s), out of {ca} in total, "
                "{warning} with incorrect udf info.").format(**d)

    print(abstract, file=sys.stderr)  # stderr will be logged and printed in GUI
Exemplo n.º 45
0
def main(args):
    log = []
    lims = Lims(BASEURI,USERNAME,PASSWORD)
    process = Process(lims, id=args.pid)
    for io in process.input_output_maps:
        if io[1]['output-generation-type'] != 'PerInput':
            continue
        try:
            starting_amount = obtain_amount(io[0]['uri'])
        except Exception as e:
            log.append(str(e))
            starting_amount = 0

        log.append("Starting amount of {} : {} ng".format(io[0]['uri'].samples[0].name, starting_amount))
        current_amount = starting_amount
        #preps
        preps = lims.get_processes(inputartifactlimsid=io[0]['uri'].id, type=["Setup Workset/Plate", "Amount confirmation QC"])
        for pro in preps:
            if pro.id == args.pid:
                continue # skip the current step
            for prepio in pro.input_output_maps:
                if prepio[1]['output-generation-type'] == 'PerInput' and prepio[0]['uri'].id == io[0]['uri'].id:
                    if "Amount taken (ng)" in prepio[1]['uri'].udf: #should always be true
                        prep_amount = prepio[1]['uri'].udf["Amount taken (ng)"]
                        log.append("Removing {} ng for prep {} for sample {}".format(prep_amount, pro.id, io[0]['uri'].samples[0].name))
                        current_amount = current_amount - prep_amount
                    else:
                        log.append("No Amount Taken found for prep {} of sample {}".format(pro.id, io[0]['uri'].samples[0].name))

        if current_amount < 0:
            log.append("Estimated amount for sample {} is {}, correcting to zero".format(io[0]['uri'].samples[0].name, current_amount))
            current_amount = 0

        update_output_values(io[0]['uri'], io[1]['uri'], current_amount)

        with open("amount_check_log.txt", "w") as f:
            f.write("\n".join(log))

        for out in process.all_outputs():
            if out.name == "QC Assignment Log File" :
                for f in out.files:
                    lims.request_session.delete(f.uri)
                lims.upload_new_file(out, "amount_check_log.txt") 
Exemplo n.º 46
0
    def get(self):
        data={}
        lims_url=self.request.query
        lims_id="24-{}".format(lims_url.split("/")[-1])
        mylims = lims.Lims(BASEURI, USERNAME, PASSWORD)
        try:
            p=Process(mylims, id=lims_id)
            if p.type.name!='Setup Workset/Plate':
                raise Exception("Wrong process type")
        except Exception:
            self.set_status(400, reason="Wrong process type : use a Setup Workset/Plate")
            self.finish()
            return

        data['comments']={}
        data['samples']={}
        for i in p.all_inputs():
            sample_name=i.samples[0].name
            if not i.samples[0].project:
                continue
            else:
                project=i.samples[0].project
            if 'Project Comment' in project.udf and project.id not in data['comments']:
                data['comments'][project.id]=project.udf['Project Comment']
            data['samples'][sample_name]={}
            data['samples'][sample_name]['amount']=i.udf['Amount (ng)']
            data['samples'][sample_name]['previous_preps']={}
            if 'Library construction method' in project.udf:
                data['samples'][sample_name]['lib_method']=project.udf['Library construction method']
            if 'Sequencing platform' in project.udf:
                data['samples'][sample_name]['seq_pl']=project.udf['Sequencing platform']
            other_preps=mylims.get_processes(inputartifactlimsid=i.id, type="Setup Workset/Plate")
            for op in other_preps:
                if op.id != p.id:
                    for o in op.all_outputs():
                        if o.type=="Analyte" and o.samples[0].name==sample_name:
                            data['samples'][sample_name]['previous_preps'][o.location[0].name]={}
                            data['samples'][sample_name]['previous_preps'][o.location[0].name]['position']=o.location[1]
                            data['samples'][sample_name]['previous_preps'][o.location[0].name]['amount']=o.udf['Amount taken (ng)']

        self.set_header("Content-type", "application/json")
        self.write(json.dumps(data))
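For reference, the JSON document the handler above assembles has this shape (all ids, names and numbers below are invented for illustration):

example_response = {
    "comments": {"P1001": "Example project comment"},
    "samples": {
        "P1001_101": {
            "amount": 250.0,
            "previous_preps": {
                # keyed by container name; one entry per earlier workset
                "example-workset-plate": {"position": "A:1", "amount": 100.0}
            },
            "lib_method": "Example library construction method",
            "seq_pl": "Example sequencing platform"
        }
    }
}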
Exemplo n.º 47
0
    def post(self, lims_step):
        user = self.get_secure_cookie('user')
        email = self.get_secure_cookie('email')
        a_type = self.get_argument('type', '')
        title = self.get_argument('title', '')
        url = self.get_argument('url', '')
        desc = self.get_argument('desc','')

        if not a_type or not title:
            self.set_status(400)
            self.finish('<html><body>Link title and type are required</body></html>')
        else:
            p = Process(self.lims, id=lims_step)
            p.get(force=True)
            links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}
            links[str(datetime.datetime.now())] = {'user': user,
                                                   'email': email,
                                                   'type': a_type,
                                                   'title': title,
                                                   'url': url,
                                                   'desc': desc}
            p.udf['Links'] = json.dumps(links)
            p.put()
            self.set_status(200)
            #ajax cries if it does not get anything back
            self.set_header("Content-type", "application/json")
            self.finish(json.dumps(links))
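The get/post/delete methods in these handler examples belong to tornado RequestHandler subclasses (get_secure_cookie, set_header and finish are the tornado API). A minimal sketch of how such a handler could be mounted; the class name, route and port are hypothetical:

import tornado.ioloop
import tornado.web

from genologics.config import BASEURI, USERNAME, PASSWORD
from genologics.lims import Lims

class LinksDataHandler(tornado.web.RequestHandler):
    """Hypothetical host class for the get/post methods shown above."""
    def initialize(self, lims):
        # tornado passes the dict from the route spec into initialize()
        self.lims = lims

def make_app():
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    # The regex capture group becomes the lims_step argument of get/post
    return tornado.web.Application([
        (r"/api/v1/links/([^/]*)", LinksDataHandler, dict(lims=lims)),
    ])

if __name__ == "__main__":
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()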
Exemplo n.º 48
0
    def delete(self, workset):
        note_id=self.get_argument('note_id')
        p = Process(self.lims, id=workset)
        p.get(force=True)
        workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
        try:
            self.set_header("Content-type", "application/json")
            del workset_notes[note_id]
            p.udf['Workset Notes'] = json.dumps(workset_notes)
            p.put()
            self.set_status(201)
            self.write(json.dumps(workset_notes))
        except KeyError:
            self.set_status(400)
            self.finish('<html><body>No note found</body></html>')
Exemplo n.º 49
0
    def post(self, workset):
        note = self.get_argument('note', '')
        user = self.get_secure_cookie('user')
        email = self.get_secure_cookie('email')
        if not note:
            self.set_status(400)
            self.finish('<html><body>No workset id or note parameters found</body></html>')
        else:
            newNote = {'user': user, 'email': email, 'note': note}
            p = Process(self.lims, id=workset)
            p.get(force=True)
            workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
            workset_notes[str(datetime.datetime.now())] = newNote
            p.udf['Workset Notes'] = json.dumps(workset_notes)
            p.put()
            self.set_status(201)
            self.write(json.dumps(newNote))
Exemplo n.º 50
0
def main(lims, args):
    log=[]
    thisyear=datetime.now().year
    content = None
    if args.mytest:
        test()
    else:
        process = Process(lims, id=args.pid)
        if process.type.name == 'Cluster Generation (HiSeq X) 1.0':
            header = gen_X_header(process)
            reads = gen_X_reads_info(process)
            (data, obj) = gen_X_lane_data(process)
            check_index_distance(obj, log)
            content = "{}{}{}".format(header, reads, data)
            if os.path.exists("/srv/mfs/samplesheets/HiSeqX/{}".format(thisyear)):
                try:
                    with open("/srv/mfs/samplesheets/HiSeqX/{}/{}.csv".format(thisyear, obj[0]['fc']), 'w') as sf:
                        sf.write(content)
                    os.chmod("/srv/mfs/samplesheets/HiSeqX/{}/{}.csv".format(thisyear, obj[0]['fc']), 0664)
                except Exception as e:
                    log.append(e)

        elif process.type.name == 'Cluster Generation (Illumina SBS) 4.0':
            (content, obj) = gen_Hiseq_lane_data(process)
            check_index_distance(obj, log)
            if os.path.exists("/srv/mfs/samplesheets/{}".format(thisyear)):
                try:
                    with open("/srv/mfs/samplesheets/{}/{}.csv".format(thisyear, obj[0]['fc']), 'w') as sf:
                        sf.write(content)
                    os.chmod("/srv/mfs/samplesheets/{}/{}.csv".format(thisyear, obj[0]['fc']), 0664)
                except Exception as e:
                    log.append(e)
        elif process.type.name == 'Denature, Dilute and Load Sample (MiSeq) 4.0':
            header = gen_Miseq_header(process)
            reads = gen_Miseq_reads(process)
            settings = gen_Miseq_settings(process)
            (data, obj) = gen_Miseq_data(process)
            check_index_distance(obj, log)
            content = "{}{}{}{}".format(header, reads, settings, data)

        if not args.test:
            for out in process.all_outputs():
                if out.name == "Scilifelab SampleSheet" :
                    ss_art = out
                elif out.name == "Scilifelab Log" :
                    log_id= out.id
                elif out.type == "Analyte":
                    fc_name = out.location[0].name

            with open("{}.csv".format(fc_name), "w", 0o664) as f:
                f.write(content)
            os.chmod("{}.csv".format(fc_name),0664)
            for f in ss_art.files:
                lims.request_session.delete(f.uri)
            lims.upload_new_file(ss_art, "{}.csv".format(fc_name)) 
            if log:
                with open("{}_{}_Error.log".format(log_id, fc_name), "w") as f:
                    f.write('\n'.join(log))

                sys.stderr.write("Errors were met, check the log.")
                sys.exit(1)

        else:
            print(content)
            print(log)
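check_index_distance is another helper that is not part of this listing. Given the per-lane sample data (obj) and the log list, it presumably warns when two index sequences in the same lane are too similar to demultiplex; a sketch under that assumption (the 'lane' and 'idx' keys are guesses, only 'fc' is visible in the code above):

from itertools import combinations

def check_index_distance(data, log):
    # Compare every pair of entries that share a lane and count the
    # number of mismatching index positions (Hamming distance).
    for a, b in combinations(data, 2):
        if a.get('lane') != b.get('lane'):
            continue
        idx_a, idx_b = a.get('idx', ''), b.get('idx', '')
        dist = sum(x != y for x, y in zip(idx_a, idx_b))
        if dist < 2:
            log.append("Indexes {} and {} in lane {} are only {} mismatch(es) apart".format(
                idx_a, idx_b, a.get('lane'), dist))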
Exemplo n.º 51
0
def main(lims, args):
    conc_is_local = True
    process = Process(lims, id=args.pid)
    log_art = None
    log = []
    fid = None
    # first, read the fragment analyzer results
    for o in process.all_outputs():
        if o.name == 'CSV Result File':
            try:
                fid = o.files[0].id
            except IndexError:
                sys.exit("Please upload a CSV result file.")
        if o.name == 'Calculation Log':
            log_art = o

    file_contents = lims.get_file_contents(id=fid)
    frag_data = {}
    keys = []
    for line in file_contents.splitlines():
        if not keys:
            keys = line.split(',')
        else:
            values = line.split(',')
            frag_data[values[0]] = {}
            for i in range(1, len(values)):
                frag_data[values[0]][keys[i]] = values[i]
    # Then, read the concentration from the step defined in the process udf
    try:
        conc_process_name = process.udf['Concentration Source']
        conc_is_local = False
    except KeyError:
        conc_is_local = True

    for io in process.input_output_maps:
        if 'Fragment Analyzer' in io[1]['uri'].name and io[1]['output-generation-type']== 'PerInput':
            base_concentration = None
            base_conc_unit = None
            well = io[1]['uri'].location[1].replace(":", "")
            if conc_is_local:
                base_concentration = float(frag_data[well]['ng/uL'])
                base_conc_unit = 'ng/uL'
            else:
                try:
                    concentration_step = lims.get_processes(type=conc_process_name, inputartifactlimsid=io[0]['limsid'])[0]
                except IndexError:
                    log.append("Cannot find a {} step starting with {}".format(conc_process_name, io[0]['limsid']))
                else:
                    for io2 in concentration_step.input_output_maps:
                        if io2[0]['limsid'] == io[0]['limsid'] and "Concentration" in io2[1]['uri'].udf:
                            base_concentration = io2[1]['uri'].udf['Concentration']
                            base_conc_unit = io2[1]['uri'].udf['Conc. Units']
            try:
                io[1]['uri'].udf['Min Size (bp)'] = int(frag_data[well]['Range'].split('to')[0].split('bp')[0].strip())
                io[1]['uri'].udf['Max Size (bp)'] = int(frag_data[well]['Range'].split('to')[1].split('bp')[0].strip())
                if 'Ratio (%)' not in io[1]['uri'].udf:
                    io[1]['uri'].udf['Ratio (%)'] = float(frag_data[well]['% Total'])
                io[1]['uri'].udf['Size (bp)'] = int(frag_data[well]['Avg. Size'])
                io[1]['uri'].put()

                if base_concentration and base_conc_unit:
                    if conc_is_local:
                        io[1]['uri'].udf['Concentration'] = base_concentration
                    else:
                        io[1]['uri'].udf['Concentration'] = base_concentration * (float(io[1]['uri'].udf['Ratio (%)']) / 100.0)
                    io[1]['uri'].udf['Conc. Units'] = base_conc_unit
                    io[1]['uri'].put()
                    log.append("Updated values for output {}".format(io[1]['uri'].name))
                else:
                    log.append("Failed to update the concentration of output {}".format(io[1]['uri'].name))

            except Exception as e:
                log.append("Error updating {} with fragment analyzer data : {}".format(io[1]['uri'].name, e))

    if log:
        with open("{}_frag_analyzer.log".format(log_art.id), "w") as logContext:
            logContext.write("\n".join(log))