コード例 #1
0
def main(lims, args):
    """Run TWIST calculations for a process and report the outcome."""
    process = Process(lims, id=args.pid)
    twist_calc = CalculationsTwist(process)
    twist_calc.get_artifacts(args.calculate)

    # Dispatch on the requested calculation type.
    if args.calculate == 'pooling':
        twist_calc.calculate_volumes_for_pooling()
    elif args.calculate == 'libval':
        twist_calc.calcualate_amount_for_libval()
    elif args.calculate == 'aliquot':
        twist_calc.calculate_volumes_for_aliquot()
    else:
        sys.exit(
            'Non valid argument given. -c can take pooling/libval/aliquot')

    # Build the human-readable summary shown in the LIMS GUI.
    abstract = ''
    if twist_calc.failed:
        missing = ', '.join(list(set(twist_calc.missing_udfs)))
        abstract += 'Failed to perform calculations for ' + str(
            twist_calc.failed
        ) + ' samples. Some of the following udfs are invalid or missing: ' + missing + '. '
    if twist_calc.okej:
        abstract += 'Performed calculations for ' + str(twist_calc.okej) + ' samples.'

    # A non-zero exit marks the step as failed in Clarity.
    if twist_calc.failed:
        sys.exit(abstract)
    else:
        sys.stderr.write(abstract + '\n')
コード例 #2
0
    def post(self, lims_step):
        """Store a user-submitted link in the 'Links' UDF of a LIMS step."""
        user = self.get_current_user()
        link_type = self.get_argument('type', '')
        title = self.get_argument('title', '')
        url = self.get_argument('url', '')
        desc = self.get_argument('desc', '')

        # Title and type are mandatory; reject the request otherwise.
        if not link_type or not title:
            self.set_status(400)
            self.finish('<html><body>Link title and type is required</body></html>')
            return

        process = Process(self.lims, id=lims_step)
        process.get(force=True)
        # Existing links are stored as a JSON object keyed on timestamp.
        links = json.loads(process.udf['Links']) if 'Links' in process.udf else {}
        links[str(datetime.datetime.now())] = {
            'user': user.name,
            'email': user.email,
            'type': link_type,
            'title': title,
            'url': url,
            'desc': desc,
        }
        process.udf['Links'] = json.dumps(links)
        process.put()
        self.set_status(200)
        # ajax cries if it does not get anything back
        self.set_header("Content-type", "application/json")
        self.finish(json.dumps(links))
コード例 #3
0
def main(process_lims_id, demux_id, log_id):
    """Collect demultiplexing stats for a process and write them to LIMS."""
    # Configure the module logger to emit DEBUG output both to a file
    # and to the console, with the same timestamped format.
    basic_name = "{}_logfile.txt".format(log_id)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(basic_name)
    stream_handler = logging.StreamHandler()
    for handler in (stream_handler, file_handler):
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    logger.info("--process_lims_id {} --demux_id {} --log_id {}".format(
        process_lims_id, demux_id, log_id))

    demux_process = Process(lims, id=process_lims_id)
    # Workflow-level information.
    proc_stats = manipulate_workflow(demux_process)
    # Process-level values.
    manipulate_process(demux_process, proc_stats)
    # Demultiplexing output file.
    parser_struct = write_demuxfile(proc_stats, demux_id)
    # Per-artifact values.
    set_sample_values(demux_process, parser_struct, proc_stats)

    # Rename the log last; the flow cell id only becomes known once the
    # stats have been gathered at runtime.
    new_name = "{}_logfile_{}.txt".format(log_id, proc_stats["Flow Cell ID"])
    move(basic_name, new_name)
コード例 #4
0
def main(lims, args, epp_logger):
    """Apply amount calculations to artifacts with valid conc/size UDFs."""
    p = Process(lims, id=args.pid)
    udf_check = 'Conc. Units'
    value_check = 'ng/ul'
    concentration_udf = 'Concentration'
    size_udf = 'Size (bp)'

    # Aggregate QC steps operate on the inputs; otherwise use the
    # measurement ResultFile outputs.
    if args.aggregate:
        artifacts = p.all_inputs(unique=True)
    else:
        artifacts = [a for a in p.all_outputs(unique=True)
                     if a.output_type == "ResultFile"]

    # Successively narrow down to artifacts with all required UDFs set.
    correct_artifacts, no_concentration = check_udf_is_defined(
        artifacts, concentration_udf)
    correct_artifacts, no_size = check_udf_is_defined(correct_artifacts,
                                                      size_udf)
    correct_artifacts, wrong_value = check_udf_has_value(
        correct_artifacts, udf_check, value_check)

    apply_calculations(lims, correct_artifacts, concentration_udf, size_udf,
                       udf_check, epp_logger)

    skipped = len(wrong_value) + len(no_size) + len(no_concentration)
    abstract = ("Updated {ca} artifact(s), skipped {ia} artifact(s) with "
                "wrong and/or blank values for some udfs.").format(
                    ca=len(correct_artifacts), ia=skipped)

    sys.stderr.write(abstract + '\n')  # stderr will be logged and printed in GUI
コード例 #5
0
ファイル: artifact.py プロジェクト: UMCUGenetics/clarity_epp
def set_sequence_name(lims, process_id):
    """Rename each analyte artifact to its sample's sequence name."""
    process = Process(lims, id=process_id)
    for analyte in process.analytes()[0]:
        # Artifacts are read via their first sample only.
        analyte.name = get_sequence_name(analyte.samples[0])
        analyte.put()
コード例 #6
0
def main(lims, pid, epp_logger):
    """Assign Quant-iT QC flags for a process and print a summary.

    Each detected problem contributes one sentence to the abstract that
    is shown in the LIMS GUI; epp_logger is not referenced in this body.
    """
    process = Process(lims, id=pid)
    QiT = QuantitQC(process)
    QiT.assign_QC_flag()
    if QiT.flour_int_missing:
        QiT.abstract.append("Fluorescence intensity is missing for {0} "
                            "samples.".format(QiT.flour_int_missing))
    # The failed-QC count is only reported when no required UDFs are missing.
    if QiT.missing_udfs:
        QiT.abstract.append("Could not set QC flags. Some of the following "
                            "required udfs seems to be missing: {0}.".format(
                                QiT.missing_udfs))
    else:
        QiT.abstract.append("{0} out of {1} samples failed "
                            "QC.".format(QiT.no_failed,
                                         len(process.result_files())))
    if QiT.saturated:
        QiT.abstract.append("{0} samples had saturated fluorescence "
                            "intensity.".format(QiT.saturated))
    if QiT.hig_CV_fract:
        QiT.abstract.append("{0} samples had high %CV.".format(
            QiT.hig_CV_fract))
    if QiT.low_conc:
        QiT.abstract.append("{0} samples had low concentration.".format(
            QiT.low_conc))
    if QiT.conc_missing:
        QiT.abstract.append("Concentration is missing for {0} "
                            "sample(s).".format(QiT.conc_missing))
    # Deduplicate messages; note list(set(...)) does not preserve order.
    QiT.abstract = list(set(QiT.abstract))
    print >> sys.stderr, ' '.join(QiT.abstract)
コード例 #7
0
def main(args, lims, epp_logger):
    """Generate barcode label lines for a process and print or emit them.

    Depending on the flags in ``args``, builds container-id, container-name,
    operator/date and process-name labels, then either writes them to
    stdout or pipes them to the ``lp`` print command.
    """
    p = Process(lims, id=args.pid)
    lines = []
    cs = []
    if args.container_id:
        cs = p.output_containers()
        for c in cs:
            logging.info('Constructing barcode for container {0}.'.format(
                c.id))
            lines += makeContainerBarcode(c.id, copies=1)
    if args.container_name:
        cs = p.output_containers()
        for c in cs:
            logging.info('Constructing name label for container {0}.'.format(
                c.id))
            lines += makeContainerNameBarcode(c.name, copies=1)
    if args.operator_and_date:
        op = p.technician.name
        date = str(datetime.date.today())
        # One label per container when containers are involved.
        copies = len(cs) if cs else args.copies
        lines += makeOperatorAndDateBarcode(op, date, copies=copies)
    if args.process_name:
        pn = p.type.name
        copies = len(cs) if cs else args.copies
        lines += makeProcessNameBarcode(pn, copies=copies)
    if not (args.container_id or args.container_name or args.operator_and_date
            or args.process_name):
        logging.info('No recognized label type given, exiting.')
        sys.exit(-1)
    if not args.use_printer:
        logging.info('Writing to stdout.')
        epp_logger.saved_stdout.write('\n'.join(lines) + '\n')
    elif lines:  # Avoid printing empty files
        lp_args = ["lp"]
        if args.hostname:
            # Remap legacy print-server hostname; remove this when all
            # callers of this script have been updated.
            if args.hostname == 'homer.scilifelab.se:631':
                args.hostname = 'homer2.scilifelab.se:631'
            lp_args += ["-h", args.hostname]
        if args.destination:
            lp_args += ["-d", args.destination]
        lp_args.append("-")  # lp accepts stdin if '-' is given as filename
        logging.info('Ready to call lp for printing.')
        sp = subprocess.Popen(lp_args,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        logging.info('lp command is called for printing.')
        # Bug fix: communicate() writes the input, closes stdin and waits.
        # The old code wrote to stdin manually (risking a pipe deadlock on
        # large output) and then called sp.stdin.close() on a pipe that
        # communicate() had already closed.
        stdout, stderr = sp.communicate(str('\n'.join(lines)))
        logging.info('lp stdout: {0}'.format(stdout))
        logging.info('lp stderr: {0}'.format(stderr))
        logging.info('lp command finished')
コード例 #8
0
def main(lims, pid, epp_logger, App_QC_file):
    """Run application QC for a process and produce the App QC file."""
    app_qc = AppQC(Process(lims, id=pid))
    app_qc.get_app_QC_file()
    app_qc.set_result_file_udfs()
    app_qc.make_App_QC_file(App_QC_file)
    app_qc.logging()
コード例 #9
0
def main(lims, args):
    """Build a Fragment Analyzer driver CSV and attach it to the step."""
    step = Process(lims, id=args.pid)
    driver_file_out = None
    well_to_sample = {}
    used_columns = set()
    for output in step.all_outputs():
        if output.name == "Driver File":
            driver_file_out = output
        elif output.output_type == "ResultFile":
            column = output.location[1].split(":")[0]
            used_columns.add(column)
            # Map 'A:1'-style well positions to 'A1'-style keys.
            well_to_sample[output.location[1].replace(
                ":", "")] = output.samples[0].name

    # One driver row per well of each used column; well 12 defaults to
    # the ladder when it holds no sample.
    rows = []
    for col_idx, column in enumerate(sorted(used_columns)):
        for i in range(1, 13):
            well = "{}{}".format(column, i)
            default = "ladder" if i == 12 else ""
            rows.append((col_idx * 12 + i, well,
                         well_to_sample.get(well, default)))

    with open("frag_an_driver.csv", "w") as f:
        for row in rows:
            f.write("{0},{1},{2}\n".format(row[0], row[1], row[2]))

    lims.upload_new_file(driver_file_out, "frag_an_driver.csv")
コード例 #10
0
ファイル: email.py プロジェクト: UMCUGenetics/clarity_epp
def sequencing_run(lims, email_settings, process_id):
    """Send a QC summary email for a finished sequencing run.

    Builds a message from the run's UDFs and any manager-review
    escalation, then mails it via send_email.
    """
    process = Process(lims, id=process_id)
    artifact = process.all_inputs()[0]

    subject = "LIMS QC Controle - {0}".format(artifact.name)

    message = "Sequencing Run: {0}\n".format(artifact.name)
    message += "Technician: {0}\n".format(process.technician.name)
    message += "LIMS Next Action: {0}\n\n".format(
        process.step.actions.next_actions[0]['action'])

    message += "UDF - Conversie rapport OK?: {0}\n".format(
        process.udf['Conversie rapport OK?'])
    if 'Fouten registratie (uitleg)' in process.udf:
        message += "UDF - Fouten registratie (uitleg): {0}\n".format(
            process.udf['Fouten registratie (uitleg)'])
    if 'Fouten registratie (oorzaak)' in process.udf:
        # Bug fix: this branch previously printed the '(uitleg)' UDF value
        # instead of the '(oorzaak)' UDF it reports on.
        message += "UDF - Fouten registratie (oorzaak): {0}\n".format(
            process.udf['Fouten registratie (oorzaak)'])

    if process.step.actions.escalation:
        message += "\nManager Review LIMS:\n"
        message += "{0}: {1}\n".format(
            process.step.actions.escalation['author'].name,
            process.step.actions.escalation['request'])
        message += "{0}: {1}\n".format(
            process.step.actions.escalation['reviewer'].name,
            process.step.actions.escalation['answer'])

    send_email(email_settings['server'], email_settings['from'],
               email_settings['to_sequencing_run_complete'], subject, message)
コード例 #11
0
def main(args):
    """Copy amount/concentration UDFs from the most recent
    'Amount confirmation QC' step onto the per-input outputs of the
    current process.
    """
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    process = Process(lims, id=args.pid)
    for swp_iomap in process.input_output_maps:
        if swp_iomap[1]['output-generation-type'] != 'PerInput':
            continue
        inp_artifact = swp_iomap[0]['uri']
        amount_check_pros = lims.get_processes(
            type='Amount confirmation QC', inputartifactlimsid=inp_artifact.id)
        # Most recent QC step first.
        amount_check_pros.sort(reverse=True, key=lambda x: x.date_run)
        try:
            correct_amount_check_pro = amount_check_pros[0]
        except IndexError:
            # Bug fix: indexing an empty list raises IndexError, not the
            # KeyError the old code caught, so the message never appeared.
            sys.exit(
                "Cannot find an Amount Confirmation QC step for artifact {}".
                format(inp_artifact.id))
        else:
            for iomap in correct_amount_check_pro.input_output_maps:
                if iomap[1]['output-generation-type'] != 'PerInput':
                    continue
                if iomap[0]['limsid'] == inp_artifact.id:
                    for udf_name in [
                            'Concentration', 'Conc. Units', 'Total Volume (uL)'
                    ]:
                        try:
                            swp_iomap[1]['uri'].udf[udf_name] = iomap[1][
                                'uri'].udf[udf_name]
                        except KeyError:
                            # Replaced a leftover pdb.set_trace() debugging
                            # hook (behind a bare except) with an explicit
                            # failure message.
                            sys.exit(
                                "UDF '{}' missing on QC output for artifact "
                                "{}".format(udf_name, inp_artifact.id))
                    swp_iomap[1]['uri'].udf['Amount taken (ng)'] = iomap[1][
                        'uri'].udf['Amount to take (ng)']
                    swp_iomap[1]['uri'].put()
コード例 #12
0
def main(lims, args, logger):
    """Set the chosen concentration UDF and QC flags on process outputs.

    The QC flag is PASSED only when the concentration satisfies the
    first condition and, if supplied, the second condition too.
    """
    p = Process(lims, id=args.pid)

    operator, threshold = parse_qc_condition(args.qcPassCondition)
    if args.qcPassCondition2:
        operator2, threshold2 = parse_qc_condition(args.qcPassCondition2)

    for output in get_outputs(p):
        concentration = choose_concentration(output, args)
        output.udf[args.concUdfChosen] = concentration

        qc_pass = check_qc_pass(concentration, operator, threshold)
        qc_flag = 'PASSED' if qc_pass else 'FAILED'

        if args.qcPassCondition2:
            # Bug fix: the second check previously re-evaluated the first
            # operator/threshold pair, so qcPassCondition2 had no effect.
            qc_pass2 = check_qc_pass(concentration, operator2, threshold2)
            # set qc flag based on both conditions
            qc_flag = 'PASSED' if (qc_pass and qc_pass2) else 'FAILED'

        output.qc_flag = qc_flag

    # Push all updated outputs back to the LIMS.
    for output in get_outputs(p):
        output.put()
コード例 #13
0
def main(lims, args):
    """Validate NovaSeq protocol settings and set standard volumes/UDFs."""
    settings_check = CheckNovaSettings(Process(lims, id=args.pid))
    settings_check.get_artifacts()
    settings_check.check_protocol_setings()
    settings_check.set_volumes_for_standard()
    settings_check.set_udfs()
コード例 #14
0
 def __init__(self, lims, pid):
     """Initialize with a LIMS connection and a process LIMS id."""
     self.lims = lims
     self.process = Process(lims, id = pid)
     # Input/output artifact pairs of the process.
     self.input_output_maps = self.process.input_output_maps
     self.samples = []
     # Count of samples that failed processing.
     self.failed_samples = 0
     # presumably a Clinical Genomics API client — verify CgFace usage
     self.cgface_obj = CgFace(url=CG_URL)
コード例 #15
0
def main(lims, args):
    """Run qPCR dilution calculations for a process and report the outcome."""
    process = Process(lims, id=args.pid)
    # Use a context manager so the log file is closed even if one of the
    # QpcrDilution steps raises (the old code leaked the handle then).
    with open(args.log, 'a') as log:
        QD = QpcrDilution(process, log)
        QD.get_artifacts()
        QD.make_dilution_data(args.dil_file)
        QD.set_all_samples()

    d = {'ca': QD.passed_arts, 'ia': QD.failed_arts}
    abstract = ("Updated {ca} artifact(s), skipped {ia} artifact(s) with "
                "wrong and/or blank values for some udfs.").format(**d)

    if QD.removed_replicates:
        abstract += ' WARNING: Removed replicate from ' + str(
            QD.removed_replicates) + ' samples. See log file for details.'

    if QD.failed_samples:
        # Typo fix in the user-facing message: 'messurements'.
        abstract += ' WARNING: Failed to set udfs on ' + str(
            QD.failed_samples
        ) + ' samples, due to unstable dilution measurements'

    # A non-zero exit marks the step as failed in Clarity.
    if QD.failed_arts or QD.failed_samples:
        sys.exit(abstract)
    else:
        sys.stderr.write(abstract + '\n')
コード例 #16
0
def samplesheet_capture(lims, process_id, output_file):
    """Create manual pipetting samplesheet for capture protocol."""
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # Reagent name plus its per-well volume, read from the process UDFs.
    reagents = [
        ['Ampligase Buffer 10X', process.udf['Ampligase Buffer 10X']],
        ['MIP pool werkoplossing', process.udf['MIP pool werkoplossing']],
        ['*dNTP 0.25mM', process.udf['*dNTP 0.25mM']],
        ['Hemo Klentaq 10U/ul', process.udf['Hemo Klentaq 10U/ul']],
        ['Ampligase 100U/ul', process.udf['Ampligase 100U/ul']],
        ['Water', process.udf['Water']],
    ]

    # Scale each reagent to the sample count plus 10% overage.
    for row in reagents:
        row.append(sample_count * row[1] * 1.1)

    # Final row: totals of the per-well and scaled volumes.
    reagents.append([
        'ul MM in elke well',
        sum(row[1] for row in reagents),
        sum(row[2] for row in reagents),
    ])

    # Write samplesheet.
    output_file.write('Mastermix\t1\t{0}\n'.format(sample_count))
    for row in reagents:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            row[0], row[1], row[2]))
コード例 #17
0
def main(lims, args, epp_logger):
    """Copy the project-level 'Reference genome' UDF onto samples."""
    pro = Process(lims, id=args.pid)
    source_udf = 'Reference genome'
    destination_udf = 'Reference Genome'

    artifacts = pro.all_inputs(unique=True)
    projects = all_projects_for_artifacts(artifacts)

    # Keep only projects where the source UDF is defined.
    correct_projects, incorrect_udf = check_udf_is_defined(
        projects, source_udf)
    correct_samples = filter_samples(artifacts, correct_projects)

    session = Session(pro, source_udf, destination_udf)
    session.copy_main(correct_samples)

    if incorrect_udf:
        warning = "WARNING: skipped {0} project(s)".format(len(incorrect_udf))
    else:
        warning = "no projects"

    abstract = (
        "Updated {cs} sample(s), {warning} with incorrect udf info.").format(
            cs=len(correct_samples), warning=warning)

    sys.stderr.write(abstract + '\n')  # stderr will be logged and printed in GUI
コード例 #18
0
def sammplesheet_exonuclease(lims, process_id, output_file):
    """Create manual pipetting samplesheet for Exonuclease protocol"""
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # Reagent name plus its per-well volume, read from the process UDFs.
    reagents = [
        ['EXO I', process.udf['EXO I']],
        ['EXO III', process.udf['EXO III']],
        ['Ampligase buffer 10X', process.udf['Ampligase buffer 10X']],
        ['H2O', process.udf['H2O']],
    ]

    # Scale each reagent to the sample count plus 25% overage.
    for row in reagents:
        row.append(sample_count * row[1] * 1.25)

    # Final row: totals of the per-well and scaled volumes.
    reagents.append([
        'TOTAL (incl. 25% overmaat)',
        sum(row[1] for row in reagents),
        sum(row[2] for row in reagents),
    ])

    # Write samplesheet.
    output_file.write('\tMaster Mix (ul)\t{0}\n'.format(sample_count))
    for row in reagents:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            row[0], row[1], row[2]))
コード例 #19
0
def main(lims, args):
    """Compute and store 'Average Size (bp)' for the process artifacts."""
    process = Process(lims, id=args.pid)
    ASBP = AverageSizeBP(process)
    ASBP.get_artifacts()
    ASBP.make_average_size()
    ASBP.set_average_size()
    # Typo fix in the user-facing status message: 'ben' -> 'been'.
    sys.stderr.write("'Average Size (bp)' has been set.\n")
コード例 #20
0
def samplesheet_dilute_library_pool(lims, process_id, output_file):
    """Create manual pipetting samplesheet for sequencing pools."""
    output_file.write('Sample\tContainer\tWell\tul Sample\tul EB\n')
    process = Process(lims, id=process_id)

    output = []  # save pool data to list, to be able to sort on pool number.
    nM_pool = process.udf['Dx Pool verdunning (nM)']
    output_ul = process.udf['Eindvolume (ul)']

    for input in process.all_inputs():
        # Pools are named 'Pool #<n>_...'; unnumbered pools sort first (0).
        search_number = re.search(r'Pool #(\d+)_', input.name)
        if search_number:
            input_number = int(search_number.group(1))
        else:
            input_number = 0
        # Size and concentration come from the first upstream QC artifact.
        qc_artifact = input.input_artifact_list()[0]

        size = float(qc_artifact.udf['Dx Fragmentlengte (bp)'])
        concentration = float(
            qc_artifact.udf['Dx Concentratie fluorescentie (ng/ul)'])

        # ng/ul -> nM conversion: conc * 1e6 / (660 per bp * fragment size).
        nM_dna = (concentration * 1000 * (1 / 660.0) * (1 / size)) * 1000
        # Dilute the pool to nM_pool in a final volume of output_ul.
        ul_sample = (nM_pool / nM_dna) * output_ul
        ul_EB = output_ul - ul_sample

        line = '{pool_name}\t{container}\t{well}\t{ul_sample:.2f}\t{ul_EB:.2f}\n'.format(
            pool_name=input.name,
            container=input.location[0].name,
            well=input.location[1],
            ul_sample=ul_sample,
            ul_EB=ul_EB)
        output.append((input_number, line))

    # Emit lines ordered by pool number.
    for number, line in sorted(output):
        output_file.write(line)
コード例 #21
0
 def __init__(self, original_source, lims, udf_list):
     """Initialize placement-map state for a process.

     NOTE(review): reads a module-level ``args.pid`` rather than taking a
     pid parameter — confirm ``args`` is in scope wherever this class is
     instantiated.
     """
     self.process = Process(lims, id=args.pid)
     self.lims = lims
     # Input/output artifact pairs of the process.
     self.input_output_maps = self.process.input_output_maps
     # Source of the original placements being copied.
     self.orig = original_source
     self.placement_map = {}
     self.udf_list = udf_list
コード例 #22
0
def sammplesheet_pcr_exonuclease(lims, process_id, output_file):
    """Create manual pipetting samplesheet for PCR after Exonuclease protocol"""
    process = Process(lims, id=process_id)
    sample_count = len(process.analytes()[0])

    # Reagent name plus its per-well volume, read from the process UDFs.
    reagents = [
        ['2X iProof', process.udf['2X iProof']],
        [
            'Illumina forward primer(100uM) MIP_OLD_BB_FOR',
            process.udf['Illumina forward primer(100uM) MIP_OLD_BB_FOR']
        ],
        ['H2O', process.udf['H2O']],
    ]

    # Scale each reagent to the sample count plus 10% overage.
    for row in reagents:
        row.append(sample_count * row[1] * 1.1)

    # Totals row; the per-well total carries the extra 10% as well.
    reagents.append([
        'TOTAL (incl. 10% overmaat)',
        sum(row[1] for row in reagents) * 1.1,
        sum(row[2] for row in reagents),
    ])

    # Write samplesheet.
    output_file.write('\tMaster Mix (ul)\t{0}\n'.format(sample_count))
    for row in reagents:
        output_file.write('{0}\t{1:.2f}\t{2:.2f}\n'.format(
            row[0], row[1], row[2]))
コード例 #23
0
ファイル: artifact.py プロジェクト: UMCUGenetics/clarity_epp
def route_to_workflow(lims, process_id, workflow):
    """Route artifacts to a workflow."""
    process = Process(lims, id=process_id)

    # Artifacts whose next action is 'complete' finished this workflow.
    artifacts_completed = []
    for next_action in process.step.actions.get_next_actions():
        if next_action['action'] == 'complete':
            artifacts_completed.append(next_action['artifact'])

    if workflow == 'post_bioinf':
        # Research samples are excluded from the post-bioinf workflow.
        route_artifacts = [
            artifact for artifact in artifacts_completed
            if artifact.samples[0].udf['Dx Stoftest code'] !=
            config.research_stoftest_code
        ]
        target = Workflow(lims, id=config.post_bioinf_workflow)
        lims.route_artifacts(route_artifacts, workflow_uri=target.uri)

    elif workflow == 'sequencing':
        target = Workflow(lims, id=config.sequencing_workflow)
        lims.route_artifacts(artifacts_completed, workflow_uri=target.uri)
コード例 #24
0
def samplesheet_pool_samples(lims, process_id, output_file):
    """Create manual pipetting samplesheet for pooling samples."""
    process = Process(lims, id=process_id)

    # Header line.
    output_file.write('Sample\tContainer\tWell\tPool\n')

    # Group input artifacts per container, keyed on 'A1'-style wells.
    input_containers = {}
    for input_artifact in process.all_inputs(resolve=True):
        container = input_artifact.location[0].name
        well = ''.join(input_artifact.location[1].split(':'))
        input_containers.setdefault(container, {})[well] = input_artifact

    # One line per artifact, ordered by container then by plate well.
    for container_name in sorted(input_containers.keys()):
        wells = input_containers[container_name]
        for well in clarity_epp.export.utils.sort_96_well_plate(wells.keys()):
            artifact = wells[well]
            pool = process.outputs_per_input(artifact.id, Analyte=True)[0]
            output_file.write('{sample}\t{container}\t{well}\t{pool}\n'.format(
                sample=artifact.name,
                container=artifact.location[0].name,
                well=well,
                pool=pool.name))
コード例 #25
0
def main(lims, pid, epp_logger):
    """Calculate Quant-iT concentrations from a fitted standard curve.

    Verifies the linearity (R2) of the standards before uploading
    per-sample fluorescence and concentration values, then prints a
    summary abstract to stderr for the LIMS GUI.
    """
    process = Process(lims, id=pid)
    # Map sample name -> result-file artifact (reads the first sample only).
    target_files = dict((r.samples[0].name, r) for r in process.result_files())
    file_handler = ReadResultFiles(process)
    QiT = QuantitConc(process, file_handler)
    QiT.fit_model()
    QiT.prepare_result_files_dict()
    if QiT.model and 'Linearity of standards' in QiT.udfs.keys():
        # model[0] holds the R2 value used for the linearity check below.
        R2 = QiT.model[0]
        if R2 >= QiT.udfs['Linearity of standards']:
            QiT.abstract.insert(0, "R2 = {0}. Standards OK.".format(R2))
            if QiT.result_files:
                for sample, target_file in target_files.items():
                    rel_fluor_int = QiT.get_and_set_fluor_int(target_file)
                    QiT.calc_and_set_conc(target_file, rel_fluor_int)
                QiT.abstract.append("Concentrations uploaded for {0} "
                                    "samples.".format(QiT.no_samps))
            else:
                QiT.abstract.append("Upload input file(s) for samples.")
        else:
            # Standards failed the linearity threshold; no upload happens.
            QiT.abstract.insert(
                0, "R2 = {0}. Problem with standards! Redo "
                "measurement!".format(R2))
    else:
        QiT.missing_udfs.append('Linearity of standards')
    if QiT.missing_samps:
        QiT.abstract.append("The following samples are missing in Quant-iT "
                            "result File 1 or 2: {0}.".format(', '.join(
                                QiT.missing_samps)))
    if QiT.missing_udfs:
        QiT.abstract.append("Are all of the following udfs set? : {0}".format(
            ', '.join(QiT.missing_udfs)))
    print >> sys.stderr, ' '.join(QiT.abstract)
コード例 #26
0
def results(lims, process_id):
    """Upload bioanalyzer results to artifacts."""
    process = Process(lims, id=process_id)
    sample_measurements = {}

    # Parse File
    for output in process.all_outputs(unique=True):
        if output.name == 'Bioanalyzer Output':
            bioanalyzer_result_file = output.files[0]

            # The CSV interleaves 'Sample Name' and 'Region 1' rows; the
            # most recently seen sample name owns the next region line.
            for line in lims.get_file_contents(
                    bioanalyzer_result_file.id).split('\n'):
                if line.startswith('Sample Name'):
                    sample = line.rstrip().split(',')[1]
                elif line.startswith('Region 1'):
                    line = re.sub(
                        r'"([0-9]+),([0-9\.]+)"', r'\1\2', line
                    )  # Fix remove thousands seperator (,) and quotes ("")
                    # Column 5 of the region row holds the fragment size.
                    size = line.rstrip().split(',')[5]
                    sample_measurements[sample] = int(size)

    # Set UDF
    for artifact in process.all_outputs():
        if artifact.name in sample_measurements:
            artifact.udf['Dx Fragmentlengte (bp)'] = sample_measurements[
                artifact.name]
            artifact.put()
コード例 #27
0
def copy_layout(lims, process_id):
    """Copy placement layout from previous steps."""
    process = Process(lims, id=process_id)
    used_placements = []
    # Get parent container layout
    # NOTE(review): when several parent processes have non-tube
    # containers, the last one iterated wins — confirm this is intended.
    parent_container = None
    for parent_process in process.parent_processes():
        if parent_process:
            for container in parent_process.output_containers():
                if container.type != Containertype(lims, id='2'):  # skip tubes
                    parent_container = container

    if parent_container:
        # Map sample name -> well placement in the parent container.
        parent_placements = {}
        for placement in parent_container.placements:
            sample = parent_container.placements[placement].samples[0].name
            parent_placements[sample] = placement

        # Create new container and copy layout
        new_container = Container.create(lims, type=parent_container.type)
        placement_list = []
        for artifact in process.analytes()[0]:
            sample_name = artifact.samples[0].name
            if sample_name in parent_placements:
                placement = parent_placements[sample_name]
                # Each well is assigned at most once in the new container.
                if placement not in used_placements:
                    placement_list.append([artifact, (new_container, placement)])
                    used_placements.append(placement)

        process.step.placements.set_placement_list(placement_list)
        process.step.placements.post()
コード例 #28
0
ファイル: epp_script.py プロジェクト: chuan-wang/genologics
def main(lims, pid, file):
    """Uploads a given file to the first output artifact of the process

    lims: The LIMS instance
    pid: Process Lims id
    file: File to be attached
    """
    p = Process(lims, id=pid)

    # Fetch all input-output artifact pairs
    io = p.input_output_maps

    # Filter them so that only PerInput output artifacts remains
    io_filtered = [
        x for x in io if x[1]['output-generation-type'] == 'PerInput'
    ]

    # Fetch the first input-output artifact pair
    (input, output) = io_filtered[0]

    # Instantiate the output artifact.
    # Bug fix: Artifact was constructed without the lims handle and the
    # id keyword, matching how Process is instantiated above.
    output_artifact = Artifact(lims, id=output['limsid'])

    # Attach the file.
    # Bug fix: previously referenced the undefined global ``args.file``
    # instead of the ``file`` parameter documented above.
    attach_file(file, output_artifact)
コード例 #29
0
def results(lims, process_id):
    """Upload tapestation results to artifacts."""
    process = Process(lims, id=process_id)
    sample_size_measurements = {}
    sample_concentration_measurements = {}

    # Parse File
    for output in process.all_outputs(unique=True):
        if output.name == 'TapeStation Output':
            tapestation_result_file = output.files[0]
            for line in lims.get_file_contents(
                    tapestation_result_file.id).split('\n'):
                if line.startswith('FileName'):
                    # Header row: locate the size/concentration columns.
                    header = line.split(',')
                    if 'Size [bp]' in header:  # Tapestation compact peak table
                        size_index = header.index('Size [bp]')
                        concentration_index = None
                    else:  # Tapestation compact region table
                        size_index = header.index('Average Size [bp]')
                        try:
                            concentration_index = header.index(
                                u'Conc. [pg/\xb5l]')  # micro sign
                            concentration_correction = 1000  # Used to transform pg/ul to ng/ul
                        except ValueError:
                            concentration_index = header.index(
                                u'Conc. [ng/\xb5l]')  # micro sign
                            concentration_correction = 1
                    sample_index = header.index('Sample Description')

                elif line:
                    # Data row: record size/concentration per sample,
                    # skipping the ladder lane.
                    data = line.split(',')
                    sample = data[sample_index]
                    if sample != 'Ladder':
                        if data[size_index]:
                            size = int(data[size_index])
                            sample_size_measurements[sample] = size
                        if concentration_index and data[concentration_index]:
                            # Correct concentration
                            concentration = float(data[concentration_index]
                                                  ) / concentration_correction
                            sample_concentration_measurements[
                                sample] = concentration

    # Set UDF
    for artifact in process.all_outputs():
        # Skip the samplesheet/report artifacts themselves; sample name
        # is taken as the artifact name up to the first underscore.
        if artifact.name not in [
                'TapeStation Output', 'TapeStation Samplesheet',
                'TapeStation Sampleplots PDF'
        ]:
            sample_name = artifact.name.split('_')[0]
            if sample_name in sample_size_measurements:
                artifact.udf[
                    'Dx Fragmentlengte (bp)'] = sample_size_measurements[
                        sample_name]
            if sample_name in sample_concentration_measurements:
                artifact.udf[
                    'Dx Concentratie fluorescentie (ng/ul)'] = sample_concentration_measurements[
                        sample_name]
            artifact.put()
コード例 #30
0
def main(lims, args):
    """Post a 'Workset' running note per project and attach a log file."""

    p = Process(lims, id=args.pid)
    log = []
    # project id -> list of sample names in this workset.
    datamap = {}
    wsname = None
    # NOTE(review): the "******" literal looks like a redacted format
    # string (presumably built from the technician's first/last name) —
    # confirm the original template against version control.
    username = "******".format(p.technician.first_name,
                                p.technician.last_name)
    user_email = p.technician.email
    for art in p.all_inputs():
        if len(art.samples) != 1:
            log.append(
                "Warning : artifact {0} has more than one sample".format(
                    art.id))
        for sample in art.samples:
            #take care of lamda DNA
            if sample.project:
                if sample.project.id not in datamap:
                    datamap[sample.project.id] = [sample.name]
                else:
                    datamap[sample.project.id].append(sample.name)

    # Workset name = container name of the first output with a location.
    for art in p.all_outputs():
        try:
            wsname = art.location[0].name
            break
        except:
            pass

    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    for pid in datamap:
        pj = Project(lims, id=pid)
        # Singular/plural phrasing for the running note text.
        if len(datamap[pid]) > 1:
            rnt = "{0} samples planned for {1}".format(len(datamap[pid]),
                                                       wsname)
        else:
            rnt = "{0} sample planned for {1}".format(len(datamap[pid]),
                                                      wsname)

        running_note = {
            "note": rnt,
            "user": username,
            "email": user_email,
            "category": "Workset"
        }
        write_note_to_couch(pid, now, running_note, lims.get_uri())
        log.append(
            "Updated project {0} : {1}, {2} samples in this workset".format(
                pid, pj.name, len(datamap[pid])))

    with open("EPP_Notes.log", "w") as flog:
        flog.write("\n".join(log))
    for out in p.all_outputs():
        #attach the log file
        if out.name == "RNotes Log":
            attach_file(os.path.join(os.getcwd(), "EPP_Notes.log"), out)

    sys.stderr.write("Updated {0} projects successfully".format(
        len(list(datamap.keys()))))