def post(self, lims_step):
    """Attach a link to the given LIMS step and return all links as JSON.

    Requires 'type' and 'title' request arguments; 'url' and 'desc' are
    optional. Links are persisted as a JSON blob in the step's 'Links' UDF,
    keyed by submission timestamp.
    """
    user = self.get_secure_cookie('user')
    email = self.get_secure_cookie('email')
    a_type = self.get_argument('type', '')
    title = self.get_argument('title', '')
    url = self.get_argument('url', '')
    desc = self.get_argument('desc', '')
    # Guard clause: both a type and a title are mandatory.
    if not (a_type and title):
        self.set_status(400)
        self.finish('<html><body>Link title and type is required</body></html>')
        return
    process = Process(self.lims, id=lims_step)
    process.get(force=True)
    # Existing links live as serialized JSON inside the 'Links' UDF.
    links = json.loads(process.udf['Links']) if 'Links' in process.udf else {}
    links[str(datetime.datetime.now())] = {
        'user': user,
        'email': email,
        'type': a_type,
        'title': title,
        'url': url,
        'desc': desc,
    }
    process.udf['Links'] = json.dumps(links)
    process.put()
    self.set_status(200)
    #ajax cries if it does not get anything back
    self.set_header("Content-type", "application/json")
    self.finish(json.dumps(links))
def delete(self, workset):
    """Delete one workset note, identified by the 'note_id' request argument.

    Responds 201 with the remaining notes as JSON on success, or 400 when
    the note id does not exist in the 'Workset Notes' UDF.
    """
    note_id = self.get_argument('note_id')
    p = Process(self.lims, id=workset)
    p.get(force=True)
    # Notes are stored as a JSON object in the 'Workset Notes' UDF.
    workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
    try:
        self.set_header("Content-type", "application/json")
        del workset_notes[note_id]
        p.udf['Workset Notes'] = json.dumps(workset_notes)
        p.put()
        self.set_status(201)
        self.write(json.dumps(workset_notes))
    except KeyError:
        # Only a missing note id means "no note found". The previous bare
        # ``except:`` also swallowed unrelated failures (e.g. LIMS errors
        # from put()) and mislabeled them as a missing note.
        self.set_status(400)
        self.finish('<html><body>No note found</body></html>')
def post(self, workset):
    """Add a note to the workset's 'Workset Notes' UDF and echo it back.

    Requires a non-empty 'note' argument; author identity comes from the
    secure cookies. Responds 201 with the new note as JSON.
    """
    note = self.get_argument('note', '')
    user = self.get_secure_cookie('user')
    email = self.get_secure_cookie('email')
    # Guard clause: an empty note is rejected outright.
    if not note:
        self.set_status(400)
        self.finish('<html><body>No workset id or note parameters found</body></html>')
        return
    new_note = {'user': user, 'email': email, 'note': note}
    process = Process(self.lims, id=workset)
    process.get(force=True)
    if 'Workset Notes' in process.udf:
        notes = json.loads(process.udf['Workset Notes'])
    else:
        notes = {}
    # Notes are keyed by submission timestamp.
    notes[str(datetime.datetime.now())] = new_note
    process.udf['Workset Notes'] = json.dumps(notes)
    process.put()
    self.set_status(201)
    self.write(json.dumps(new_note))
def post(self, workset):
    """Store a new note on the workset step.

    Responds 201 with the stored note as JSON, or 400 when the 'note'
    argument is missing or empty.
    """
    note_text = self.get_argument('note', '')
    author = self.get_secure_cookie('user')
    author_email = self.get_secure_cookie('email')
    if note_text:
        entry = {'user': author, 'email': author_email, 'note': note_text}
        step = Process(self.lims, id=workset)
        step.get(force=True)
        # Deserialize existing notes (JSON blob in the UDF), default empty.
        existing = {}
        if 'Workset Notes' in step.udf:
            existing = json.loads(step.udf['Workset Notes'])
        existing[str(datetime.datetime.now())] = entry
        step.udf['Workset Notes'] = json.dumps(existing)
        step.put()
        self.set_status(201)
        self.write(json.dumps(entry))
    else:
        self.set_status(400)
        self.finish(
            '<html><body>No workset id or note parameters found</body></html>'
        )
def results(lims, process_id):
    """Upload magnis export to process.

    Parses the 'Magnis export file' XML attached to the step, copies labware
    lot numbers into process UDFs, checks the sample input strip barcode
    against the 'Barcode sample input strip' UDF and stamps every output
    analyte with a boolean 'Lotnr check' flag.
    """
    process = Process(lims, id=process_id)
    lot_error = False
    # Set while parsing labware; stays None when the export has no
    # 'Index Strip' entry, which previously raised NameError below.
    # A missing strip now simply fails the lot check.
    index_strip_number = None
    for output_file in process.result_files():
        if output_file.name == 'Magnis export file':
            magnis_xml_file = output_file.files[0]
            magnis_data = xmltodict.parse(lims.get_file_contents(magnis_xml_file.id))
            # Save lot numbers and check sample input strip barcode
            for labware in magnis_data['RunInfo']['LabwareInfos']['Labware']:
                if labware['@Name'] == 'Probe Input Strip':
                    process.udf['lot # SureSelect v7'] = labware['@LotNumber']
                elif labware['@Name'] == 'Reagent Plate':
                    process.udf['lot # Magnis Sureselect XT HS reagent plate'] = labware['@LotNumber']
                elif labware['@Name'] == 'Beads/Buffers Plate':
                    process.udf['lot # Magnis SureSelect XT Beads/Buffers Plate'] = labware['@LotNumber']
                elif labware['@Name'] == 'Index Strip':
                    process.udf['lot # Dual BC strip'] = labware['@LotNumber']
                    index_strip_number = int(labware['@IndexStrip'])
                elif labware['@Name'] == 'Reagent Strip':
                    process.udf['lot # BR Oligo strip (blockers)'] = labware['@LotNumber']
                elif (
                    labware['@Name'] == 'Sample Input Strip'
                    and process.udf['Barcode sample input strip'] != labware['@BarCode']
                ):
                    lot_error = True
    # Check sample reagents and fill Lotnr check flag.
    # NOTE(review): reagent label characters [3:5] presumably encode the
    # index strip number — confirm against the label naming scheme.
    for output in process.analytes()[0]:
        label_index_number = int(output.reagent_labels[0][3:5])
        if lot_error or label_index_number != index_strip_number:
            output.udf['Lotnr check'] = False
        else:
            output.udf['Lotnr check'] = True
        output.put()
    process.put()
def main(lims, args):
    """Copy run UDFs from the parent NovaSeq load step and attach run XMLs.

    Copies read/index cycle UDFs from the parent step
    "Load to Flowcell (NovaSeq 6000 v2.0)", then uploads the newest
    RunInfo.xml and RunParameters.xml for the flowcell to the matching
    'Run Info' / 'Run Parameters' result-file outputs.

    Raises:
        RuntimeError: when no matching RunInfo.xml / RunParameters.xml exists.
    """
    process = Process(lims, id=args.pid)
    # Hoist: the parent process is the same object for every lookup below.
    parent = process.parent_processes()[0]
    # Copy Read and index parameters from the step
    # "Load to Flowcell (NovaSeq 6000 v2.0)".
    udfs_to_copy = [
        'Read 1 Cycles',
        'Read 2 Cycles',
        'Index Read 1',
        'Index Read 2'
    ]
    for udf_name in udfs_to_copy:
        if parent.udf.get(udf_name):
            process.udf[udf_name] = parent.udf[udf_name]
    # One put() after all UDFs are set instead of once per copied UDF.
    process.put()
    # Fetch Flowcell ID
    fcid = parent.output_containers()[0].name
    for outart in process.all_outputs():
        if outart.type == 'ResultFile' and outart.name == 'Run Info':
            try:
                lims.upload_new_file(
                    outart,
                    max(glob.glob('/srv/mfs/NovaSeq_data/*{}/RunInfo.xml'.format(fcid)),
                        key=os.path.getctime))
            except ValueError as err:
                # max() raises ValueError on an empty glob result; the bare
                # except here previously hid unrelated errors as well.
                raise RuntimeError("No RunInfo.xml Found!") from err
        elif outart.type == 'ResultFile' and outart.name == 'Run Parameters':
            try:
                lims.upload_new_file(
                    outart,
                    max(glob.glob('/srv/mfs/NovaSeq_data/*{}/RunParameters.xml'.format(fcid)),
                        key=os.path.getctime))
            except ValueError as err:
                raise RuntimeError("No RunParameters.xml Found!") from err
def results(lims, process_id):
    """Upload tecan results to artifacts.

    Parses the 'Tecan Spark Output' file, fits a linear standard curve from
    the 'Dx Tecan std 1'..'std N' wells, writes the R-squared value to the
    process and per-artifact concentrations plus QC flags to the outputs.
    """
    process = Process(lims, id=process_id)
    # list() is required: in Python 3 map() returns a lazy iterator, and the
    # range is indexed ([0]/[1]) in the QC section below, which would raise
    # TypeError on a bare map object.
    concentration_range = list(map(
        float, re.findall(r'[\d\.]+', process.udf['Concentratiebereik (ng/ul)'])))

    # Parse output file
    for output in process.all_outputs(unique=True):
        if output.name == 'Tecan Spark Output':
            tecan_result_file = output.files[0]
            # The file holds two '<>'-separated plate grids, in this order.
            tecan_file_order = ['Dx Fluorescentie (nM)', 'sample_name']
            tecan_file_part = -1
            measurements = {}         # well coordinate -> fluorescence
            sample_measurements = {}  # sample name -> list of fluorescences
            for line in lims.get_file_contents(tecan_result_file.id).data.split('\n'):
                if not line.startswith('<>'):
                    data = line.rstrip().split('\t')
                    for index, value in enumerate(data[1:]):
                        value = value.rstrip()
                        if value:
                            coordinate = '{0}{1}'.format(data[0], str(index))
                            if tecan_file_order[tecan_file_part] == 'Dx Fluorescentie (nM)':
                                measurements[coordinate] = float(value)
                            elif tecan_file_order[tecan_file_part] == 'sample_name':
                                if value not in sample_measurements:
                                    sample_measurements[value] = [measurements[coordinate]]
                                else:
                                    sample_measurements[value].append(measurements[coordinate])
                else:
                    # '<>' marks the start of the next grid in the file.
                    tecan_file_part += 1

            # Calculate linear regression for concentration
            # Assumes no std duplicates
            baseline_fluorescence = sample_measurements['Dx Tecan std 1'][0]
            fluorescence_values = [
                sample_measurements['Dx Tecan std 1'][0] - baseline_fluorescence,
                sample_measurements['Dx Tecan std 2'][0] - baseline_fluorescence,
                sample_measurements['Dx Tecan std 3'][0] - baseline_fluorescence,
                sample_measurements['Dx Tecan std 4'][0] - baseline_fluorescence,
                sample_measurements['Dx Tecan std 5'][0] - baseline_fluorescence,
                sample_measurements['Dx Tecan std 6'][0] - baseline_fluorescence,
            ]
            if process.udf['Reagentia kit'] == 'Quant-iT High-Sensitivity dsDNA kit':
                ng_values = [0, 5, 10, 20, 40, 60, 80, 100]
                fluorescence_values.append(sample_measurements['Dx Tecan std 7'][0] - baseline_fluorescence)
                fluorescence_values.append(sample_measurements['Dx Tecan std 8'][0] - baseline_fluorescence)
            elif process.udf['Reagentia kit'] == 'Quant-iT Broad Range dsDNA kit':
                ng_values = [0, 50, 100, 200, 400, 600]
            # NOTE(review): ng_values is unbound if 'Reagentia kit' holds any
            # other value — confirm the UDF is a closed dropdown in the LIMS.

            # Least-squares slope through the origin and its R-squared.
            regression_slope = (
                sum([x * y for x, y in zip(fluorescence_values, ng_values)])
                / sum([x ** 2 for x in fluorescence_values]))
            rsquared = 1 - (
                sum([(y - x * regression_slope) ** 2 for x, y in zip(fluorescence_values, ng_values)])
                / sum([y ** 2 for y in ng_values]))

            # Set udf values
            process.udf['R-squared waarde'] = rsquared
            process.put()

            artifact_count = {}
            for artifact in process.all_outputs():
                if artifact.name not in ['Tecan Spark Output', 'Tecan Spark Samplesheet', 'check gemiddelde concentratie', 'Label plaat']:
                    if len(artifact.samples) == 1:
                        # Remove 'meet_id' from artifact name if artifact is not a pool
                        artifact_name = artifact.name.split('_')[0]
                    else:
                        artifact_name = artifact.name

                    # Set Average Concentratie fluorescentie
                    sample_fluorescence = sum(sample_measurements[artifact_name]) / float(len(sample_measurements[artifact_name]))
                    sample_concentration = ((sample_fluorescence - baseline_fluorescence) * regression_slope) / 2.0
                    artifact.udf['Dx Concentratie fluorescentie (ng/ul)'] = sample_concentration

                    # Set artifact Concentratie fluorescentie
                    # Get artifact index == count
                    if artifact_name not in artifact_count:
                        artifact_count[artifact_name] = 0
                    else:
                        artifact_count[artifact_name] += 1
                    artifact_fluorescence = sample_measurements[artifact_name][artifact_count[artifact_name]]
                    artifact_concentration = ((artifact_fluorescence - baseline_fluorescence) * regression_slope) / 2.0
                    artifact.udf['Dx Conc. goedgekeurde meting (ng/ul)'] = artifact_concentration

                    # Set QC flags
                    if artifact_name.startswith('Dx Tecan std'):
                        artifact.qc_flag = 'PASSED'
                        std_number = int(artifact_name.split(' ')[3])
                        artifact.udf['Dx Conc. goedgekeurde meting (ng/ul)'] = ng_values[std_number - 1]
                        artifact.udf['Dx Concentratie fluorescentie (ng/ul)'] = ng_values[std_number - 1]
                    else:
                        # Calculate measurement deviation from average.
                        if concentration_range[0] <= sample_concentration <= concentration_range[1]:
                            if len(sample_measurements[artifact_name]) == 1:
                                artifact.qc_flag = 'PASSED'
                            elif len(sample_measurements[artifact_name]) == 2:
                                artifact_fluorescence_difference = abs(sample_measurements[artifact_name][0] - sample_measurements[artifact_name][1])
                                artifact_fluorescence_deviation = artifact_fluorescence_difference / sample_fluorescence
                                # Duplicates must agree within 10% of the mean.
                                if artifact_fluorescence_deviation <= 0.1:
                                    artifact.qc_flag = 'PASSED'
                                else:
                                    artifact.qc_flag = 'FAILED'
                        else:
                            artifact.qc_flag = 'FAILED'

                    artifact.put()