class WorksetNotesDataHandler(SafeHandler):
    """Serves all notes from a given workset.
    It connects to the genologics LIMS to fetch and update Workset Notes information.
    URL: /api/v1/workset_notes/([^/]*)
    """
    lims = lims.Lims(BASEURI, USERNAME, PASSWORD)

    def get(self, workset):
        self.set_header("Content-type", "application/json")
        p = Process(self.lims, id=workset)
        p.get(force=True)
        # Sorted running notes, by date
        workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
        sorted_workset_notes = OrderedDict()
        for k, v in sorted(workset_notes.items(), key=lambda t: t[0], reverse=True):
            sorted_workset_notes[k] = v
        self.write(sorted_workset_notes)

    def post(self, workset):
        note = self.get_argument('note', '')
        user = self.get_secure_cookie('user')
        email = self.get_secure_cookie('email')
        if not note:
            self.set_status(400)
            self.finish('<html><body>No workset id or note parameters found</body></html>')
        else:
            newNote = {'user': user, 'email': email, 'note': note}
            p = Process(self.lims, id=workset)
            p.get(force=True)
            workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
            workset_notes[str(datetime.datetime.now())] = newNote
            p.udf['Workset Notes'] = json.dumps(workset_notes)
            p.put()
            self.set_status(201)
            self.write(json.dumps(newNote))

    def delete(self, workset):
        note_id = self.get_argument('note_id')
        p = Process(self.lims, id=workset)
        p.get(force=True)
        workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
        try:
            self.set_header("Content-type", "application/json")
            del workset_notes[note_id]
            p.udf['Workset Notes'] = json.dumps(workset_notes)
            p.put()
            self.set_status(201)
            self.write(json.dumps(workset_notes))
        except KeyError:
            self.set_status(400)
            self.finish('<html><body>No note found</body></html>')
def login(context):
    BASEURI, USERNAME, PASSWORD, _, _ = config.load_config(sys.path[0] + '/genologics.conf')
    limslogin = lims.Lims(BASEURI, USERNAME, PASSWORD)
    # assert limslogin.check_version() is not None
    return limslogin
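# Minimal usage sketch (not part of the original code): obtain an
# authenticated Lims instance. check_version() is the connectivity check
# hinted at by the commented-out assertion above.
connection = login(None)  # the 'context' argument is unused by login()
print(connection.baseuri)  # the BASEURI read from genologics.conf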
class WorksetLinksHandler(SafeHandler):
    """Serves external links for each project.
    Links are stored as JSON in the genologics LIMS / project.
    URL: /api/v1/workset_links/([^/]*)
    """
    lims = lims.Lims(BASEURI, USERNAME, PASSWORD)

    def get(self, lims_step):
        self.set_header("Content-type", "application/json")
        p = Process(self.lims, id=lims_step)
        p.get(force=True)
        links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}
        # Sort by descending date, then hopefully have deviations on top
        sorted_links = OrderedDict()
        for k, v in sorted(links.items(), key=lambda t: t[0], reverse=True):
            sorted_links[k] = v
        sorted_links = OrderedDict(sorted(sorted_links.items(), key=lambda kv: kv[1]['type']))
        self.write(sorted_links)

    def post(self, lims_step):
        user = self.get_secure_cookie('user')
        email = self.get_secure_cookie('email')
        a_type = self.get_argument('type', '')
        title = self.get_argument('title', '')
        url = self.get_argument('url', '')
        desc = self.get_argument('desc', '')
        if not a_type or not title:
            self.set_status(400)
            self.finish('<html><body>Link title and type are required</body></html>')
        else:
            p = Process(self.lims, id=lims_step)
            p.get(force=True)
            links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}
            links[str(datetime.datetime.now())] = {'user': user,
                                                   'email': email,
                                                   'type': a_type,
                                                   'title': title,
                                                   'url': url,
                                                   'desc': desc}
            p.udf['Links'] = json.dumps(links)
            p.put()
            self.set_status(200)
            # ajax cries if it does not get anything back
            self.set_header("Content-type", "application/json")
            self.finish(json.dumps(links))
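# Illustration (not part of the handler): the double sort above relies on
# Python's stable sort. Entries are first ordered by date descending, then
# re-sorted by 'type'; within each type the newest-first order is preserved.
from collections import OrderedDict

links = {
    "2020-01-02": {"type": "deviation", "title": "b"},
    "2020-01-03": {"type": "other", "title": "c"},
    "2020-01-01": {"type": "deviation", "title": "a"},
}
by_date = OrderedDict(sorted(links.items(), key=lambda t: t[0], reverse=True))
by_type = OrderedDict(sorted(by_date.items(), key=lambda kv: kv[1]['type']))
print(list(by_type))  # ['2020-01-02', '2020-01-01', '2020-01-03']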
def get(self):
    data = {}
    lims_url = self.request.query
    lims_id = "24-{}".format(lims_url.split("/")[-1])
    mylims = lims.Lims(BASEURI, USERNAME, PASSWORD)
    try:
        p = Process(mylims, id=lims_id)
        if p.type.name != 'Setup Workset/Plate':
            raise Exception("Wrong process type")
    except Exception:
        self.set_status(400, reason="Wrong process type: use a Setup Workset/Plate")
        self.finish()
        return
    data['comments'] = {}
    data['samples'] = {}
    for i in p.all_inputs():
        sample_name = i.samples[0].name
        if not i.samples[0].project:
            continue
        project = i.samples[0].project
        if 'Project Comment' in project.udf and project.id not in data['comments']:
            data['comments'][project.id] = project.udf['Project Comment']
        data['samples'][sample_name] = {}
        data['samples'][sample_name]['amount'] = i.udf['Amount (ng)']
        data['samples'][sample_name]['previous_preps'] = {}
        if 'Library construction method' in project.udf:
            data['samples'][sample_name]['lib_method'] = project.udf['Library construction method']
        if 'Sequencing platform' in project.udf:
            data['samples'][sample_name]['seq_pl'] = project.udf['Sequencing platform']
        other_preps = mylims.get_processes(inputartifactlimsid=i.id, type="Setup Workset/Plate")
        for op in other_preps:
            if op.id != p.id:
                for o in op.all_outputs():
                    if o.type == "Analyte" and o.samples[0].name == sample_name:
                        prev = data['samples'][sample_name]['previous_preps']
                        prev[o.location[0].name] = {
                            'position': o.location[1],
                            'amount': o.udf['Amount taken (ng)'],
                        }
    self.set_header("Content-type", "application/json")
    self.write(json.dumps(data))
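# Illustrative response shape for the handler above; ids, names and values
# are made up, but the keys mirror the code.
example_response = {
    "comments": {"P12345": "Example project comment"},
    "samples": {
        "P12345_101": {
            "amount": 250,                       # 'Amount (ng)' UDF of the input
            "lib_method": "Example lib method",  # present only if set on the project
            "seq_pl": "Example platform",        # present only if set on the project
            "previous_preps": {
                "27-123456": {"position": "A:1", "amount": 100},
            },
        },
    },
}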
def get(self):
    limsg = lims.Lims(BASEURI, USERNAME, PASSWORD)
    queues = {}
    queues['TruSeqRNAprep'] = Queue(limsg, id='311')
    queues['TruSeqSmallRNA'] = Queue(limsg, id='410')
    queues['TruSeqDNAPCR_free'] = Queue(limsg, id='407')
    queues['ThruPlex'] = Queue(limsg, id='451')
    queues['Genotyping'] = Queue(limsg, id='901')
    queues['RadSeq'] = Queue(limsg, id='1201')
    queues['SMARTerPicoRNA'] = Queue(limsg, id='1551')
    queues['ChromiumGenomev2'] = Queue(limsg, id='1801')

    methods = queues.keys()
    pools = {}
    for method in methods:
        pools[method] = {}
        for artifact in queues[method].artifacts:
            name = artifact.name
            project = artifact.name.split('_')[0]
            if project in pools[method]:
                pools[method][project]['samples'].append(name)
            else:
                total_num_samples = limsg.get_sample_number(projectlimsid=project)
                proj = Project(limsg, id=project)
                try:
                    date_queued = proj.udf['Queued'].strftime("%Y-%m-%d")
                except KeyError:
                    # 'Queued' should really be set on a project at this point, but mistakes happen
                    date_queued = None
                projName = proj.name
                pools[method][project] = {'total_num_samples': total_num_samples,
                                          'queued_date': date_queued,
                                          'pname': projName,
                                          'samples': [name]}

    self.set_header("Content-type", "application/json")
    self.write(json.dumps(pools))
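# Illustrative response shape for the handler above (made-up values):
# one entry per method, keyed by the project LIMS id that prefixes each
# artifact name.
example_pools = {
    "TruSeqRNAprep": {
        "P12345": {
            "total_num_samples": 96,
            "queued_date": "2020-01-15",  # None if the 'Queued' UDF is missing
            "pname": "Example_project_name",
            "samples": ["P12345_101", "P12345_102"],
        },
    },
}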
def post(self):
    data = json.loads(self.request.body)
    mylims = lims.Lims(BASEURI, USERNAME, PASSWORD)
    lims_id = "24-{}".format(data['lims_url'].split("/")[-1])
    st = Step(mylims, id=lims_id)
    cont = st.placements.selected_containers[0]
    pll = st.placements.placement_list
    for artl in pll:
        sample = artl[0].samples[0].name
        for row_idx in range(0, len(data['mat'])):
            for col_idx in range(0, len(data['mat'][row_idx])):
                if data['mat'][row_idx][col_idx] == sample:
                    # Convert the matrix position to a plate coordinate, e.g. (0, 0) -> "A:1"
                    location = (cont, "{}:{}".format(chr(row_idx + 65), col_idx + 1))
                    artl[1] = location
    st.placements.placement_list = pll
    st.placements.post()
    self.set_header("Content-type", "application/json")
    self.write(json.dumps({'status': 'ok'}))
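# Hypothetical request body for the handler above. 'mat' is a row-major
# matrix of sample names; mat[0][0] is placed in well "A:1", mat[1][2] in
# "B:3", and so on (chr(0 + 65) == 'A'). The URL and names are made up.
example_payload = {
    "lims_url": "https://lims.example.com/clarity/work-details/24-123456",
    "mat": [
        ["P12345_101", "P12345_102"],
        ["P12345_103", "P12345_104"],
    ],
}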
"""Set of handlers related with Flowcells """ import tornado.web import json import datetime from dateutil.relativedelta import relativedelta from genologics.entities import Container from genologics import lims from genologics.config import BASEURI, USERNAME, PASSWORD from collections import OrderedDict from status.util import SafeHandler from status.projects import RunningNotesDataHandler lims = lims.Lims(BASEURI, USERNAME, PASSWORD) thresholds = { 'HiSeq X': 320, 'RapidHighOutput': 188, 'HighOutput': 143, 'RapidRun': 114, 'MiSeq Version3': 18, 'MiSeq Version2': 10, 'MiSeq Version2Nano': 0.75, 'NovaSeq SP': 325, 'NovaSeq S1': 650, 'NovaSeq S2': 1650, 'NovaSeq S4': 2000, 'NextSeq Mid': 25, 'NextSeq High': 75,
class WorksetNotesDataHandler(SafeHandler):
    """Serves all notes from a given workset.
    It connects to LIMS to fetch and update Workset Notes information.
    URL: /api/v1/workset_notes/([^/]*)
    """
    lims = lims.Lims(BASEURI, USERNAME, PASSWORD)

    def get(self, workset):
        self.set_header("Content-type", "application/json")
        p = Process(self.lims, id=workset)
        p.get(force=True)
        # Sorted running notes, by date
        workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
        sorted_workset_notes = OrderedDict()
        for k, v in sorted(workset_notes.items(), key=lambda t: t[0], reverse=True):
            sorted_workset_notes[k] = v
        self.write(sorted_workset_notes)

    def post(self, workset):
        note = self.get_argument('note', '')
        user = self.get_current_user()
        category = self.get_argument('category', 'Workset')
        if category == '':
            category = 'Workset'
        workset_data = WorksetDataHandler.get_workset_data(self.application, workset)
        projects = list(workset_data[workset]['projects'])
        workset_name = workset_data[workset]['name']
        if not note:
            self.set_status(400)
            self.finish('<html><body>No workset id or note parameters found</body></html>')
        else:
            newNote = {'user': user.name, 'email': user.email, 'note': note, 'category': category}
            p = Process(self.lims, id=workset)
            p.get(force=True)
            workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
            workset_notes[str(datetime.datetime.now())] = newNote
            p.udf['Workset Notes'] = json.dumps(workset_notes)
            p.put()
            # Save the running note in statusdb per workset as well to be able
            # to quickly show it in the worksets list
            v = self.application.worksets_db.view("worksets/lims_id", key=workset)
            doc_id = v.rows[0].id
            doc = self.application.worksets_db.get(doc_id)
            doc['Workset Notes'] = json.dumps(workset_notes)
            self.application.worksets_db.save(doc)
            workset_link = '<a class="text-decoration-none" href="/workset/{0}">{0}</a>'.format(workset_name)
            project_note = "#####*Running note posted on workset {}:*\n".format(workset_link)
            project_note += note
            for project_id in projects:
                RunningNotesDataHandler.make_project_running_note(
                    self.application, project_id,
                    project_note, category,
                    user.name, user.email
                )
            self.set_status(201)
            self.write(json.dumps(newNote))

    def delete(self, workset):
        note_id = self.get_argument('note_id')
        p = Process(self.lims, id=workset)
        p.get(force=True)
        workset_notes = json.loads(p.udf['Workset Notes']) if 'Workset Notes' in p.udf else {}
        try:
            self.set_header("Content-type", "application/json")
            del workset_notes[note_id]
            p.udf['Workset Notes'] = json.dumps(workset_notes)
            p.put()
            self.set_status(201)
            self.write(json.dumps(workset_notes))
        except KeyError:
            self.set_status(400)
            self.finish('<html><body>No note found</body></html>')
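# Hypothetical client-side call against the endpoint above; the host,
# workset id and cookie value are made up. A real call needs the signed
# cookies of a logged-in session.
import requests

session_cookies = {"user": "<signed-cookie-from-a-logged-in-session>"}
resp = requests.post(
    "https://genomics-status.example.com/api/v1/workset_notes/24-123456",
    data={"note": "Re-ran failed samples", "category": "Workset"},
    cookies=session_cookies,
)
print(resp.status_code)  # 201 on success, per the handler above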
def get(self):
    limsl = lims.Lims(BASEURI, USERNAME, PASSWORD)
    # qPCR queues
    queues = {}
    queues['MiSeq'] = Queue(limsl, id='1002')
    queues['NovaSeq'] = Queue(limsl, id='1666')
    queues['LibraryValidation'] = Queue(limsl, id='41')

    methods = queues.keys()
    pools = {}
    for method in methods:
        pools[method] = {}
        if queues[method].artifacts:
            tree = ET.fromstring(queues[method].xml())
            if tree.find('next-page') is not None:
                flag = True
                next_page_uri = tree.find('next-page').attrib['uri']
                while flag:
                    next_page = ET.fromstring(Queue(limsl, uri=next_page_uri).xml())
                    for elem in next_page.findall('artifacts'):
                        tree.insert(0, elem)
                    if next_page.find('next-page') is not None:
                        next_page_uri = next_page.find('next-page').attrib['uri']
                    else:
                        flag = False
            for artifact in tree.iter('artifact'):
                queue_time = artifact.find('queue-time').text
                container = Container(limsl, uri=artifact.find('location').find('container').attrib['uri']).name
                art = Artifact(limsl, uri=artifact.attrib['uri'])
                value = artifact.find('location').find('value').text
                library_type = ''
                runmode = ''
                if 'lambda DNA' not in art.name:
                    library_type = art.samples[0].project.udf["Library construction method"]
                    try:
                        runmode = art.samples[0].project.udf['Sequencing platform']
                    except KeyError:
                        runmode = 'NA'
                if container in pools[method]:
                    pools[method][container]['samples'].append({'name': art.name,
                                                                'well': value,
                                                                'queue_time': queue_time})
                    if library_type and library_type not in pools[method][container]['library_types']:
                        pools[method][container]['library_types'].append(library_type)
                    if runmode and runmode not in pools[method][container]['runmodes']:
                        pools[method][container]['runmodes'].append(runmode)
                else:
                    pools[method][container] = {'samples': [{'name': art.name,
                                                             'well': value,
                                                             'queue_time': queue_time}],
                                                'library_types': [library_type],
                                                'runmodes': [runmode]}

    self.set_header("Content-type", "application/json")
    self.write(json.dumps(pools))
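# The pagination dance above (follow 'next-page' links and splice the
# 'artifacts' elements into the first tree) recurs in several handlers.
# Sketch of a shared helper; the name and placement are suggestions, not
# existing code.
import xml.etree.ElementTree as ET

def merged_queue_tree(lims_instance, queue):
    tree = ET.fromstring(queue.xml())
    page = tree
    while page.find('next-page') is not None:
        next_uri = page.find('next-page').attrib['uri']
        page = ET.fromstring(Queue(lims_instance, uri=next_uri).xml())
        for elem in page.findall('artifacts'):
            tree.insert(0, elem)
    return tree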
def get(self):
    limsl = lims.Lims(BASEURI, USERNAME, PASSWORD)
    # Sequencing queues are currently taken as the following:
    # MiSeq - Step 7: Denature, Dilute and load sample
    # NovaSeq - Step 11: Load to flow cell
    queues = {}
    queues['MiSeq'] = Queue(limsl, id='55')
    queues['NovaSeq'] = Queue(limsl, id='1662')

    methods = queues.keys()
    pools = {}
    for method in methods:
        pools[method] = {}
        if queues[method].artifacts:
            tree = ET.fromstring(queues[method].xml())
            for artifact in tree.iter('artifact'):
                queue_time = artifact.find('queue-time').text
                container = Container(limsl, uri=artifact.find('location').find('container').attrib['uri']).name
                attr_name = Artifact(limsl, uri=artifact.attrib['uri']).name
                value = artifact.find('location').find('value').text
                proj_and_samples = {}
                conc_qpcr = ''
                is_rerun = False
                art = Artifact(limsl, uri=artifact.attrib['uri'])
                if method == 'MiSeq':
                    # Finished library
                    if 'Concentration' in dict(art.udf.items()).keys():
                        conc_qpcr = art.udf['Concentration']
                    # In-house library
                    elif 'Pool Conc. (nM)' in dict(art.udf.items()).keys():
                        conc_qpcr = str(art.udf['Pool Conc. (nM)'])
                    else:
                        pass
                    is_rerun = art.udf["Rerun"]
                elif method == 'NovaSeq':
                    if 'Concentration' in dict(art.udf.items()).keys():
                        conc_qpcr = art.udf["Concentration"]
                        if 'Rerun' in dict(art.udf.items()).keys():
                            is_rerun = art.udf["Rerun"]
                    else:
                        new_art = art.parent_process.input_output_maps[0][0]
                        # The loop iterates 4 times as the values were found within the first 4 preceding
                        # parent processes (through trial and error). If the values are not found within 4
                        # iterations, they can be looked up manually in LIMS. The loop is structured this
                        # way as it is not very clear in the genologics API which of the parent processes
                        # will contain the values in post process, and 4 seemed to get everything for the
                        # data at hand.
                        i = 0
                        while i < 4:
                            if 'Concentration' in dict(new_art['post-process-uri'].udf.items()).keys():
                                conc_qpcr = new_art['post-process-uri'].udf["Concentration"]
                                if 'Rerun' in dict(new_art['post-process-uri'].udf.items()).keys():
                                    is_rerun = new_art['post-process-uri'].udf["Rerun"]
                                break
                            else:
                                new_art = new_art['parent-process'].input_output_maps[0][0]
                                i = i + 1
                for sample in art.samples:
                    project = sample.project.id
                    if project in pools[method]:
                        if container in pools[method][project]['plates']:
                            pools[method][project]['plates'][container]['samples'].append(sample.name)
                        else:
                            pools[method][project]['plates'][container] = {'samples': [sample.name],
                                                                           'well': value,
                                                                           'queue_time': queue_time,
                                                                           'conc_pool_qpcr': conc_qpcr,
                                                                           'is_rerun': is_rerun}
                    else:
                        setup = sample.project.udf['Sequencing setup']
                        lanes = sample.project.udf['Sequence units ordered (lanes)']
                        librarytype = sample.project.udf['Library construction method']
                        runmode = sample.project.udf['Sequencing platform']
                        final_loading_conc = 'TBD'
                        if method == 'NovaSeq':
                            try:
                                final_loading_conc = Artifact(limsl, uri=artifact.attrib['uri']).udf['Final Loading Concentration (pM)']
                            except KeyError:
                                pass
                        pools[method][project] = {'name': sample.project.name,
                                                  'setup': setup,
                                                  'lanes': lanes,
                                                  'runmode': runmode,
                                                  'final_loading_conc': final_loading_conc,
                                                  'librarytype': librarytype,
                                                  'plates': {container: {'samples': [sample.name],
                                                                         'well': value,
                                                                         'queue_time': queue_time,
                                                                         'conc_pool_qpcr': conc_qpcr,
                                                                         'is_rerun': is_rerun}}}

    self.set_header("Content-type", "application/json")
    self.write(json.dumps(pools))
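# The parent-process walk above could be factored into a helper. Sketch
# using the same input-map keys as the handler; the helper name and the
# single-UDF interface are made up.
def find_udf_in_ancestry(artifact, udf_name, max_depth=4):
    new_art = artifact.parent_process.input_output_maps[0][0]
    for _ in range(max_depth):
        udfs = dict(new_art['post-process-uri'].udf.items())
        if udf_name in udfs:
            return udfs[udf_name]
        new_art = new_art['parent-process'].input_output_maps[0][0]
    return None  # not found within max_depth; look it up manually in LIMS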
def get(self):
    limsl = lims.Lims(BASEURI, USERNAME, PASSWORD)
    # qPCR queues
    queues = {}
    queues['MiSeq'] = Queue(limsl, id='1002')
    queues['NovaSeq'] = Queue(limsl, id='1666')
    queues['LibraryValidation'] = Queue(limsl, id='41')

    methods = queues.keys()
    pools = {}
    qpcr_control_names = ['AM7852', 'E.Coli genDNA', 'Endogenous Positive Control',
                          'Exogenous Positive Control', 'Human Brain Reference RNA',
                          'lambda DNA', 'mQ Negative Control', 'NA10860', 'NA11992',
                          'NA11993', 'NA12878', 'NA12891', 'NA12892',
                          'No Amplification Control', 'No Reverse Transcriptase Control',
                          'No Template Control', 'PhiX v3',
                          'Universal Human Reference RNA', 'lambda DNA (qPCR)']
    for method in methods:
        pools[method] = {}
        if queues[method].artifacts:
            tree = ET.fromstring(queues[method].xml())
            if tree.find('next-page') is not None:
                flag = True
                next_page_uri = tree.find('next-page').attrib['uri']
                while flag:
                    next_page = ET.fromstring(Queue(limsl, uri=next_page_uri).xml())
                    for elem in next_page.findall('artifacts'):
                        tree.insert(0, elem)
                    if next_page.find('next-page') is not None:
                        next_page_uri = next_page.find('next-page').attrib['uri']
                    else:
                        flag = False
            for artifact in tree.iter('artifact'):
                queue_time = artifact.find('queue-time').text
                container = Container(limsl, uri=artifact.find('location').find('container').attrib['uri']).name
                art = Artifact(limsl, uri=artifact.attrib['uri'])
                value = artifact.find('location').find('value').text
                library_type = ''
                runmode = ''
                # Skip if the artifact is a control
                if art.name in qpcr_control_names:
                    continue
                library_type = art.samples[0].project.udf.get("Library construction method", 'NA')
                try:
                    runmode = art.samples[0].project.udf['Sequencing platform']
                except KeyError:
                    runmode = 'NA'
                if container in pools[method]:
                    pools[method][container]['samples'].append({'name': art.name,
                                                                'well': value,
                                                                'queue_time': queue_time})
                    if library_type and library_type not in pools[method][container]['library_types']:
                        pools[method][container]['library_types'].append(library_type)
                    if runmode and runmode not in pools[method][container]['runmodes']:
                        pools[method][container]['runmodes'].append(runmode)
                else:
                    pools[method][container] = {'samples': [{'name': art.name,
                                                             'well': value,
                                                             'queue_time': queue_time}],
                                                'library_types': [library_type],
                                                'runmodes': [runmode]}

    self.set_header("Content-type", "application/json")
    self.write(json.dumps(pools))