Example #1
 def get(self, project):
     self.set_header("Content-type", "application/json")
     p = Project(lims, id=project)
     p.get(force=True)
     # Sorted running notes, by date
     running_notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
     sorted_running_notes = OrderedDict()
     for k, v in sorted(running_notes.iteritems(), key=lambda t: t[0], reverse=True):
         sorted_running_notes[k] = v
     self.write(sorted_running_notes)
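Note: in these handlers the 'Running Notes' UDF holds a JSON-encoded dict keyed by timestamp strings, which is why the keys can simply be sorted as dates. A minimal round-trip sketch, illustrative only and assuming a configured genologics `lims` connection and a valid project id:

import json
from genologics.entities import Project

def add_running_note(lims, project_id, timestamp, note_dict):
    # Append one note to the JSON-encoded 'Running Notes' UDF and write it back to LIMS.
    p = Project(lims, id=project_id)
    p.get(force=True)
    notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
    notes[timestamp] = note_dict
    p.udf['Running Notes'] = json.dumps(notes)
    p.put()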
Example #2
    def get(self, project):
        self.set_header("Content-type", "application/json")
        p = Project(lims, id=project)
        p.get(force=True)

        links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}

        #Sort by descending date, then hopefully have deviations on top
        sorted_links = OrderedDict()
        for k, v in sorted(links.iteritems(), key=lambda t: t[0], reverse=True):
            sorted_links[k] = v
        sorted_links = OrderedDict(sorted(sorted_links.iteritems(), key=lambda (k,v): v['type']))
        self.write(sorted_links)
Example #3
    def get(self, project):
        self.set_header("Content-type", "application/json")
        p = Project(lims, id=project)
        p.get(force=True)

        links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}

        #Sort by descending date, then hopefully have deviations on top
        sorted_links = OrderedDict()
        for k, v in sorted(links.items(), key=lambda t: t[0], reverse=True):
            sorted_links[k] = v
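        # Python's sort is stable, so this second sort by type keeps the descending-date order within each type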
        sorted_links = OrderedDict(sorted(sorted_links.items(), key=lambda k: k[1]['type']))
        self.write(sorted_links)
Example #4
def main(lims, args):

    p=Process(lims, id=args.pid)
    log=[]
    datamap={}
    wsname=None
    username="******".format(p.technician.first_name, p.technician.last_name)
    user_email=p.technician.email
    for art in p.all_inputs():
        if len(art.samples)!=1:
            log.append("Warning : artifact {0} has more than one sample".format(art.id))
        for sample in art.samples:
           # take care of lambda DNA (control samples have no project)
           if sample.project:
                if sample.project.id not in datamap:
                    datamap[sample.project.id]=[sample.name]
                else:
                    datamap[sample.project.id].append(sample.name)

    for art in p.all_outputs():
        try:
            wsname=art.location[0].name
            break
        except:
            pass

    now=datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    for pid in datamap:
        pj=Project(lims, id=pid)
        running_notes=json.loads(pj.udf['Running Notes'])
        if len(datamap[pid]) > 1:
            rnt="{0} samples planned for {1}".format(len(datamap[pid]), wsname)
        else:
            rnt="{0} sample planned for {1}".format(len(datamap[pid]), wsname)

        running_notes[now]={"note": rnt, "user" : username, "email":user_email, "category":"Workset"}

        pj.udf['Running Notes']=json.dumps(running_notes)
        pj.put()
        log.append("Updated project {0} : {1}, {2} samples in this workset".format(pid,pj.name, len(datamap[pid])))


 
    with open("EPP_Notes.log", "w") as flog:
        flog.write("\n".join(log))
    for out in p.all_outputs():
        #attach the log file
        if out.name=="RNotes Log":
            attach_file(os.path.join(os.getcwd(), "EPP_Notes.log"), out)

    sys.stderr.write("Updated {0} projects successfully".format(len(datamap.keys())))
Example #5
def main(args):
    lims_db = get_session()
    lims = Lims(BASEURI,USERNAME,PASSWORD)
    with open(args.conf) as cf:
        db_conf = yaml.load(cf)
        couch = setupServer(db_conf)
    db = couch["expected_yields"]
    postgres_string="{} hours".format(args.hours)
    project_ids=get_last_modified_projectids(lims_db, postgres_string)

    min_yields = {}
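    # Build a lookup of minimum yield per sequencing platform from the CouchDB 'yields/min_yield' view (key parts joined with spaces)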
    for row in db.view("yields/min_yield"):
        db_key = ' '.join(x if x else '' for x in row.key).strip()
        min_yields[db_key] = row.value

    for project in [Project(lims, id=x) for x in project_ids]:
        samples_count = 0
        samples = lims.get_samples(projectname=project.name)
        for sample in samples:
            if not("Status (manual)" in sample.udf and sample.udf["Status (manual)"] == "Aborted"):
                samples_count +=1
        try:
            lanes_ordered = project.udf['Sequence units ordered (lanes)']
            key = project.udf['Sequencing platform']
        except:
            continue
        if key in min_yields:
            value = min_yields[key]
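            # Expected minimum reads per sample: platform minimum yield * lanes ordered / non-aborted sample count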
            try:
                project.udf['Reads Min'] = float(value) * lanes_ordered / samples_count
                project.put()
            except ZeroDivisionError:
                pass
Example #6
 def get(self, project):
     self.set_header("Content-type", "application/json")
     p = Project(lims, id=project)
     try:
         p.get(force=True)
     except:
         raise tornado.web.HTTPError(404, reason='Project not found: {}'.format(project))
         # self.set_status(404)
         # self.write({})
     else:
         # Sorted running notes, by date
         running_notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
         sorted_running_notes = OrderedDict()
         for k, v in sorted(running_notes.iteritems(), key=lambda t: t[0], reverse=True):
             sorted_running_notes[k] = v
         self.write(sorted_running_notes)
Example #7
 def get(self, project):
     self.set_header("Content-type", "application/json")
     p = Project(lims, id=project)
     try:
         p.get(force=True)
     except:
         raise tornado.web.HTTPError(404, reason='Project not found: {}'.format(project))
         # self.set_status(404)
         # self.write({})
     else:
         # Sorted running notes, by date
         running_notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
         sorted_running_notes = OrderedDict()
         for k, v in sorted(running_notes.items(), key=lambda t: t[0], reverse=True):
             sorted_running_notes[k] = v
         self.write(sorted_running_notes)
Example #8
def main(args):
    log = lutils.setupLog('bioinfologger', args.logfile)
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    with open(args.conf) as conf_file:
        conf = yaml.safe_load(conf_file)
    bioinfodb = lutils.setupServer(conf)['bioinfo_analysis']
    open_projects = bioinfodb.view('latest_data/sample_id_open')

    for row in open_projects.rows:
        project_id = row.key[0]
        sample_id = row.key[3]
        close_date = None
        try:
            close_date = Project(lims=lims, id=project_id).close_date
        except HTTPError as e:
            if '404: Project not found' in e.message:
                log.error('Project '+project_id+' not found in LIMS')
                continue
        if close_date is not None:
            try:
                doc = bioinfodb.get(row.id)
            except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while accessing doc from statusdb')
                continue
            doc['project_closed'] = True
            try:
                bioinfodb.save(doc)
                log.info('Updated Project '+project_id+ ' Sample '+sample_id)
            except Exception as e:
                log.error(str(e) + ' in Project ' + project_id + ' Sample ' + sample_id + ' while saving to statusdb')
Example #9
File: order.py Project: mayabrandi/cg
    def submit_project(self,
                       project_name: str,
                       samples: List[dict],
                       researcher_id: str = '3'):
        """Parse Scout project."""
        containers = self.prepare(samples)

        lims_project = Project.create(
            self,
            researcher=Researcher(self, id=researcher_id),
            name=project_name,
        )
        LOG.info("%s: created new LIMS project", lims_project.id)

        containers_data = [
            batch.build_container(
                name=container['name'],
                con_type=Containertype(lims=self, id=container['type']),
            ) for container in containers
        ]
        container_details = batch.build_container_batch(containers_data)
        LOG.debug("%s: saving containers", lims_project.name)
        container_map = self.save_containers(container_details)

        reagentlabel_samples = [
            sample for container in containers
            for sample in container['samples'] if sample['index_sequence']
        ]

        samples_data = []
        for container in containers:
            for sample in container['samples']:
                LOG.debug("%s: adding sample to container: %s", sample['name'],
                          container['name'])
                lims_container = container_map[container['name']]
                sample_data = batch.build_sample(
                    name=sample['name'],
                    project=lims_project,
                    container=lims_container,
                    location=sample['location'],
                    udfs=sample['udfs'],
                )
                samples_data.append(sample_data)
        sample_details = batch.build_sample_batch(samples_data)
        process_reagentlabels = len(reagentlabel_samples) > 0
        sample_map = self.save_samples(sample_details,
                                       map_samples=process_reagentlabels)

        if process_reagentlabels:
            artifacts_data = [
                batch.build_artifact(
                    artifact=sample_map[sample['name']].artifact,
                    reagent_label=sample['index_sequence'],
                ) for sample in reagentlabel_samples
            ]
            artifact_details = batch.build_artifact_batch(artifacts_data)
            self.update_artifacts(artifact_details)

        lims_project_data = self._export_project(lims_project)
        return lims_project_data
Example #10
def main(lims, args):

    p = Process(lims, id=args.pid)
    log = []
    datamap = {}
    wsname = None
    username = "******".format(p.technician.first_name,
                                p.technician.last_name)
    user_email = p.technician.email
    for art in p.all_inputs():
        if len(art.samples) != 1:
            log.append(
                "Warning : artifact {0} has more than one sample".format(
                    art.id))
        for sample in art.samples:
            # take care of lambda DNA (control samples have no project)
            if sample.project:
                if sample.project.id not in datamap:
                    datamap[sample.project.id] = [sample.name]
                else:
                    datamap[sample.project.id].append(sample.name)

    for art in p.all_outputs():
        try:
            wsname = art.location[0].name
            break
        except:
            pass

    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    for pid in datamap:
        pj = Project(lims, id=pid)
        if len(datamap[pid]) > 1:
            rnt = "{0} samples planned for {1}".format(len(datamap[pid]),
                                                       wsname)
        else:
            rnt = "{0} sample planned for {1}".format(len(datamap[pid]),
                                                      wsname)

        running_note = {
            "note": rnt,
            "user": username,
            "email": user_email,
            "category": "Workset"
        }
        write_note_to_couch(pid, now, running_note, lims.get_uri())
        log.append(
            "Updated project {0} : {1}, {2} samples in this workset".format(
                pid, pj.name, len(datamap[pid])))

    with open("EPP_Notes.log", "w") as flog:
        flog.write("\n".join(log))
    for out in p.all_outputs():
        #attach the log file
        if out.name == "RNotes Log":
            attach_file(os.path.join(os.getcwd(), "EPP_Notes.log"), out)

    sys.stderr.write("Updated {0} projects successfully".format(
        len(list(datamap.keys()))))
Example #11
 def post(self, project):
     note = self.get_argument('note', '')
     category = self.get_argument('category', '')
     user = self.get_secure_cookie('user')
     email = self.get_secure_cookie('email')
     timestamp = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
     if not note:
         self.set_status(400)
         self.finish('<html><body>No project id or note parameters found</body></html>')
     else:
         newNote = {'user': user, 'email': email, 'note': note, 'category' : category, 'timestamp': timestamp}
         p = Project(lims, id=project)
         p.get(force=True)
         running_notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
         running_notes[timestamp] = newNote
         p.udf['Running Notes'] = json.dumps(running_notes)
         p.put()
         #saving running notes directly in genstat, because reasons.
         v=self.application.projects_db.view("project/project_id")
         for row in v[project]:
             doc_id=row.value
         doc=self.application.projects_db.get(doc_id)
         doc['details']['running_notes']=json.dumps(running_notes)
         self.application.projects_db.save(doc)
         self.set_status(201)
         self.write(json.dumps(newNote))
Example #12
File: projects.py Project: hoomag/NGI
    def post(self, project):
        user = self.get_current_user()
        a_type = self.get_argument('type', '')
        title = self.get_argument('title', '')
        url = self.get_argument('url', '')
        desc = self.get_argument('desc', '')

        if not a_type or not title:
            self.set_status(400)
            self.finish(
                '<html><body>Link title and type is required</body></html>')
        else:
            p = Project(lims, id=project)
            p.get(force=True)
            links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}
            links[str(datetime.datetime.now())] = {
                'user': user.name,
                'email': user.email,
                'type': a_type,
                'title': title,
                'url': url,
                'desc': desc
            }
            p.udf['Links'] = json.dumps(links)
            p.put()
            self.set_status(200)
            #ajax cries if it does not get anything back
            self.set_header("Content-type", "application/json")
            self.finish(json.dumps(links))
Example #13
    def post(self, project_id):
        try:
            data = json.loads(self.request.body)
            text = data.get('text', '')
            p = Project(lims, id=project_id)
            p.udf['Internal Costs'] = text
            p.put()
            view = self.application.projects_db.view("project/project_id")
            for row in view[project_id]:
                doc=self.application.projects_db.get(row.id)
                doc['details']['internal_costs']=text
                self.application.projects_db.save(doc)


        except Exception as e:
            self.set_status(400)
            self.finish('<html><body><p>could not update Entity {} :</p><pre>{}</pre></body></html>'.format( project_id, e))
        else:
            self.set_status(200)
            self.set_header("Content-type", "application/json")
            self.write(self.request.body)
Example #14
    def post(self, project_id):
        try:
            data = json.loads(self.request.body)
            text=data.get('text', '')
            p=Project(lims, id=project_id)
            p.udf['Lab Status']=text
            p.put()
            view=self.application.projects_db.view("project/project_id")
            for row in view[project_id]:
                doc=self.application.projects_db.get(row.id)
                doc['details']['lab_status']=text
                self.application.projects_db.save(doc)


        except Exception as e:
            self.set_status(400)
            self.finish('<html><body><p>could not update Entity {} :</p><pre>{}</pre></body></html>'.format(project_id, e))
        else:
            self.set_status(200)
            self.set_header("Content-type", "application/json")
            self.write(self.request.body)
Example #15
 def post(self, project):
     note = self.get_argument('note', '')
     category = self.get_argument('category', '')
     user = self.get_secure_cookie('user')
     email = self.get_secure_cookie('email')
     timestamp = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
     if not note:
         self.set_status(400)
         self.finish('<html><body>No project id or note parameters found</body></html>')
     else:
         newNote = {'user': user, 'email': email, 'note': note, 'category' : category, 'timestamp': timestamp}
         p = Project(lims, id=project)
         p.get(force=True)
         running_notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
         running_notes[timestamp] = newNote
         p.udf['Running Notes'] = json.dumps(running_notes)
         p.put()
         #saving running notes directly in genstat, because reasons.
         v=self.application.projects_db.view("project/project_id")
         for row in v[project]:
             doc_id=row.value
         doc=self.application.projects_db.get(doc_id)
         doc['details']['running_notes']=json.dumps(running_notes)
         self.application.projects_db.save(doc)
         self.set_status(201)
         self.write(json.dumps(newNote))
Example #16
    def post(self, project):
        user = self.get_secure_cookie('user')
        email = self.get_secure_cookie('email')
        a_type = self.get_argument('type', '')
        title = self.get_argument('title', '')
        url = self.get_argument('url', '')
        desc = self.get_argument('desc','')

        if not a_type or not title:
            self.set_status(400)
            self.finish('<html><body>Link title and type is required</body></html>')
        else:
            p = Project(lims, id=project)
            p.get(force=True)
            links = json.loads(p.udf['Links']) if 'Links' in p.udf else {}
            links[str(datetime.datetime.now())] = {'user': user,
                                                   'email': email,
                                                   'type': a_type,
                                                   'title': title,
                                                   'url': url,
                                                   'desc': desc}
            p.udf['Links'] = json.dumps(links)
            p.put()
            self.set_status(200)
            #ajax cries if it does not get anything back
            self.set_header("Content-type", "application/json")
            self.finish(json.dumps(links))
Example #17
    def get(self, project_id):

        project=Project(lims, id=project_id)
        processes=lims.get_processes(projectname=project.name, type='Project Summary 1.3')
        samples=lims.get_samples(projectname=project.name)
        self.getProjectSummaryFields(lims)


        t = self.application.loader.load("project_summary.html")
        self.write(t.generate(gs_globals=self.application.gs_globals,
                              project_id=project_id,
                              processes=processes,
                              samples=samples,
                              step_fields=self.step_fields,
                              sample_fields=self.sample_fields))
Example #18
def submit_dx_samples():
    form = SubmitDXSampleForm()

    if form.validate_on_submit():
        container_type = Containertype(lims, id='2')  # Tube
        workflow = Workflow(lims, id=app.config['LIMS_DX_SAMPLE_SUBMIT_WORKFLOW'])

        for sample_name in form.parsed_samples:
            # Get or create project
            lims_projects = lims.get_projects(name=form.parsed_samples[sample_name]['project'])
            if not lims_projects:
                lims_project = Project.create(lims, name=form.parsed_samples[sample_name]['project'], researcher=form.researcher, udf={'Application': 'DX'})
            else:
                lims_project = lims_projects[0]

            # Set sample udf data
            udf_data = form.parsed_worklist[sample_name]
            udf_data['Sample Type'] = form.parsed_samples[sample_name]['type']
            udf_data['Dx Fragmentlengte (bp) Externe meting'] = form.pool_fragment_length.data
            udf_data['Dx Conc. (ng/ul) Externe meting'] = form.pool_concentration.data
            udf_data['Dx Exoomequivalent'] = form.parsed_samples[sample_name]['exome_count']

            # Create sample
            container = Container.create(lims, type=container_type, name=udf_data['Dx Fractienummer'])
            sample = Sample.create(lims, container=container, position='1:1', project=lims_project, name=sample_name, udf=udf_data)
            print sample.name, sample.artifact.name

            # Add reagent label (barcode)
            artifact = sample.artifact
            artifact_xml_dom = minidom.parseString(artifact.xml())

            for artifact_name_node in artifact_xml_dom.getElementsByTagName('name'):
                parent = artifact_name_node.parentNode
                reagent_label = artifact_xml_dom.createElement('reagent-label')
                reagent_label.setAttribute('name', form.parsed_samples[sample_name]['barcode'])
                parent.appendChild(reagent_label)
                lims.put(artifact.uri, artifact_xml_dom.toxml(encoding='utf-8'))

            lims.route_artifacts([sample.artifact], workflow_uri=workflow.uri)

        return render_template('submit_dx_samples_done.html', title='Submit DX samples', project_name=lims_project.name, form=form)
    return render_template('submit_dx_samples.html', title='Submit DX samples', form=form)
Example #19
    def get(self):
        limsg = lims.Lims(BASEURI, USERNAME, PASSWORD)
        queues = {}
        queues['TruSeqRNAprep'] = Queue(limsg, id='311')
        queues['TruSeqSmallRNA'] = Queue(limsg, id='410')
        queues['TruSeqDNAPCR_free'] = Queue(limsg, id='407')
        queues['ThruPlex'] = Queue(limsg, id='451')
        queues['Genotyping'] = Queue(limsg, id='901')
        queues['RadSeq'] = Queue(limsg, id='1201')
        queues['SMARTerPicoRNA'] = Queue(limsg, id='1551')
        queues['ChromiumGenomev2'] = Queue(limsg, id='1801')

        methods = queues.keys()
        pools = {}

        for method in methods:
            pools[method] = {}
            for artifact in queues[method].artifacts:
                name = artifact.name
                project = artifact.name.split('_')[0]
                if project in pools[method]:
                    pools[method][project]['samples'].append(name)
                else:
                    total_num_samples = limsg.get_sample_number(
                        projectlimsid=project)
                    proj = Project(limsg, id=project)
                    try:
                        date_queued = proj.udf['Queued'].strftime("%Y-%m-%d")
                    except KeyError:
                        # Queued should really be on a project at this point, but mistakes happen
                        date_queued = None
                    projName = proj.name
                    pools[method][project] = {
                        'total_num_samples': total_num_samples,
                        'queued_date': date_queued,
                        'pname': projName,
                        'samples': [name]
                    }

        self.set_header("Content-type", "application/json")
        self.write(json.dumps(pools))
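Note: the handler above returns one entry per library prep method, keyed by project id. An illustrative sketch of the JSON shape (ids, dates and sample names are placeholders, not real data):

example_pools = {
    "TruSeqRNAprep": {
        "P12345": {
            "total_num_samples": 96,            # total samples registered for the project
            "queued_date": "2021-01-14",        # project 'Queued' UDF, or None if missing
            "pname": "A.Project_21_01",
            "samples": ["P12345_1001", "P12345_1002"],  # artifacts currently in the queue
        },
    },
}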
Example #20
 def test_create_entity(self):
     with patch('genologics.lims.requests.post',
                return_value=Mock(content=self.sample_creation, status_code=201)) as patch_post:
         l = Sample.create(
             self.lims,
             project=Project(self.lims, uri='project'),
             container=Container(self.lims, uri='container'),
             position='1:1',
             name='s1',
         )
         data = '''<?xml version=\'1.0\' encoding=\'utf-8\'?>
         <smp:samplecreation xmlns:smp="http://genologics.com/ri/sample">
         <name>s1</name>
         <project uri="project" limsid="project" />
         <location>
           <container uri="container" />
           <value>1:1</value>
         </location>
         </smp:samplecreation>'''
         assert elements_equal(ElementTree.fromstring(patch_post.call_args_list[0][1]['data']),
                               ElementTree.fromstring(data))
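Note: this test relies on an `elements_equal` helper that is not part of the snippet. A minimal recursive comparison along these lines would suffice (illustrative sketch, not the project's actual helper):

from xml.etree import ElementTree

def elements_equal(e1, e2):
    # Compare two ElementTree elements by tag, attributes, text (ignoring
    # surrounding whitespace) and children, recursively.
    if e1.tag != e2.tag or e1.attrib != e2.attrib:
        return False
    if (e1.text or '').strip() != (e2.text or '').strip():
        return False
    if len(e1) != len(e2):
        return False
    return all(elements_equal(c1, c2) for c1, c2 in zip(e1, e2))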
Example #21
 def post(self, project):
     note = self.get_argument('note', '')
     user = self.get_secure_cookie('user')
     email = self.get_secure_cookie('email')
     if not note:
         self.set_status(400)
         self.finish('<html><body>No project id or note parameters found</body></html>')
     else:
         newNote = {'user': user, 'email': email, 'note': note}
         p = Project(lims, id=project)
         p.get(force=True)
         running_notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
         running_notes[str(datetime.datetime.now())] = newNote
         p.udf['Running Notes'] = json.dumps(running_notes)
         p.put()
         self.set_status(201)
         self.write(json.dumps(newNote))
Example #22
    def make_project_running_note(application, project, note, category, user, email):
        timestamp = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
        newNote = {'user': user, 'email': email, 'note': note, 'category' : category, 'timestamp': timestamp}
        p = Project(lims, id=project)
        p.get(force=True)
        running_notes = json.loads(p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
        running_notes.update({timestamp: newNote})
        # Saving running note in LIMS
        p.udf['Running Notes'] = json.dumps(running_notes)
        p.put()

        #saving running notes directly in genstat, because reasons.
        v=application.projects_db.view("project/project_id")
        for row in v[project]:
            doc_id=row.value
        doc=application.projects_db.get(doc_id)
        doc['details']['running_notes']=json.dumps(running_notes)
        application.projects_db.save(doc)
        return newNote
Example #23
 def update_project(self, lims_id: str, name: str = None) -> None:
     """Update information about a project."""
     lims_project = Project(self, id=lims_id)
     if name:
         lims_project.name = name
         lims_project.put()
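Note: `self` here acts as the lims handle passed to `Project`, so the method appears to live on a Lims subclass. A hypothetical, self-contained usage sketch with placeholder class name, credentials and project id:

from genologics.lims import Lims
from genologics.entities import Project

class ProjectAPI(Lims):
    # Hypothetical wrapper class; only the method from the example above is shown.
    def update_project(self, lims_id, name=None):
        lims_project = Project(self, id=lims_id)
        if name:
            lims_project.name = name
            lims_project.put()

api = ProjectAPI('https://lims.example.com', 'apiuser', 'apipassword')  # placeholder credentials
api.update_project('ADM1059A2', name='Renamed project')                 # placeholder project id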
Example #24
 def test_project_example(self):
     with patch("genologics.lims.Lims.get",
                side_effect=test_utils.patched_get):
         pj = Project(self.lims, id='BLA1')
         self.assertEquals(pj.name, 'Test')
Example #25
File: projects.py Project: hoomag/NGI
    def make_project_running_note(application, project, note, category, user,
                                  email):
        timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                               '%Y-%m-%d %H:%M:%S')
        newNote = {
            'user': user,
            'email': email,
            'note': note,
            'category': category,
            'timestamp': timestamp
        }
        p = Project(lims, id=project)
        p.get(force=True)
        running_notes = json.loads(
            p.udf['Running Notes']) if 'Running Notes' in p.udf else {}
        running_notes.update({timestamp: newNote})
        # Saving running note in LIMS
        p.udf['Running Notes'] = json.dumps(running_notes)
        p.put()
        p.get(force=True)
        #Retry once more
        if p.udf['Running Notes'] != json.dumps(running_notes):
            p.udf['Running Notes'] = json.dumps(running_notes)
            p.put()
        p.get(force=True)
        #In the rare case saving to LIMS does not work
        assert (p.udf['Running Notes'] == json.dumps(running_notes)
                ), "The Running note wasn't saved in LIMS!"

        #saving running notes directly in genstat, because reasons.
        v = application.projects_db.view("project/project_id")
        for row in v[project]:
            doc_id = row.value
        doc = application.projects_db.get(doc_id)
        doc['details']['running_notes'] = json.dumps(running_notes)
        application.projects_db.save(doc)
        #### Check and send mail to tagged users
        pattern = re.compile("(@)([a-zA-Z0-9.-]+)")
        userTags = pattern.findall(note)
        if userTags:
            RunningNotesDataHandler.notify_tagged_user(application, userTags,
                                                       project, note, category,
                                                       user, timestamp)
        ####
        return newNote
Example #26
def submit_samples():
    form = SubmitSampleForm()

    if form.validate_on_submit():
        # Create lims project
        lims_project = Project.create(
            lims,
            name=app.config['LIMS_INDICATIONS'][form.indicationcode.data]['project_name_prefix'],
            researcher=form.researcher,
            udf={'Application': form.indicationcode.data}
        )
        lims_project.name = '{0}_{1}'.format(lims_project.name, lims_project.id)
        lims_project.put()

        # Save attachment
        attachment = form.attachment.data
        if attachment:
            temp_dir = mkdtemp()
            attachment_path = path.join(temp_dir, secure_filename(attachment.filename))
            attachment.save(attachment_path)
            print attachment_path
            lims.upload_new_file(lims_project, attachment_path)
            rmtree(temp_dir)

        # Create Samples
        lims_container_type = Containertype(lims, id='2')  # Tube
        sample_artifacts = []
        for sample in form.parsed_samples:
            lims_container = Container.create(lims, type=lims_container_type, name=sample['name'])
            sample_udf_data = {
                'Sample Type': 'DNA library',
                'Dx Fragmentlengte (bp) Externe meting': form.pool_fragment_length.data,
                'Dx Conc. (ng/ul) Externe meting': form.pool_concentration.data,
                'Dx Exoomequivalent': sample['exome_count'],
            }
            lims_sample = Sample.create(lims, container=lims_container, position='1:1', project=lims_project, name=sample['name'], udf=sample_udf_data)
            print lims_sample.name, lims_sample.artifact.name
            artifact = lims_sample.artifact
            sample_artifacts.append(artifact)

            # Add reagent label (barcode)
            artifact_xml_dom = minidom.parseString(artifact.xml())
            for artifact_name_node in artifact_xml_dom.getElementsByTagName('name'):
                parent = artifact_name_node.parentNode
                reagent_label = artifact_xml_dom.createElement('reagent-label')
                reagent_label.setAttribute('name', sample['barcode'])
                parent.appendChild(reagent_label)
                lims.put(artifact.uri, artifact_xml_dom.toxml(encoding='utf-8'))

        # Route artifacts to workflow
        workflow = Workflow(lims, id=app.config['LIMS_INDICATIONS'][form.indicationcode.data]['workflow_id'])
        lims.route_artifacts(sample_artifacts, workflow_uri=workflow.uri)

        # Send email
        subject = "Clarity Portal Sample Upload - {0}".format(lims_project.name)
        message = "Gebruikersnaam\t{0}\n".format(form.username.data)
        message += "Indicatie code\t{0}\n".format(form.indicationcode.data)
        message += "Lims Project naam\t{0}\n".format(lims_project.name)
        message += "Pool - Fragment lengte\t{0}\n".format(form.pool_fragment_length.data)
        message += "Pool - Concentratie\t{0}\n".format(form.pool_concentration.data)
        message += "Pool - Exoom equivalenten\t{0}\n\n".format(form.sum_exome_count)
        message += "Sample naam\tBarcode\tExome equivalenten\tSample type\n"

        for sample in form.parsed_samples:
            message += "{0}\t{1}\t{2}\t{3}\n".format(sample['name'], sample['barcode'], sample['exome_count'], sample['type'])
        send_email(app.config['EMAIL_FROM'], app.config['LIMS_INDICATIONS'][form.indicationcode.data]['email_to'], subject, message)

        return render_template('submit_samples_done.html', title='Submit samples', project_name=lims_project.name, form=form)
    return render_template('submit_samples.html', title='Submit samples', form=form)
Example #27
def from_helix(lims, email_settings, input_file):
    """Upload samples from helix export file."""
    project_name = 'Dx {filename}'.format(filename=input_file.name.rstrip('.csv').split('/')[-1])
    helix_initials = project_name.split('_')[-1]

    # Try lims connection
    try:
        lims.check_version()
    except ConnectionError:
        subject = "ERROR Lims Helix Upload: {0}".format(project_name)
        message = "Can't connect to lims server, please contact a lims administrator."
        send_email(email_settings['server'], email_settings['from'], email_settings['to_import_helix'], subject, message)
        sys.exit(message)

    # Get researcher using helix initials
    for researcher in lims.get_researchers():
        if researcher.fax == helix_initials:  # Use the FAX field for initials, as the lims initials field can't be edited via the 5.0 web interface.
            email_settings['to_import_helix'].append(researcher.email)
            break
    else:   # No researcher found
        subject = "ERROR Lims Helix Upload: {0}".format(project_name)
        message = "Can't find researcher with initials: {0}.".format(helix_initials)
        send_email(email_settings['server'], email_settings['from'], email_settings['to_import_helix'], subject, message)
        sys.exit(message)

    # Create project
    if not lims.get_projects(name=project_name):
        project = Project.create(lims, name=project_name, researcher=researcher, udf={'Application': 'DX'})
    else:
        subject = "ERROR Lims Helix Upload: {0}".format(project_name)
        message = "Duplicate project / werklijst. Samples not loaded."
        send_email(email_settings['server'], email_settings['from'], email_settings['to_import_helix'], subject, message)
        sys.exit(message)

    container_type = Containertype(lims, id='2')  # Tube

    # match header and udf fields
    udf_column = {
        'Dx Onderzoeknummer': {'column': 'Onderzoeknummer'},
        'Dx Fractienummer': {'column': 'Fractienummer'},
        'Dx Monsternummer': {'column': 'Monsternummer'},
        'Dx Concentratie (ng/ul)': {'column': 'Concentratie (ng/ul)'},
        'Dx Materiaal type': {'column': 'Materiaal'},
        'Dx Foetus': {'column': 'Foetus'},
        'Dx Foetus ID': {'column': 'Foet_id'},
        'Dx Foetus geslacht': {'column': 'Foetus_geslacht'},
        'Dx Overleden': {'column': 'Overleden'},
        'Dx Opslaglocatie': {'column': 'Opslagpositie'},
        'Dx Spoed': {'column': 'Spoed'},
        'Dx NICU Spoed': {'column': 'NICU Spoed'},
        'Dx Persoons ID': {'column': 'Persoons_id'},
        'Dx Werklijstnummer': {'column': 'Werklijstnummer'},
        'Dx Familienummer': {'column': 'Familienummer'},
        'Dx Geslacht': {'column': 'Geslacht'},
        'Dx Geboortejaar': {'column': 'Geboortejaar'},
        'Dx Meet ID': {'column': 'Stof_meet_id'},
        'Dx Stoftest code': {'column': 'Stoftestcode'},
        'Dx Stoftest omschrijving': {'column': 'Stoftestomschrijving'},
        'Dx Onderzoeksindicatie': {'column': 'Onderzoeksindicatie'},
        'Dx Onderzoeksreden': {'column': 'Onderzoeksreden'},
        'Dx Protocolcode': {'column': 'Protocolcode'},
        'Dx Protocolomschrijving': {'column': 'Protocolomschrijving'},
        'Dx Einddatum': {'column': 'Einddatum'},
        'Dx Gerelateerde onderzoeken': {'column': 'Gerelateerde onderzoeken'},
    }
    header = input_file.readline().rstrip().split(',')  # expect header on first line
    for udf in udf_column:
        udf_column[udf]['index'] = header.index(udf_column[udf]['column'])

    # Setup email
    subject = "Lims Helix Upload: {0}".format(project_name)
    message = "Project: {0}\n\nSamples:\n".format(project_name)

    # Parse samples
    for line in input_file:
        data = line.rstrip().strip('"').split('","')

        udf_data = {'Sample Type': 'DNA isolated', 'Dx Import warning': ''}  # required lims input
        for udf in udf_column:
            # Transform specific udf
            if udf in ['Dx Overleden', 'Dx Spoed', 'Dx NICU Spoed']:
                udf_data[udf] = clarity_epp.upload.utils.char_to_bool(data[udf_column[udf]['index']])
            elif udf in ['Dx Geslacht', 'Dx Foetus geslacht']:
                udf_data[udf] = clarity_epp.upload.utils.transform_sex(data[udf_column[udf]['index']])
            elif udf == 'Dx Foetus':
                udf_data[udf] = bool(data[udf_column[udf]['index']].strip())
            elif udf == 'Dx Concentratie (ng/ul)':
                udf_data[udf] = data[udf_column[udf]['index']].replace(',', '.')
            elif udf in ['Dx Monsternummer', 'Dx Fractienummer']:
                udf_data[udf] = clarity_epp.upload.utils.transform_sample_name(data[udf_column[udf]['index']])
            elif udf == 'Dx Gerelateerde onderzoeken':
                udf_data[udf] = data[udf_column[udf]['index']].replace(',', ';')
            elif udf == 'Dx Einddatum':
                date = datetime.strptime(data[udf_column[udf]['index']], '%d-%m-%Y')  # Helix format (14-01-2021)
                udf_data[udf] = date.strftime('%Y-%m-%d')  # LIMS format (2021-01-14)
            else:
                udf_data[udf] = data[udf_column[udf]['index']]

        sample_name = udf_data['Dx Monsternummer']

        # Set 'Dx Handmatig' udf
        if udf_data['Dx Foetus'] or udf_data['Dx Overleden'] or udf_data['Dx Materiaal type'] not in ['BL', 'BLHEP', 'BM', 'BMEDTA']:
            udf_data['Dx Handmatig'] = True
        else:
            udf_data['Dx Handmatig'] = False

        # Set 'Dx Familie status' udf
        if udf_data['Dx Onderzoeksreden'] == 'Bevestiging diagnose':
            udf_data['Dx Familie status'] = 'Kind'
        elif udf_data['Dx Onderzoeksreden'] == 'Prenataal onderzoek':
            udf_data['Dx Familie status'] = 'Kind'
        elif udf_data['Dx Onderzoeksreden'] == 'Eerstegraads-verwantenond':
            udf_data['Dx Familie status'] = 'Kind'
        elif udf_data['Dx Onderzoeksreden'] == 'Partneronderzoek':
            udf_data['Dx Familie status'] = 'Kind'
        elif udf_data['Dx Onderzoeksreden'] == 'Informativiteitstest':
            udf_data['Dx Familie status'] = 'Ouder'
        else:
            udf_data['Dx Import warning'] = ';'.join(['Onbekende onderzoeksreden, familie status niet ingevuld.', udf_data['Dx Import warning']])

        # Set 'Dx Geslacht' and 'Dx Geboortejaar' with 'Foetus' information if 'Dx Foetus == True'
        if udf_data['Dx Foetus']:
            udf_data['Dx Geslacht'] = udf_data['Dx Foetus geslacht']
            udf_data['Dx Geboortejaar'] = ''

        # Set 'Dx Geslacht = Onbekend' if 'Dx Onderzoeksindicatie == DSD00'
        if udf_data['Dx Onderzoeksindicatie'] == 'DSD00' and udf_data['Dx Familie status'] == 'Kind':
            udf_data['Dx Geslacht'] = 'Onbekend'

        # Check 'Dx Familienummer' and correct
        if '/' in udf_data['Dx Familienummer']:
            udf_data['Dx Import warning'] = ';'.join([
                'Meerdere familienummers, laatste wordt gebruikt. ({0})'.format(udf_data['Dx Familienummer']),
                udf_data['Dx Import warning']
            ])
            udf_data['Dx Familienummer'] = udf_data['Dx Familienummer'].split('/')[-1].strip(' ')

        sample_list = lims.get_samples(name=sample_name)

        if sample_list:
            sample = sample_list[0]
            if udf_data['Dx Protocolomschrijving'] in sample.udf['Dx Protocolomschrijving']:
                message += "{0}\tERROR: Duplicate sample and Protocolomschrijving code: {1}.\n".format(sample_name, udf_data['Dx Protocolomschrijving'])
            else:
                # Update existing sample if new Protocolomschrijving and thus workflow.

                # Append udf fields
                append_udf = [
                    'Dx Onderzoeknummer', 'Dx Onderzoeksindicatie', 'Dx Onderzoeksreden', 'Dx Werklijstnummer', 'Dx Protocolcode', 'Dx Protocolomschrijving',
                    'Dx Meet ID', 'Dx Stoftest code', 'Dx Stoftest omschrijving'
                ]
                for udf in append_udf:
                    sample.udf[udf] = ';'.join([udf_data[udf], str(sample.udf[udf])])

                # Update udf fields
                update_udf = ['Dx Overleden', 'Dx Spoed', 'Dx NICU Spoed', 'Dx Handmatig', 'Dx Opslaglocatie', 'Dx Import warning']
                for udf in update_udf:
                    sample.udf[udf] = udf_data[udf]

                # Add to new workflow
                workflow = clarity_epp.upload.utils.stoftestcode_to_workflow(lims, udf_data['Dx Stoftest code'])
                if workflow:
                    sample.put()
                    lims.route_artifacts([sample.artifact], workflow_uri=workflow.uri)
                    message += "{0}\tUpdated and added to workflow: {1}.\n".format(sample.name, workflow.name)
                else:
                    message += "{0}\tERROR: Stoftest code {1} is not linked to a workflow.\n".format(sample.name, udf_data['Dx Stoftest code'])

        else:
            # Check other samples from patient
            sample_list = lims.get_samples(udf={'Dx Persoons ID': udf_data['Dx Persoons ID']})
            for sample in sample_list:
                if sample.udf['Dx Protocolomschrijving'] == udf_data['Dx Protocolomschrijving'] and sample.udf['Dx Foetus'] == udf_data['Dx Foetus']:
                    udf_data['Dx Import warning'] = ';'.join(['Onderzoek reeds uitgevoerd.', udf_data['Dx Import warning']])

            # Add sample to workflow
            workflow = clarity_epp.upload.utils.stoftestcode_to_workflow(lims, udf_data['Dx Stoftest code'])
            if workflow:
                container = Container.create(lims, type=container_type, name=udf_data['Dx Fractienummer'])
                sample = Sample.create(lims, container=container, position='1:1', project=project, name=sample_name, udf=udf_data)
                lims.route_artifacts([sample.artifact], workflow_uri=workflow.uri)
                message += "{0}\tCreated and added to workflow: {1}.\n".format(sample.name, workflow.name)
            else:
                message += "{0}\tERROR: Stoftest code {1} is not linked to a workflow.\n".format(sample_name, udf_data['Dx Stoftest code'])

    # Send final email
    send_email(email_settings['server'], email_settings['from'], email_settings['to_import_helix'], subject, message)
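Note: the send_email helper called in this example is not part of the snippet. An illustrative implementation matching the call signature used here (server, sender, receivers, subject, message), assuming plain SMTP without authentication:

import smtplib
from email.mime.text import MIMEText

def send_email(server, sender, receivers, subject, message):
    # Send a plain-text mail to a list of receivers via the given SMTP server.
    mail = MIMEText(message)
    mail['Subject'] = subject
    mail['From'] = sender
    mail['To'] = ', '.join(receivers)
    with smtplib.SMTP(server) as smtp:
        smtp.sendmail(sender, receivers, mail.as_string())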