Example #1
 def test_route_artifact(self, mocked_post):
     lims = Lims(self.url, username=self.username, password=self.password)
     artifact = Mock(uri=self.url + "/artifact/2")
     lims.route_artifacts(artifact_list=[artifact],
                          workflow_uri=self.url +
                          '/api/v2/configuration/workflows/1')
     assert mocked_post.call_count == 1
Example #2
    def test_upload_new_file(self, mocked_open, mocked_isfile):
        lims = Lims(self.url, username=self.username, password=self.password)
        xml_intro = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>"""
        file_start = """<file:file xmlns:file="http://genologics.com/ri/file">"""
        file_start2 = """<file:file xmlns:file="http://genologics.com/ri/file" uri="{url}/api/v2/files/40-3501" limsid="40-3501">"""
        attached = """    <attached-to>{url}/api/v2/samples/test_sample</attached-to>"""
        upload = """    <original-location>filename_to_upload</original-location>"""
        content_loc = """    <content-location>sftp://{url}/opt/gls/clarity/users/glsftp/clarity/samples/test_sample/test</content-location>"""
        file_end = """</file:file>"""
        glsstorage_xml = '\n'.join([xml_intro,file_start, attached, upload, content_loc, file_end]).format(url=self.url)
        file_post_xml = '\n'.join([xml_intro, file_start2, attached, upload, content_loc, file_end]).format(url=self.url)
        with patch('requests.post', side_effect=[Mock(content=glsstorage_xml, status_code=200),
                                                 Mock(content=file_post_xml, status_code=200),
                                                 Mock(content="", status_code=200)]):

            file = lims.upload_new_file(Mock(uri=self.url+"/api/v2/samples/test_sample"),
                                        'filename_to_upload')
            assert file.id == "40-3501"

        with patch('requests.post', side_effect=[Mock(content=self.error_xml, status_code=400)]):

          self.assertRaises(HTTPError,
                            lims.upload_new_file,
                            Mock(uri=self.url+"/api/v2/samples/test_sample"),
                            'filename_to_upload')
Example #3
def main(args):
    lims_db = get_session()
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    with open(args.conf) as cf:
        db_conf = yaml.safe_load(cf)
        couch = setupServer(db_conf)
    db = couch["expected_yields"]
    postgres_string = "{} hours".format(args.hours)
    project_ids = get_last_modified_projectids(lims_db, postgres_string)

    for project in [Project(lims, id=x) for x in project_ids]:
        samples_count = 0
        samples = lims.get_samples(projectname=project.name)
        for sample in samples:
            if not ("Status (manual)" in sample.udf
                    and sample.udf["Status (manual)"] == "Aborted"):
                samples_count += 1
        try:
            lanes_ordered = project.udf['Sequence units ordered (lanes)']
            key = parse_sequencing_platform(project.udf['Sequencing platform'])
        except:
            continue
        for row in db.view("yields/min_yield"):
            db_key = [x.lower() if x else None for x in row.key]
            if db_key == key:
                try:
                    project.udf['Reads Min'] = float(
                        row.value) * lanes_ordered / samples_count
                    project.put()
                except ZeroDivisionError:
                    pass
Example #4
def main(args):
    log = []
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    process = Process(lims, id=args.pid)
    for swp_iomap in process.input_output_maps:
        if swp_iomap[1]['output-generation-type'] != 'PerInput':
            continue
        inp_artifact = swp_iomap[0]['uri']
        amount_check_pros = lims.get_processes(
            type='Amount confirmation QC', inputartifactlimsid=inp_artifact.id)
        amount_check_pros.sort(reverse=True, key=lambda x: x.date_run)
        try:
            correct_amount_check_pro = amount_check_pros[0]
        except IndexError:
            sys.exit(
                "Cannot find an Amount Confirmation QC step for artifact {}".
                format(inp_artifact.id))
        else:
            for iomap in correct_amount_check_pro.input_output_maps:
                if iomap[1]['output-generation-type'] != 'PerInput':
                    continue
                if iomap[0]['limsid'] == inp_artifact.id:
                    for udf_name in [
                            'Concentration', 'Conc. Units', 'Total Volume (uL)'
                    ]:
                        try:
                            swp_iomap[1]['uri'].udf[udf_name] = iomap[1][
                                'uri'].udf[udf_name]
                        except:
                            import pdb
                            pdb.set_trace()
                    swp_iomap[1]['uri'].udf['Amount taken (ng)'] = iomap[1][
                        'uri'].udf['Amount to take (ng)']
                    swp_iomap[1]['uri'].put()
Example #5
def main(args):
    log = []
    lims = Lims(BASEURI,USERNAME,PASSWORD)
    process = Process(lims, id=args.pid)
    for swp_iomap in process.input_output_maps:
        if swp_iomap[1]['output-generation-type'] !=  'PerInput':
            continue
        inp_artifact = swp_iomap[0]['uri']
        amount_check_pros = lims.get_processes(type='Amount confirmation QC', inputartifactlimsid=inp_artifact.id)
        amount_check_pros.sort(reverse=True, key=lambda x:x.date_run)
        try:
            correct_amount_check_pro = amount_check_pros[0]
        except IndexError:
            sys.exit("Cannot find an Amount Confirmation QC step for artifact {}".format(inp_artifact.id))
        else:
            for iomap in correct_amount_check_pro.input_output_maps:
                if iomap[1]['output-generation-type'] !=  'PerInput':
                    continue
                if iomap[0]['limsid'] == inp_artifact.id:
                    for udf_name in ['Concentration', 'Conc. Units', 'Total Volume (uL)']:
                        try:
                            swp_iomap[1]['uri'].udf[udf_name] = iomap[1]['uri'].udf[udf_name]
                        except:
                            import pdb;pdb.set_trace()
                    swp_iomap[1]['uri'].udf['Amount taken (ng)'] = iomap[1]['uri'].udf['Amount to take (ng)']
                    swp_iomap[1]['uri'].put()
Example #6
    def test_upload_new_file(self, mocked_open, mocked_isfile):
        lims = Lims(self.url, username=self.username, password=self.password)
        xml_intro = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>"""
        file_start = """<file:file xmlns:file="http://genologics.com/ri/file">"""
        file_start2 = """<file:file xmlns:file="http://genologics.com/ri/file" uri="{url}/api/v2/files/40-3501" limsid="40-3501">"""
        attached = """    <attached-to>{url}/api/v2/samples/test_sample</attached-to>"""
        upload = """    <original-location>filename_to_upload</original-location>"""
        content_loc = """    <content-location>sftp://{url}/opt/gls/clarity/users/glsftp/clarity/samples/test_sample/test</content-location>"""
        file_end = """</file:file>"""
        glsstorage_xml = '\n'.join(
            [xml_intro, file_start, attached, upload, content_loc,
             file_end]).format(url=self.url)
        file_post_xml = '\n'.join(
            [xml_intro, file_start2, attached, upload, content_loc,
             file_end]).format(url=self.url)
        with patch('requests.post',
                   side_effect=[
                       Mock(content=glsstorage_xml, status_code=200),
                       Mock(content=file_post_xml, status_code=200),
                       Mock(content="", status_code=200)
                   ]):

            file = lims.upload_new_file(
                Mock(uri=self.url + "/api/v2/samples/test_sample"),
                'filename_to_upload')
            assert file.id == "40-3501"

        with patch('requests.post',
                   side_effect=[Mock(content=self.error_xml,
                                     status_code=400)]):

            self.assertRaises(
                HTTPError, lims.upload_new_file,
                Mock(uri=self.url + "/api/v2/samples/test_sample"),
                'filename_to_upload')
Example #7
 def test_get(self, mocked_instance):
     lims = Lims(self.url, username=self.username, password=self.password)
     r = lims.get('{url}/api/v2/artifacts?sample_name=test_sample'.format(url=self.url))
     assert isinstance(r, xml.etree.ElementTree.Element)
     assert mocked_instance.call_count == 1
     mocked_instance.assert_called_with('http://testgenologics.com:4040/api/v2/artifacts?sample_name=test_sample', timeout=16,
                               headers={'accept': 'application/xml'}, params={}, auth=('test', 'password'))
Example #8
def generate_output(project_id, dest_plate_list, best_sample_struct,total_lanes, req_lanes, lane_maps, rounded_ratios, 
                    target_clusters, clusters_per_lane, extra_lanes, lane_volume, pool_excess, final_pool_sizes, volume_ratios, desired_ratios):
    """Gathers the container id and well name for all samples in project"""
    timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M')
    
    #Cred to Denis for providing a base epp
    location = dict()
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    allProjects = lims.get_projects()
    for proj in allProjects:
        if proj.id == project_id:
            projName = proj.name 
            break
    
    #Sets up source id    
    #All normalization processes for project
    norms=['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0','Library Normalization (HiSeq X) 1.0']
    pros=lims.get_processes(type=norms, projectname=projName)
    #For all processes
    for p in pros:
        #For all artifacts in process
        for o in p.all_outputs():
            #If artifact is analyte type and has project name in sample
            if o.type=="Analyte" and project_id in o.name:
                location[o.name.split()[0]] = list()
                location[o.name.split()[0]].append(o.location[0].id)
                location[o.name.split()[0]].append(o.location[1])
    
    #Continue coding from here
    generate_summary(projName, best_sample_struct, timestamp, project_id, dest_plate_list, total_lanes, req_lanes, 
                     lane_maps, rounded_ratios, target_clusters, clusters_per_lane, extra_lanes, volume_ratios, desired_ratios, lane_volume, pool_excess)
    generate_csv(projName, timestamp, location, dest_plate_list, total_lanes, best_sample_struct, rounded_ratios, lane_volume, pool_excess, final_pool_sizes)
    generate_dumpfile(projName, timestamp, location, dest_plate_list, total_lanes, best_sample_struct, rounded_ratios, lane_volume, pool_excess, final_pool_sizes)
Example #9
def main(args):
    lims_db = get_session()
    lims = Lims(BASEURI,USERNAME,PASSWORD)
    with open(args.conf) as cf:
        db_conf = yaml.safe_load(cf)
        couch = setupServer(db_conf)
    db = couch["expected_yields"]
    postgres_string="{} hours".format(args.hours)
    project_ids=get_last_modified_projectids(lims_db, postgres_string)

    for project in [Project(lims, id=x) for x in project_ids]:
        samples_count = 0
        samples = lims.get_samples(projectname=project.name)
        for sample in samples:
            if not("Status (manual)" in sample.udf and sample.udf["Status (manual)"] == "Aborted"):
                samples_count +=1
        try:
            lanes_ordered = project.udf['Sequence units ordered (lanes)']
            key = parse_sequencing_platform(project.udf['Sequencing platform'])
        except:
            continue
        for row in db.view("yields/min_yield"):
            db_key = [x.lower() if x else None for x in row.key]
            if db_key==key:
                try:
                    project.udf['Reads Min'] = float(row.value) * lanes_ordered / samples_count
                    project.put()
                except ZeroDivisionError:
                    pass
Example #10
class LimsService(object):
    name = "lims_service"

    def __init__(self):
        super(LimsService, self).__init__()
        self.lims = Lims(BASEURI, USERNAME, PASSWORD)
        self.lims.check_version()

    @rpc
    def sample(self, lims_id):
        """Get a sample from LIMS."""
        sample_obj = LimsSample(self.lims, id=lims_id)
        sample_dict = transform_entry(sample_obj)
        return sample_dict

    @rpc
    def validate_sample(self, lims_id):
        """Validate information in the LIMS on sample level."""
        logger.debug("fetch sample from LIMS")
        sample_obj = LimsSample(self.lims, id=lims_id)

        try:
            case_id = sample_obj.udf["familyID"]
            cust_id = sample_obj.udf["customer"]
        except KeyError as error:
            raise MissingLimsDataException(str(error))
        except HTTPError as error:
            raise LimsSampleNotFoundError(str(error))

    @rpc
    def ls_case(self, cust_id, case_id):
        """Fetch all samples for a case from the LIMS."""
        sample_objs = self.lims.get_samples(udf={"customer": cust_id, "familyID": case_id})
        sample_dicts = [transform_entry(sample) for sample in sample_objs]
        analysis_types = set(sample["analysis_type"] for sample in sample_dicts)
        case_data = {"analysis_types": list(analysis_types), "samples": sample_dicts}
        return case_data

    @rpc
    def ls_project(self, project_id):
        """List all samples in a project."""
        samples = self.lims.get_samples(projectname=project_id)
        lims_ids = [sample.id for sample in samples]
        return lims_ids

    @rpc
    def pedigree(self, cust_id, case_id):
        """Generate pedigree content for a case."""
        ped_content = serialize_pedigree(self.lims, cust_id, case_id)
        return ped_content

    @rpc
    def target_reads(self, lims_id):
        """Determine the amount of reads to be sequenced."""
        sample_obj = LimsSample(self.lims, id=lims_id)
        app_tag = analysis_info(sample_obj)
        # millions of reads
        target_reads = app_tag["reads"] * 1000000
        return target_reads
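The class above exposes its methods as nameko RPC endpoints under the service name "lims_service". A minimal client sketch, assuming a standard nameko setup; the AMQP broker URI and the LIMS id below are placeholders:

from nameko.standalone.rpc import ClusterRpcProxy

config = {"AMQP_URI": "amqp://guest:guest@localhost:5672/"}  # placeholder broker
with ClusterRpcProxy(config) as rpc:
    # Calls one of the @rpc methods registered by LimsService above.
    sample_dict = rpc.lims_service.sample("ACC2351A1")  # placeholder LIMS id
    print(sample_dict)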
Example #11
    def test_parse_response(self):
        lims = Lims(self.url, username=self.username, password=self.password)
        r = Mock(content = self.sample_xml, status_code=200)
        pr = lims.parse_response(r)
        assert isinstance(pr, xml.etree.ElementTree.Element)

        r = Mock(content = self.error_xml, status_code=400)
        self.assertRaises(HTTPError, lims.parse_response, r)
Example #12
 def test_get(self, mocked_instance):
     lims = Lims(self.url, username=self.username, password=self.password)
     r = lims.get('{url}/api/v2/artifacts?sample_name=test_sample'.format(url=self.url))
     assert r is not None
     assert callable(r.find)
     assert hasattr(r.attrib, '__getitem__')
     assert mocked_instance.call_count == 1
     mocked_instance.assert_called_with('http://testgenologics.com:4040/api/v2/artifacts?sample_name=test_sample', timeout=16,
                               headers={'accept': 'application/xml'}, params={}, auth=('test', 'password'))
Example #13
 def test_post(self):
     lims = Lims(self.url, username=self.username, password=self.password)
     uri = '{url}/api/v2/samples'.format(url=self.url)
     with patch('requests.post', return_value=Mock(content = self.sample_xml, status_code=200)) as mocked_put:
         response = lims.post(uri=uri, data=self.sample_xml)
         assert mocked_put.call_count == 1
     with patch('requests.post', return_value=Mock(content = self.error_xml, status_code=400)) as mocked_put:
         self.assertRaises(HTTPError, lims.post, uri=uri, data=self.sample_xml)
         assert mocked_put.call_count == 1
Example #14
def main(args):
    log = []
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    process = Process(lims, id=args.pid)
    for io in process.input_output_maps:
        if io[1]['output-generation-type'] != 'PerInput':
            continue
        try:
            starting_amount = obtain_amount(io[0]['uri'])
        except Exception as e:
            log.append(str(e))
            starting_amount = 0

        log.append("Starting amount of {} : {} ng".format(
            io[0]['uri'].samples[0].name, starting_amount))
        current_amount = starting_amount
        #preps
        preps = lims.get_processes(
            inputartifactlimsid=io[0]['uri'].id,
            type=["Setup Workset/Plate", "Amount confirmation QC"])
        for pro in preps:
            if pro.id == args.pid:
                continue  # skip the current step
            for prepio in pro.input_output_maps:
                if prepio[1]['output-generation-type'] == 'PerInput' and prepio[
                        0]['uri'].id == io[0]['uri'].id:
                    if "Amount taken (ng)" in prepio[1][
                            'uri'].udf:  #should always be true
                        prep_amount = prepio[1]['uri'].udf["Amount taken (ng)"]
                        log.append(
                            "Removing {} ng for prep {} for sample {}".format(
                                prep_amount, pro.id,
                                io[0]['uri'].samples[0].name))
                        current_amount = current_amount - prep_amount
                    else:
                        log.append(
                            "No Amount Taken found for prep {} of sample {}".
                            format(pro.id, io[0]['uri'].samples[0].name))

        if current_amount < 0:
            log.append(
                "Estimated amount for sample {} is {}, correcting to zero".
                format(io[0]['uri'].samples[0].name, current_amount))
            current_amount = 0

        update_output_values(io[0]['uri'], io[1]['uri'], current_amount)

        with open("amount_check_log.txt", "w") as f:
            f.write("\n".join(log))

        for out in process.all_outputs():
            if out.name == "QC Assignment Log File":
                for f in out.files:
                    lims.request_session.delete(f.uri)
                lims.upload_new_file(out, "amount_check_log.txt")
Example #15
def test_D(server_test1):
    # GIVEN: A lims with a sample with:
    #   name: 'maya'
    #   Udf "Source": "blood", "Reads missing (M)": 0

    # WHEN creating a genologics Lims object and filtering on the fields.
    lims = Lims("http://127.0.0.1:8000", 'dummy', 'dummy')
    samples = lims.get_samples(udf={"Source": "blood", "Reads missing (M)": 0}, name='maya')

    # Then the sample should be found
    assert samples == [Sample(lims, id='ACC2351A2')]
Example #16
def main(args):
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    conts = lims.get_containers(name=args.flowcell)
    print("found {} containers with name {}".format(len(conts), args.flowcell))
    for cont in conts:
        if cont.type.name not in args.types:
            print("Container {} is a {} and will be renamed".format(cont.id, cont.type.name))
            cont.name = args.name
            cont.put()
        else:
            print("Container {} is a {} and will NOT be renamed".format(cont.id, cont.type.name))
Example #17
def test_C(server_test1):
    # GIVEN: A lims with a process with:
    #   process type: 'CG002 - qPCR QC (Library Validation) (Dev)'
    #   input artifact: '2-1155237'
    #   Udf 'Instrument Used': 'Cava'

    # WHEN creating a genologics Lims object and filtering on the fields.
    lims = Lims("http://127.0.0.1:8000", 'dummy', 'dummy')
    processes = lims.get_processes(type=['CG002 - qPCR QC (Library Validation) (Dev)'], inputartifactlimsid=['2-1155237'], udf={'Instrument Used': 'Cava'})

    # Then the process should be found
    assert processes == [Process(lims, id='24-168017')]
Example #18
    def test_tostring(self):
        lims = Lims(self.url, username=self.username, password=self.password)
        from xml.etree import ElementTree as ET
        a = ET.Element('a')
        b = ET.SubElement(a, 'b')
        c = ET.SubElement(a, 'c')
        d = ET.SubElement(c, 'd')
        etree = ET.ElementTree(a)
        expected_string = b"""<?xml version='1.0' encoding='utf-8'?>
<a><b /><c><d /></c></a>"""
        string = lims.tostring(etree)
        assert string == expected_string
Example #19
    def test_tostring(self):
        lims = Lims(self.url, username=self.username, password=self.password)
        from xml.etree import ElementTree as ET
        a = ET.Element('a')
        b = ET.SubElement(a, 'b')
        c = ET.SubElement(a, 'c')
        d = ET.SubElement(c, 'd')
        etree = ET.ElementTree(a)
        expected_string=b"""<?xml version='1.0' encoding='utf-8'?>
<a><b /><c><d /></c></a>"""
        string = lims.tostring(etree)
        assert string == expected_string
Example #20
def main(args):
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    conts = lims.get_containers(name=args.flowcell)
    print("found {} containers with name {}".format(len(conts), args.flowcell))
    for cont in conts:
        if cont.type.name not in args.types:
            print("Container {} is a {} and will be renamed".format(
                cont.id, cont.type.name))
            cont.name = args.name
            cont.put()
        else:
            print("Container {} is a {} and will NOT be renamed".format(
                cont.id, cont.type.name))
Example #21
    def test_parse_response(self):
        lims = Lims(self.url, username=self.username, password=self.password)
        r = Mock(content = self.sample_xml, status_code=200)
        pr = lims.parse_response(r)
        assert pr is not None
        assert callable(pr.find)
        assert hasattr(pr.attrib, '__getitem__')

        r = Mock(content = self.error_xml, status_code=400)
        self.assertRaises(HTTPError, lims.parse_response, r)

        r = Mock(content = self.error_no_msg_xml, status_code=400)
        self.assertRaises(HTTPError, lims.parse_response, r)
Example #22
    def test_parse_response(self):
        lims = Lims(self.url, username=self.username, password=self.password)
        r = Mock(content=self.sample_xml, status_code=200)
        pr = lims.parse_response(r)
        assert pr is not None
        assert callable(pr.find)
        assert hasattr(pr.attrib, '__getitem__')

        r = Mock(content=self.error_xml, status_code=400)
        self.assertRaises(HTTPError, lims.parse_response, r)

        r = Mock(content=self.error_no_msg_xml, status_code=400)
        self.assertRaises(HTTPError, lims.parse_response, r)
Example #23
 def test_get(self, mocked_instance):
     lims = Lims(self.url, username=self.username, password=self.password)
     r = lims.get('{url}/api/v2/artifacts?sample_name=test_sample'.format(
         url=self.url))
     assert r is not None
     assert callable(r.find)
     assert hasattr(r.attrib, '__getitem__')
     assert mocked_instance.call_count == 1
     mocked_instance.assert_called_with(
         'http://testgenologics.com:4040/api/v2/artifacts?sample_name=test_sample',
         timeout=16,
         headers={'accept': 'application/xml'},
         params={},
         auth=('test', 'password'))
Example #24
    def __init__(self, current_step, logger=None, cache=False):
        """
        Initializes the context.

        :param current_step: The step from which the extension is running
        :param logger: A logger instance
        :param cache: Set to True to use the cache folder (.cache) for downloaded files
        """
        lims = Lims(BASEURI, USERNAME, PASSWORD)
        lims.check_version()

        self.advanced = Advanced(lims)
        self.current_step = Process(lims, id=current_step)
        self.logger = logger or logging.getLogger(__name__)
        self._local_shared_files = []
        self.cache = cache
Example #25
def get_project(api: lims.Lims,
                project_name: str):
    project = api.get_projects(name=project_name)
    assert isinstance(project, list)
    print(len(project))
    assert len(project) == 2
    return project[0]
Example #26
 def test_post(self):
     lims = Lims(self.url, username=self.username, password=self.password)
     uri = '{url}/api/v2/samples'.format(url=self.url)
     with patch('requests.post',
                return_value=Mock(content=self.sample_xml,
                                  status_code=200)) as mocked_put:
         response = lims.post(uri=uri, data=self.sample_xml)
         assert mocked_put.call_count == 1
     with patch('requests.post',
                return_value=Mock(content=self.error_xml,
                                  status_code=400)) as mocked_put:
         self.assertRaises(HTTPError,
                           lims.post,
                           uri=uri,
                           data=self.sample_xml)
         assert mocked_put.call_count == 1
Example #27
    def __init__(self, current_step, logger=None, cache=False):
        """
        Initializes the context.

        :param current_step: The step from which the extension is running
        :param logger: A logger instance
        :param cache: Set to True to use the cache folder (.cache) for downloaded files
        """
        lims = Lims(BASEURI, USERNAME, PASSWORD)
        lims.check_version()

        self.advanced = Advanced(lims)
        self.current_step = Process(lims, id=current_step)
        self.logger = logger or logging.getLogger(__name__)
        self._local_shared_files = []
        self.cache = cache
Example #28
def main(args):
    log = lutils.setupLog('bioinfologger', args.logfile)
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    with open(args.conf) as conf_file:
        conf = yaml.safe_load(conf_file)
    bioinfodb = lutils.setupServer(conf)['bioinfo_analysis']
    open_projects = bioinfodb.view('latest_data/sample_id_open')

    for row in open_projects.rows:
        project_id = row.key[0]
        sample_id = row.key[3]
        close_date = None
        try:
            close_date = Project(lims=lims, id=project_id).close_date
        except HTTPError as e:
            if '404: Project not found' in str(e):
                log.error('Project '+project_id+' not found in LIMS')
                continue
        if close_date is not None:
            try:
                doc = bioinfodb.get(row.id)
            except Exception as e:
                log.error('{} in Project {} Sample {} while accessing doc from statusdb'.format(e, project_id, sample_id))
                continue
            doc['project_closed'] = True
            try:
                bioinfodb.save(doc)
                log.info('Updated Project '+project_id+ ' Sample '+sample_id)
            except Exception as e:
                log.error('{} in Project {} Sample {} while saving to statusdb'.format(e, project_id, sample_id))
Example #29
def flowcell_samples(
    lims: Lims, flowcell_id: str, bcl_converter: str
) -> Iterable[Union[LimsFlowcellSampleBcl2Fastq, LimsFlowcellSampleDragen]]:
    lims_flowcell_sample = {
        "bcl2fastq": LimsFlowcellSampleBcl2Fastq,
        "dragen": LimsFlowcellSampleDragen,
    }
    LOG.info("Fetching samples from lims for flowcell %s", flowcell_id)
    containers: List[Container] = lims.get_containers(name=flowcell_id)
    if not containers:
        return []
    container: Container = containers[-1]  # only take the last one. See ÖA#217.
    raw_lanes: List[str] = sorted(container.placements.keys())
    for raw_lane in raw_lanes:
        lane: int = get_placement_lane(raw_lane)
        placement_artifact: Artifact = container.placements[raw_lane]
        non_pooled_artifacts: List[Artifact] = get_non_pooled_artifacts(placement_artifact)
        for artifact in non_pooled_artifacts:
            sample: Sample = artifact.samples[0]  # we are assured it only has one sample
            label: Optional[str] = get_reagent_label(artifact)
            index = get_index(lims=lims, label=label)
            yield lims_flowcell_sample[bcl_converter](
                flowcell_id=flowcell_id,
                lane=lane,
                sample_id=sample.id,
                index=index,
                sample_name=sample.name,
                project=sample.project.name,
            )
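A minimal usage sketch for the generator above; the connection values and flowcell id are placeholders, and it assumes the yielded models expose the constructor fields as attributes:

lims = Lims("https://lims.example.com", "apiuser", "apipassword")  # placeholder credentials
for flowcell_sample in flowcell_samples(lims=lims, flowcell_id="HVKJCDRXX", bcl_converter="bcl2fastq"):
    print(flowcell_sample.sample_id, flowcell_sample.lane, flowcell_sample.index)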
Example #30
class TestEntities(TestCase):
    dummy_xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <dummy></dummy>"""

    def setUp(self):
        self.lims = Lims(url, username='******', password='******')

    def _tostring(self, entity):
        return self.lims.tostring(ElementTree.ElementTree(entity.root)).decode("utf-8")
Example #31
class TestEntities(TestCase):
    dummy_xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <dummy></dummy>"""

    def setUp(self):
        self.lims = Lims(url, username='******', password='******')

    def _tostring(self, entity):
        return self.lims.tostring(ElementTree.ElementTree(entity.root)).decode("utf-8")
Example #32
def test_B(server_test1):
    # GIVEN: A lims with a process with the udf 'Instrument Used': 'Cava'

    # WHEN creating a genologics Process entity of that process
    lims = Lims("http://127.0.0.1:8000", 'dummy', 'dummy')
    p = Process(lims, id='24-168017')

    # THEN the process instance should have the udf
    assert p.udf['Instrument Used'] == 'Cava'
Example #33
def test_A(server_test1):
    # GIVEN: A lims with a sample with the udf 'customer': 'cust002'

    # WHEN creating a genologics Sample entity of that sample
    lims = Lims("http://127.0.0.1:8000", 'dummy', 'dummy')
    s = Sample(lims, id='ACC2351A1')

    # THEN the sample instance should have the udf
    assert s.udf['customer'] == 'cust002'
Example #34
def main(args):
    log = []
    lims = Lims(BASEURI,USERNAME,PASSWORD)
    process = Process(lims, id=args.pid)
    for io in process.input_output_maps:
        if io[1]['output-generation-type'] != 'PerInput':
            continue
        try:
            starting_amount = obtain_amount(io[0]['uri'])
        except Exception as e:
            log.append(str(e))
            starting_amount = 0

        log.append("Starting amount of {} : {} ng".format(io[0]['uri'].samples[0].name, starting_amount))
        current_amount = starting_amount
        #preps
        preps = lims.get_processes(inputartifactlimsid=io[0]['uri'].id, type=["Setup Workset/Plate", "Amount confirmation QC"])
        for pro in preps:
            if pro.id == args.pid:
                continue # skip the current step
            for prepio in pro.input_output_maps:
                if prepio[1]['output-generation-type'] == 'PerInput' and prepio[0]['uri'].id == io[0]['uri'].id:
                    if "Amount taken (ng)" in prepio[1]['uri'].udf: #should always be true
                        prep_amount = prepio[1]['uri'].udf["Amount taken (ng)"]
                        log.append("Removing {} ng for prep {} for sample {}".format(prep_amount, pro.id, io[0]['uri'].samples[0].name))
                        current_amount = current_amount - prep_amount
                    else:
                        log.append("No Amount Taken found for prep {} of sample {}".format(pro.id, io[0]['uri'].samples[0].name))

        if current_amount < 0:
            log.append("Estimated amount for sample {} is {}, correcting to zero".format(io[0]['uri'].samples[0].name, current_amount))
            current_amount = 0

        update_output_values(io[0]['uri'], io[1]['uri'], current_amount)

        with open("amount_check_log.txt", "w") as f:
            f.write("\n".join(log))

        for out in process.all_outputs():
            if out.name == "QC Assignment Log File" :
                for f in out.files:
                    lims.request_session.delete(f.uri)
                lims.upload_new_file(out, "amount_check_log.txt") 
Example #35
    def setUp(self):
        self.et = ElementTree.fromstring("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<test-entry>
<artifact uri="http://testgenologics.com:4040/api/v2/artifacts/a1"></artifact>
</test-entry>
""")
        self.lims = Lims('http://testgenologics.com:4040', username='******', password='******')
        self.a1 = Artifact(self.lims, id='a1')
        self.a2 = Artifact(self.lims, id='a2')
        self.instance = Mock(root=self.et, lims=self.lims)
Example #36
def create_containers(api: lims.Lims, size: int, prefix: str):
    type_element = api.get_container_types(name="Tube")
    assert isinstance(type_element, list)
    assert len(type_element) == 1
    container_list = []
    for i in range(size):
        container = entities.Container.create(lims=api,
                                              name="{}_{}".format(prefix, i),
                                              type=type_element[0])
        container_list.append(container)
    return container_list
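A usage sketch for the helper above, with placeholder connection values and prefix; it assumes a container type named "Tube" exists in the target LIMS:

api = lims.Lims("https://lims.example.com", "apiuser", "apipassword")  # placeholder credentials
tubes = create_containers(api=api, size=3, prefix="batch42")  # creates Tube containers batch42_0 .. batch42_2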
Example #37
 def __init__(self, project_name):
     self.project_name = project_name
     self.project_source = os.path.join(cfg.query('sample','delivery_source'), project_name)
     self.project_delivery = os.path.join(cfg.query('sample','delivery_dest'), project_name)
     self.lims=Lims(**cfg.get('clarity'))
     self.params = {'project_name':project_name}
     self.results = {}
     self.fill_sample_names_from_lims()
     self.samples_delivered = self.read_metrics_csv(os.path.join(self.project_delivery, 'summary_metrics.csv'))
     self.get_sample_param()
     self.fill_project_information_from_lims()
Example #38
def main(args):
    lims = Lims(BASEURI, USERNAME, PASSWORD)

    CC = CheckConfigurations(lims)
    if args.udf_preset:
        udf_results = CC.search_steps_for_udf(search_by=args.udf_preset)
    elif args.automation_string:
        search_results = CC.search_automations(search_by=args.automation_string)
    elif args.print_all_bash:
        CC.print_all_bash()
    elif args.all_active:
        CC.serarch_all_active()
Example #39
    def __init__(self):

        self.log = logging.getLogger('multiqc')

        # Check that this plugin hasn't been disabled
        if config.kwargs.get('disable_clarity', False) is True:
            self.log.info(
                "Skipping MultiQC_Clarity as disabled on command line")
            return None
        if getattr(config, 'disable_clarity', False) is True:
            self.log.debug(
                "Skipping MultiQC_Clarity as specified in config file")
            return None

        super(MultiQC_clarity_metadata, self).__init__(
            name='Clarity',
            anchor='clarity',
            href='https://github.com/Galithil/MultiQC_Clarity',
            info="fetches data from your Basespace Clarity LIMS instance.")

        self.lims = Lims(BASEURI, USERNAME, PASSWORD)
        self.metadata = {}
        self.header_metadata = {}
        self.general_metadata = {}
        self.tab_metadata = {}
        self.samples = []
        self.sections = []

        self.schema = getattr(config, 'clarity', None)
        if self.schema is None:
            self.log.warn("No config found for MultiQC_Clarity")
            return None

        self.get_samples()
        self.get_metadata('Header')
        self.get_metadata('General Statistics')
        self.get_metadata('Clarity Tab')
        self.update_multiqc_report()
        self.make_sections()
        report.modules_output.append(self)
Example #40
    def __init__(self):

        self.log = logging.getLogger('multiqc')

        # Check that this plugin hasn't been disabled
        if config.kwargs.get('disable_clarity', False) is True:
            self.log.info(
                "Skipping MultiQC_Clarity as disabled on command line")
            return None
        if getattr(config, 'disable_clarity', False) is True:
            self.log.debug(
                "Skipping MultiQC_Clarity as specified in config file")
            return None

        super(MultiQC_clarity_metadata, self).__init__(name='Clarity LIMS',
                                                       anchor='clarity')

        self.intro = '''<p>The <a href="https://github.com/MultiQC/MultiQC_Clarity" target="_blank">MultiQC_Clarity</a>
            plugin fetches data from a specified
            <a href="https://www.genologics.com/clarity-lims/" target="_blank">Basespace Clarity LIMS</a> instance.</p>'''

        self.lims = Lims(BASEURI, USERNAME, PASSWORD)
        self.metadata = {}
        self.header_metadata = {}
        self.general_metadata = {}
        self.tab_metadata = {}
        self.samples = []

        self.schema = getattr(config, 'clarity', None)
        if self.schema is None:
            self.log.debug("No config found for MultiQC_Clarity")
            return None

        self.get_samples()
        self.get_metadata('report_header_info')
        self.get_metadata('general_stats')
        self.get_metadata('clarity_module')
        self.update_multiqc_report()
        self.make_sections()
        report.modules_output.append(self)
Example #41
def main(args):
    lims = Lims(BASEURI,USERNAME,PASSWORD)
    process = Process(lims, id=args.pid)
    for io in process.input_output_maps:
        if io[1]['output-generation-type'] != 'PerInput':
            continue
        for idx, field in enumerate(args.fields):
            if field in io[0]['uri'].udf:
                if args.destfields:
                    io[1]['uri'].udf[args.destfields[idx]] = io[0]['uri'].udf[field]
                else:
                    io[1]['uri'].udf[field] = io[0]['uri'].udf[field]
        io[1]['uri'].put()
Example #42
def diff_project_objects(pj_id, couch, logfile, new=True):

    proj_db = couch['projects']
    samp_db = couch['samples']
    log = setupLog('diff - {}'.format(pj_id), logfile)
    lims = Lims(BASEURI, USERNAME, PASSWORD)

    view = proj_db.view('projects/lims_followed')

    try:
        old_project_couchid = view[pj_id].rows[0].value
    except (KeyError, IndexError):
        log.error("No such project {}".format(pj_id))
Example #43
def main(args):
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    process = Process(lims, id=args.pid)
    for io in process.input_output_maps:
        if io[1]['output-generation-type'] != 'PerInput':
            continue
        if "Amount taken (ng)" in io[1]['uri'].udf:
            if "Amount left (ng)" in io[0]['uri'].udf:
                io[0]['uri'].udf["Amount left (ng)"] = io[0]['uri'].udf[
                    "Amount left (ng)"] - io[1]['uri'].udf["Amount taken (ng)"]
            else:
                io[0]['uri'].udf["Amount left (ng)"] = io[0]['uri'].udf[
                    "Amount(ng)"] - io[1]['uri'].udf["Amount taken (ng)"]
            io[0]['uri'].put()
Example #44
def main(options):
    conf = options.conf
    output_f = options.output_f
    couch = load_couch_server(conf)
    mainlims = Lims(BASEURI, USERNAME, PASSWORD)
    lims_db = get_session()

    mainlog = logging.getLogger('psullogger')
    mainlog.setLevel(level=logging.INFO)
    mfh = logging.handlers.RotatingFileHandler(options.logfile,
                                               maxBytes=209715200,
                                               backupCount=5)
    mft = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    mfh.setFormatter(mft)
    mainlog.addHandler(mfh)

    # try getting orderportal config
    oconf = None
    try:
        with open(options.oconf, 'r') as ocf:
            oconf = yaml.load(ocf, Loader=yaml.SafeLoader)['order_portal']
    except Exception as e:
        mainlog.warn(
            "Loading orderportal config {} failed due to {}, so order information "
            "for project will not be updated".format(options.oconf, e))

    if options.project_name:
        host = get_configuration()['url']
        pj_id = lims_db.query(DBProject.luid).filter(
            DBProject.name == options.project_name).scalar()
        if not pj_id:
            pj_id = options.project_name
        P = ProjectSQL(lims_db, mainlog, pj_id, host, couch, oconf)
        if options.upload:
            P.save(
                update_modification_time=not options.no_new_modification_time)
        else:
            if output_f is not None:
                with open(output_f, 'w') as f:
                    f.write(json.dumps(P.obj))
            else:
                print(json.dumps(P.obj))

    else:
        projects = create_projects_list(options, lims_db, mainlims, mainlog)
        masterProcess(options, projects, mainlims, mainlog, oconf)
        lims_db.commit()
        lims_db.close()
Example #45
def get_index(lims: Lims, label: str) -> str:
    """Parse out the sequence from a reagent label"""

    reagent_types = lims.get_reagent_types(name=label)

    if len(reagent_types) > 1:
        raise ValueError("Expecting at most one reagent type. Got ({}).".format(len(reagent_types)))

    try:
        reagent_type = reagent_types.pop()
    except IndexError:
        return ""
    sequence = reagent_type.sequence

    match = re.match(r"^.+ \((.+)\)$", label)
    if match:
        assert match.group(1) == sequence

    return sequence
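The regular expression above only cross-checks reagent labels of the form "<name> (<sequence>)"; a small self-contained sketch of that parsing step, using a made-up label:

import re

label = "D701-D501 (ATTACTCG-TATAGCCT)"  # hypothetical reagent label
match = re.match(r"^.+ \((.+)\)$", label)
if match:
    print(match.group(1))  # -> ATTACTCG-TATAGCCT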
Example #46
    elif lines: # Avoid printing empty files
        lp_args = ["lp"]
        if args.hostname:
            #remove that when all the calls to this script have been updated
            if args.hostname == 'homer.scilifelab.se:631':
                args.hostname='homer2.scilifelab.se:631'
            lp_args += ["-h",args.hostname]
        if args.destination:
            lp_args += ["-d",args.destination]
        lp_args.append("-") # lp accepts stdin if '-' is given as filename
        logging.info('Ready to call lp for printing.')
        sp = subprocess.Popen(lp_args,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              universal_newlines=True)
        logging.info('lp command is called for printing.')
        # communicate() writes the lines to stdin, waits for lp to finish and closes the pipes
        stdout, stderr = sp.communicate(input='\n'.join(lines))
        logging.info('lp stdout: {0}'.format(stdout))
        logging.info('lp stderr: {0}'.format(stderr))
        logging.info('lp command finished')

if __name__ == '__main__':
    arguments = getArgs()
    lims = Lims(BASEURI,USERNAME,PASSWORD)
    lims.check_version()
    prepend = not arguments.no_prepend
    with EppLogger(arguments.log,lims=lims,prepend=prepend) as epp_logger:
        main(arguments,lims,epp_logger)
Example #47
NOTE: You need to set the BASEURI, USERNAME AND PASSWORD.

Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
"""

import codecs

from genologics.lims import Lims

# Login parameters for connecting to a LIMS instance.
# NOTE: Modify according to your setup.
from genologics.site_cloud import BASEURI, USERNAME, PASSWORD

# Create the LIMS interface instance, and check the connection and version.
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()

# Get the list of all containers.
containers = lims.get_containers()
print(len(containers), 'containers in total')

for state in ['Empty', 'Reagent-Only', 'Discarded', 'Populated']:
    containers = lims.get_containers(state=state)
    print(len(containers), state, 'containers')

containers = lims.get_containers(type='96 well plate')
print(len(containers))

container = containers[2]
print(container, container.occupied_wells)
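The NOTE at the top of this example refers to a small site configuration module (imported here as genologics.site_cloud); a sketch of what such a module might contain, with placeholder values to adapt to your own LIMS:

# site configuration module (placeholder values)
BASEURI = "https://your-lims.example.com"
USERNAME = "apiuser"
PASSWORD = "apipassword"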
Example #48
NOTE: You need to set the BASEURI, USERNAME AND PASSWORD.

Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
"""

import codecs

from genologics.lims import Lims

# Login parameters for connecting to a LIMS instance.
# NOTE: Modify according to your setup.
from genologics.site_cloud import BASEURI, USERNAME, PASSWORD

# Create the LIMS interface instance, and check the connection and version.
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()

# Get the list of all artifacts.
## artifacts = lims.get_artifacts()
## print len(artifacts), 'total artifacts'

## artifacts = lims.get_artifacts(qc_flag='UNKNOWN')
## print len(artifacts), 'QC UNKNOWN artifacts'
## artifacts = lims.get_artifacts(qc_flag='PASSED')
## print len(artifacts), 'QC PASSED artifacts'
## artifacts = lims.get_artifacts(qc_flag='FAILED')
## print len(artifacts), 'QC FAILED artifacts'

## artifacts = lims.get_artifacts(working_flag=True)
## print len(artifacts), 'Working-flag True artifacts'
Example #49
 def test_get_uri(self):
     lims = Lims(self.url, username=self.username, password=self.password)
     assert lims.get_uri('artifacts',sample_name='test_sample') == '{url}/api/v2/artifacts?sample_name=test_sample'.format(url=self.url)
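get_uri joins the base URI, the API segments and the query parameters into a single URL, as the assertion above shows; a minimal sketch with placeholder connection values:

lims = Lims("https://lims.example.com", username="apiuser", password="apipassword")
uri = lims.get_uri("artifacts", sample_name="test_sample")
print(uri)  # -> https://lims.example.com/api/v2/artifacts?sample_name=test_sample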
Example #50
File: api.py Project: espre05/lims
 def connect(self, baseuri, username, password):
     """Connect to the LIMS instance."""
     self.lims = Lims(baseuri, username, password)
Example #51
NOTE: You need to set the BASEURI, USERNAME AND PASSWORD.

Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
"""

import codecs

from genologics.lims import Lims

# Login parameters for connecting to a LIMS instance.
# NOTE: Modify according to your setup.
from genologics.site_cloud import BASEURI, USERNAME, PASSWORD

# Create the LIMS interface instance, and check the connection and version.
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()

# Get the list of all projects.
projects = lims.get_projects()
print(len(projects), 'projects in total')

# Get the list of all projects opened since May 30th 2012.
day = '2012-05-30'
projects = lims.get_projects(open_date=day)
print(len(projects), 'projects opened since', day)

# Get the project with the specified LIMS id, and print some info.
project = lims.get_project('KRA61')
print(project, project.name, project.open_date)
Example #52
class ProjectReport:

    def __init__(self, project_name):
        self.project_name = project_name
        self.project_source = os.path.join(cfg.query('sample','delivery_source'), project_name)
        self.project_delivery = os.path.join(cfg.query('sample','delivery_dest'), project_name)
        self.lims=Lims(**cfg.get('clarity'))
        self.params = {'project_name':project_name}
        self.results = {}
        self.fill_sample_names_from_lims()
        self.samples_delivered = self.read_metrics_csv(os.path.join(self.project_delivery, 'summary_metrics.csv'))
        self.get_sample_param()
        self.fill_project_information_from_lims()

    def fill_project_information_from_lims(self):
        project = self.lims.get_projects(name=self.project_name)[0]
        self.project_info = {}
        self.project_info['project_name']=['Project name:',self.project_name]
        self.project_info['project_title']=['Project title:', project.udf.get('Project Title', '')]
        self.project_info['enquiry'] = ['Enquiry no:', project.udf.get('Enquiry Number', '')]
        self.project_info['quote'] = ['Quote no:', project.udf.get('Quote No.', '')]
        self.project_info['researcher'] = ['Researcher:','%s %s (%s)'%(project.researcher.first_name,
                                                                       project.researcher.last_name,
                                                                       project.researcher.email)]
        self.project_order = ['project_name', 'project_title', 'enquiry', 'quote', 'researcher']


    def fill_sample_names_from_lims(self):
        samples = self.lims.get_samples(projectname=self.project_name)
        self.samples = [s.name for s in samples]
        self.modified_samples = [re.sub(r'[: ]','_', s.name) for s in samples]


    def get_library_workflow_from_sample(self, sample_name):
        samples = self.lims.get_samples(projectname=self.project_name, name=sample_name)
        if len(samples) == 1:
            return samples[0].udf.get('Prep Workflow')
        else:
            app_logger.error('%s samples found for sample name %s' % (len(samples), sample_name))

    def get_species_from_sample(self, sample_name):
        samples = self.lims.get_samples(projectname=self.project_name, name=sample_name)
        if len(samples) == 1:
            s = samples[0].udf.get('Species')
            return species_alias.get(s, s)
        else:
            app_logger.error('%s samples found for sample name %s' % (len(samples), sample_name))

    def parse_program_csv(self, program_csv):
        all_programs = {}
        if os.path.exists(program_csv):
            with open(program_csv) as open_prog:
                for row in csv.reader(open_prog):
                    all_programs[row[0]]=row[1]
        #TODO: change the hardcoded version of bcl2fastq
        all_programs['bcl2fastq'] = '2.17.1.14'
        for p in ['bcl2fastq','bcbio', 'bwa', 'gatk', 'samblaster']:
            if p in all_programs:
                self.params[p + '_version']=all_programs.get(p)
        

    def parse_project_summary_yaml(self, summary_yaml):
        with open(summary_yaml, 'r') as open_file:
            full_yaml = yaml.safe_load(open_file)
        sample_yaml=full_yaml['samples'][0]
        path_to_bcbio = os.path.basename(os.path.dirname(sample_yaml['dirs']['galaxy']))
        self.params['bcbio_version'] = path_to_bcbio.split('/')[-2]
        if sample_yaml['genome_build'] == 'hg38':
            self.params['genome_version'] = 'GRCh38 (with alt, decoy and HLA sequences)'

    def read_metrics_csv(self, metrics_csv):
        samples_to_info={}
        with open(metrics_csv) as open_metrics:
            reader = csv.DictReader(open_metrics, delimiter='\t', quoting=csv.QUOTE_NONE)
            for row in reader:
                samples_to_info[row['Sample Id']] = row
        return samples_to_info

    def get_sample_param(self):
        self.fill_sample_names_from_lims()
        project_size = 0
        library_workflows=set()
        species = set()
        for sample in self.samples:
            library_workflow = self.get_library_workflow_from_sample(sample)
            library_workflows.add(library_workflow)
            species.add(self.get_species_from_sample(sample))
        if len(library_workflows) == 1 :
            self.library_workflow = library_workflows.pop()
        else:
            app_logger.error('More than one workflow used in project %s: %s' % (self.project_name, ', '.join(library_workflows)))

        if len(species) == 1 :
            self.species = species.pop()
        else:
            app_logger.error('More than one species used in project %s: %s'%(self.project_name, ', '.join(species)))


        if self.library_workflow in ['TruSeq Nano DNA Sample Prep', None] :
            self.template = 'truseq_nano_template'
        elif self.library_workflow in ['TruSeq PCR-Free DNA Sample Prep', 'TruSeq PCR-Free Sample Prep'] :
            self.template = 'truseq_pcrfree_template'
        else:
            app_logger.error('Unknown library workflow %s for project %s'%(self.library_workflow, self.project_name))
            return None

        if self.species == 'Human':
            self.template += '.html'
        else:
            self.template += '_non_human.html'

        self.params['adapter1'] = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCA"
        self.params['adapter2'] = "AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT"

        project_size = getFolderSize(self.project_delivery)
        for sample in set(self.modified_samples):
            sample_source=os.path.join(self.project_source, sample)
            if os.path.exists(sample_source):
                program_csv = os.path.join(sample_source, 'programs.txt')
                if not os.path.exists(program_csv):
                    program_csv = os.path.join(sample_source, '.qc', 'programs.txt')
                self.parse_program_csv(program_csv)
                summary_yaml = os.path.join(sample_source, 'project-summary.yaml')
                if not os.path.exists(summary_yaml):
                    summary_yaml = os.path.join(sample_source, '.qc', 'project-summary.yaml')
                if os.path.exists(summary_yaml):
                    self.parse_project_summary_yaml(summary_yaml)

        self.results['project_size']=['Total folder size:','%.2fTb'%(project_size/1000000000000.0)]
        self.results['nb_sample']=['Number of sample:', len(self.samples)]
        self.results['nb_sample_delivered']=['Number of sample delivered:',len(self.samples_delivered)]
        yields = [float(self.samples_delivered[s]['Yield']) for s in self.samples_delivered]
        self.results['yield']=['Total yield Gb:','%.2f'%sum(yields)]
        self.results['mean_yield']=['Average yield Gb:','%.1f'%(sum(yields)/max(len(yields), 1))]

        try:
            coverage = [float(self.samples_delivered[s]['Mean coverage']) for s in self.samples_delivered]
            self.results['coverage']=['Average coverage per samples:','%.2f'%(sum(coverage)/max(len(coverage), 1))]
            self.results_order=['nb_sample','nb_sample_delivered', 'yield', 'mean_yield', 'coverage', 'project_size']
        except KeyError:
            self.results_order=['nb_sample','nb_sample_delivered', 'yield', 'mean_yield', 'project_size']



    def generate_report(self):
        template_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates'))
        env = Environment(loader=FileSystemLoader(template_dir))
        template = env.get_template(self.template)
        output = template.render(results_order=self.results_order, results=self.results,
                                 project_info=self.project_info, project_order=self.project_order,
                                 **self.params)
        pdf = get_pdf(output)
        project_file = os.path.join(self.project_delivery, 'project_%s_report.pdf'%self.project_name)
        with open(project_file, 'w') as open_pdf:
            open_pdf.write(pdf.getvalue())
Example #53
 def __init__(self):
     super(LimsService, self).__init__()
     self.lims = Lims(BASEURI, USERNAME, PASSWORD)
     self.lims.check_version()
Example #54
def namesetter(PID):

    lims = Lims(BASEURI, USERNAME, PASSWORD)
    lims.check_version()
    #Find LIMS entry with same PID
    allProjects = lims.get_projects()
    for proj in allProjects:
        if proj.id == PID:
            limsproject = proj.name
            break
    #Error handling
    if 'limsproject' not in locals():
        print("{} not available in LIMS.".format(PID))
        return None

    #Enter project summary process
    stepname = ['Project Summary 1.3']
    process = lims.get_processes(type=stepname, projectname=limsproject)
    #Error handling
    if process == []:
        print("{} for {} is not available in LIMS.".format(stepname, limsproject))
        return None

    loop = True
    while loop:
        if "Bioinfo responsible" in process[0].udf:
            response = process[0].udf["Bioinfo responsible"]
        else:
            response = "Unassigned"
        print("Existing Bioinfo responsible for project {} aka {} is: {}".format(limsproject, PID, response))

        #Checks for valid name
        in_responsibles = False
        config_responsibles = Udfconfig(lims, id="1128")
        while not in_responsibles:
            newname = input("Enter name of new Bioinfo responsible: ")
            for names in config_responsibles.presets:
                if newname in names:
                    in_responsibles = True
                    newname = names
            if not in_responsibles:
                print("Subset {} not found in accepted Bioinfo responsible list.".format(newname))
            else:
                print("Suggested name is {}".format(newname))

        confirmation = input("Project {} aka {} will have {} as new Bioinfo responsible, is this correct (Y/N)? ".format(limsproject, PID, newname))
        if confirmation == 'Y' or confirmation == 'y':
            try:
                #Only plain ASCII names are accepted by the put below
                newname.encode('ascii')
                process[0].udf["Bioinfo responsible"] = newname
                process[0].put()
                print("Project {} aka {} assigned to {}".format(limsproject, PID, newname))
                return None
            except UnicodeEncodeError:
                #Weird solution due to put function
                process[0].udf["Bioinfo responsible"] = response
                print("ERROR: You tried to use a special character, didn't you? Don't do that. New standards and stuff...")
        elif confirmation == 'N' or confirmation == 'n':
            loop = False
        else:
            print("Invalid answer.")
Example #55
 def setUp(self):
     self.lims = Lims(url, username='******', password='******')
Example #56
def generate_output(project, destid, total_lanes, req_lanes, lane_maps, acc_ratios):
    #Gathers the container id and well name for all samples in project
    #Cred to Denis for providing a base epp
    location = dict()
    lims = Lims(BASEURI, USERNAME, PASSWORD)
    allProjects = lims.get_projects()
    for proj in allProjects:
        if proj.id == project:
            projName = proj.name 
            break

    #All normalization processes for project
    norms = ['Library Normalization (MiSeq) 4.0', 'Library Normalization (Illumina SBS) 4.0', 'Library Normalization (HiSeq X) 1.0']
    pros = lims.get_processes(type=norms, projectname=projName)
    #For all processes
    for p in pros:
        #For all artifacts in process
        for o in p.all_outputs():
            #If the artifact is an Analyte and its name contains the project ID
            if o.type == "Analyte" and project in o.name:
                location[o.name.split()[0]] = [o.location[0].id, o.location[1]]
                
    #PRINT section
    #Print stats including duplicates
    timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d_%H:%M')
    sumName = "{}_summary_{}.txt".format(projName, timestamp)
    with open(sumName, "w") as summary:
        if sum(req_lanes.values()) != 0:
            OPT = sum(total_lanes)/float(sum(req_lanes.values()))
        else:
            OPT = 0
        output = "Ideal lanes (same schema): ", str(sum(req_lanes.values())) , ", Total lanes: ", str(sum(total_lanes)), ", OPT: ", str(round(OPT,3)),'\n'
        output = ''.join(output)
        summary.write( output )
        output = "Unique pools: ", str(len(total_lanes)), ", Average pool duplication: ", str(sum(total_lanes)/float(len(total_lanes))) ,'\n'
        output = ''.join(output)
        summary.write( output )
        
        well_bin = 0
        for index in xrange(1, len(lane_maps)+1):
            well_bin += 1
            summary.write('\n')
            output = "Wells {}-{}:\n".format(well_bin, well_bin+int(total_lanes[index-1])-1)
            summary.write(output)
            well_bin += int(total_lanes[index-1]-1)
            for counter in xrange(1, len(lane_maps[index])):
                output = "{} {}%\n".format(lane_maps[index][counter], acc_ratios[index][counter])
                summary.write(output)

    
    #Creates csv   
    name = "{}_repool_{}.csv".format(projName, timestamp)
    wells = ['Empty','A','B','C','D','E','F','G','H']
    #Index 0 is number, index 1 is Letter
    wellIndex = [1, 1]
    destNo = 0
    
    with open(name, 'w') as csvfile:
        writer = csv.writer(csvfile)
        for index in xrange(1, len(lane_maps)+1):
            for dupes in xrange(1, int(total_lanes[index-1])+1):
                if lane_maps[index] == 0:
                    raise Exception('Error: Project not logged in x_flowcells database!')
                
                for counter in xrange(1, len(lane_maps[index])):
                    #<source plate ID>,<source well>,<volume>,<destination plate ID>,<destination well>
                    #Destination well 200 microL, minimum pipette 2 microL; acc_ratios multiplied by 2.
                    sample = lane_maps[index][counter]
                    position = "{}:{}".format(wells[wellIndex[1]], wellIndex[0])
                    try:
                        output = (location[sample][0], location[sample][1],
                                  str(int(acc_ratios[index][counter]*2)), str(destid[destNo]), position)
                    except KeyError:
                        print "Error: Samples incorrectly parsed into database, thus causing sample name conflicts!"
                        #Skip this sample; writing here would reuse output from a previous iteration.
                        continue
                    if acc_ratios[index][counter] != 0:
                        writer.writerow(output)
                #Increment the well index
                if not acc_ratios[index][counter] == 0:
                    if not wellIndex[1] >= 8:
                        wellIndex[1] += 1
                    else:
                        wellIndex[1] = 1
                        if not wellIndex[0] >= 8:
                            wellIndex[0] += 1
                        else:
                            wellIndex[0] = 1
                            destNo += 1
                            try:
                                destid[destNo]
                            except IndexError:
                                print "Critical error; not enough destination plates provided"
Exemplo n.º 57
0
Arquivo: api.py Projeto: espre05/lims
class LimsAPI(object):

    """Thin wrapper around the genologics LIMS client."""

    def __init__(self, lims=None):
        super(LimsAPI, self).__init__()
        self.lims = lims

    def init_app(self, app):
        """Connect with credentials from Flask app config."""
        self.connect(**app.config['LIMS_CONFIG'])

    def connect(self, baseuri, username, password):
        """Connect to the LIMS instance."""
        self.lims = Lims(baseuri, username, password)

    def sample(self, lims_id):
        """Get a sample from the LIMS."""
        logger.debug('fetch sample from LIMS')
        sample_obj = Sample(self.lims, id=lims_id)

        try:
            sample_json = transform_entry(sample_obj)
        except KeyError as error:
            message = "missing UDF: {}".format(error.message)
            logger.warn(message)
            raise MissingLimsDataException(message)
        except HTTPError as error:
            logger.warn('unknown lims id')
            raise error

        return sample_json

    def samples(self, project_id=None, case=None, sample_ids=None, limit=20,
                **kwargs):
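        """Fetch samples filtered by project, by (customer, familyID) case, or by other criteria."""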
        if project_id:
            sample_objs = self.lims.get_samples(projectname=project_id)
        elif case:
            sample_objs = self.lims.get_samples(udf={'customer': case[0],
                                                     'familyID': case[1]})
        else:
            sample_objs = self.lims.get_samples(**kwargs)

        sample_dicts = []
        for index, sample in enumerate(sample_objs):
            if index < limit:
                sample_dicts.append(transform_entry(sample))
            else:
                break

        analysis_types = set(sample['analysis_type'] for sample in
                             sample_dicts)
        case_data = {
            'analysis_types': list(analysis_types),
            'samples': sample_dicts
        }
        return case_data

    def cases(self):
        """Return (customer, familyID) pairs for all LIMS samples that carry both UDFs."""
        samples = ((sample.udf['customer'], sample.udf['familyID'])
                   for sample in self.lims.get_samples()
                   if 'familyID' in sample.udf and 'customer' in sample.udf)
        return samples
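A minimal usage sketch for the class above, assuming a Flask app whose LIMS_CONFIG entry carries the keyword arguments expected by connect(); the URI, credentials and sample id below are placeholders:

from flask import Flask

app = Flask(__name__)
#Keys match the connect() signature; values are placeholders.
app.config['LIMS_CONFIG'] = {'baseuri': 'https://lims.example.com',
                             'username': 'apiuser',
                             'password': 'apipass'}

lims_api = LimsAPI()
lims_api.init_app(app)
print lims_api.sample('ACC1234A01')  #Placeholder LIMS id; raises for unknown ids.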
Exemplo n.º 58
0
Usage example: Get labs and lab info.

NOTE: You need to set the BASEURI, USERNAME AND PASSWORD.

Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
"""

import codecs
from genologics.lims import Lims

# Login parameters for connecting to a LIMS instance.
# NOTE: Modify according to your setup.
from genologics.site_cloud import BASEURI, USERNAME, PASSWORD

# Create the LIMS interface instance, and check the connection and version.
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()

# Get the list of all projects.
labs = lims.get_labs(name='SciLifeLab')
print len(labs), 'labs in total'
for lab in labs:
    print lab, lab.name, lab.shipping_address.items()
    for key, value in lab.udf.items():
        if isinstance(value, unicode):
            value = codecs.encode(value, 'UTF-8')
        print ' ', key, '=', value
    udt = lab.udt
    if udt:
        print 'UDT:', udt.udt
        for key, value in udt.items():
Exemplo n.º 59
0
Usage example: Get some processes.

NOTE: You need to set the BASEURI, USERNAME AND PASSWORD.

Per Kraulis, Science for Life Laboratory, Stockholm, Sweden.
"""

from genologics.lims import Lims

# Login parameters for connecting to a LIMS instance.
# NOTE: Modify according to your setup.
from genologics.site_cloud import BASEURI, USERNAME, PASSWORD

# Create the LIMS interface instance, and check the connection and version.
lims = Lims(BASEURI, USERNAME, PASSWORD)
lims.check_version()

# Get the list of all processes.
processes = lims.get_processes()
print len(processes), 'processes in total'

process = processes[0]

print process, process.type, process.type.name
for process in lims.get_processes(type=process.type.name):
    print process

name = '1a. Fragment DNA (TruSeq DNA) 3.0'
print name
for process in lims.get_processes(type=name):