Code example #1
    def test_get_validated_learner_data(self):
        view = self.classlist1.restrictedTraverse(
            '@@upload-classlist-spreadsheet')

        testpath = os.path.dirname(__file__)

        # good file
        path = os.path.join(testpath, 'test_classlist_1.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view.get_validated_learner_data(self.request)
        self.assertEqual(test_out[0], None)

        # no learners in file
        path = os.path.join(testpath, 'test_classlist_2.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view.get_validated_learner_data(self.request)
        self.assertEqual(test_out[0], 'Please supply at least one learner.')
        self.assertEqual(test_out[1], None)

        # a learner with incorrect fields
        path = os.path.join(testpath, 'test_classlist_3.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view.get_validated_learner_data(self.request)
        self.assertEqual(test_out[0],
                         'Please supply a number, name, gender and language.')
        self.assertEqual(test_out[1], None)
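
The tests above build a FileUpload directly from a stub instead of a real
CGI field storage. FieldStorageStub itself is not part of the snippet;
ZPublisher's FileUpload copies the file, headers and filename attributes
off the object it wraps, so the stub only has to carry those three. A
minimal sketch (name, defaults and docstring are assumptions, not the
project's actual helper):

    class FieldStorageStub:
        """Stand-in for a cgi.FieldStorage, just enough for FileUpload."""

        def __init__(self, file, filename='testfile.xls', headers=None):
            self.file = file              # an open file object
            self.filename = filename      # reported as the upload filename
            self.headers = headers or {}  # FileUpload expects a mapping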
Code example #2
    def parse(self):
        order = []
        ext = splitext(self.infile.filename.lower())[-1]
        if ext == '.xlsx':
            order = (xlsx_to_csv, xls_to_csv)
        elif ext == '.xls':
            order = (xls_to_csv, xlsx_to_csv)
        elif ext == '.csv':
            self.csv_data = self.infile
        if order:
            for importer in order:
                try:
                    self.csv_data = importer(infile=self.infile,
                                             worksheet=self.worksheet,
                                             delimiter=self.delimiter)
                    break
                except SheetNotFound:
                    self.err("Sheet not found in workbook: %s" %
                             self.worksheet)
                    return -1
                except Exception:
                    # swallow and try the next importer in the list
                    pass
            else:
                self.warn("Can't parse input file as XLS, XLSX, or CSV.")
                return -1
        stub = FileStub(file=self.csv_data, name=str(self.infile.filename))
        self.csv_data = FileUpload(stub)

        lines = self.csv_data.readlines()
        reader = csv.DictReader(lines)
        for row in reader:
            self.parse_row(reader.line_num, row)
        return 0
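
FileStub plays the same adapter role for the converted CSV stream: it gives
FileUpload the file, filename and headers attributes it expects. A plausible
minimal implementation, assuming xlsx_to_csv / xls_to_csv return a file-like
object (a sketch, not necessarily the project's helper):

    class FileStub:
        """Adapts converter output so FileUpload can wrap it."""

        def __init__(self, file, name):
            self.file = file      # file-like object holding the CSV data
            self.filename = name  # original upload filename
            self.headers = {}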
Code example #3
    def parse(self):
        order = []
        ext = splitext(self.infile.filename.lower())[-1]
        if ext == '.xlsx':
            order = (xlsx_to_csv, xls_to_csv)
        elif ext == '.xls':
            order = (xls_to_csv, xlsx_to_csv)
        elif ext == '.csv':
            self.csv_data = self.infile
        if order:
            for importer in order:
                try:
                    self.csv_data = importer(infile=self.infile,
                                             worksheet=self.worksheet,
                                             delimiter=self.delimiter)
                    break
                except SheetNotFound:
                    self.err("Sheet not found in workbook: %s" %
                             self.worksheet)
                    return -1
                except Exception:
                    # swallow and try the next importer in the list
                    pass
            else:
                self.warn("Can't parse input file as XLS, XLSX, or CSV.")
                return -1
        stub = FileStub(file=self.csv_data, name=str(self.infile.filename))
        self.csv_data = FileUpload(stub)

        try:
            sample_id, ext = splitext(basename(self.infile.filename))
            # maybe the filename is a sample ID, just the way it is
            ar = self.get_ar(sample_id)
            if not ar:
                # maybe we need to chop off its -9-digit suffix
                sample_id = '-'.join(sample_id.split('-')[:-1])
                ar = self.get_ar(sample_id)
                if not ar:
                    # or we are out of luck
                    msg = "Can't find sample for " + self.infile.filename
                    self.warn(msg)
                    return -1
            self.ar = ar
            self.sample_id = sample_id
            self.analyses = self.get_analyses(ar)
        except Exception as e:
            self.err(repr(e))
            return False
        lines = self.csv_data.readlines()
        reader = csv.DictReader(lines)
        for row in reader:
            self.parse_row(ar, reader.line_num, row)
        return 0
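
The fallback above first tries the bare file name as a sample ID and then
strips the trailing '-'-separated segment (the "-9-digit suffix"). A worked
illustration with a made-up file name:

    from os.path import basename, splitext

    filename = 'WATER-0001-123456789.xlsx'          # hypothetical upload
    sample_id = splitext(basename(filename))[0]     # 'WATER-0001-123456789'
    fallback = '-'.join(sample_id.split('-')[:-1])  # 'WATER-0001'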
Code example #4
 def test_import_ppm(self):
     ar = self.add_analysisrequest(
         self.client,
         dict(Client=self.client.UID(),
              Contact=self.contact.UID(),
              DateSampled=datetime.now().date().isoformat(),
              SampleType=self.sampletype.UID()),
         [srv.UID() for srv in self.services])
     api.do_transition_for(ar, 'receive')
     data = open(fn2, 'rb').read()
     import_file = FileUpload(TestFile(cStringIO.StringIO(data), fn2))
     request = TestRequest(form=dict(
         instrument_results_file_format='xlsx',
         submitted=True,
         artoapply='received_tobeverified',
         results_override='override',
         instrument_results_file=import_file,
         default_unit='ppm',
         instrument=''))
     results = importer.Import(self.portal, request)
     ag = ar.getAnalyses(full_objects=True, getKeyword='ag107')[0]
     al = ar.getAnalyses(full_objects=True, getKeyword='al27')[0]
     test_results = eval(results)  # noqa
     self.assertEqual(ag.getResult(), '1118000.0')
     self.assertEqual(al.getResult(), '2228000.0')
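
TestFile is another helper that is not defined in the snippet; it follows
the same three-attribute contract as the stubs sketched earlier, but wraps
an in-memory stream. A sketch, with an assumed default filename to match
the one-argument call seen in a later example:

    class TestFile(object):
        """Wraps an in-memory stream so FileUpload can treat it as a file."""

        def __init__(self, file, filename='dummy.xlsx'):
            self.file = file
            self.filename = filename
            self.headers = {}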
Code example #5
    def test_add_learners(self):
        view = self.classlist1.restrictedTraverse(
            '@@upload-classlist-spreadsheet')

        testpath = os.path.dirname(__file__)

        # learner added
        path = os.path.join(testpath, 'test_classlist_4.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        contents = myUpload.read()
        book = xlrd.open_workbook(file_contents=contents)
        sheet = book.sheet_by_index(0)
        self.assertEqual(len(self.classlist1.getFolderContents()), 3)
        test_out = view.add_learners(self.classlist1, sheet)
        self.assertEqual(len(self.classlist1.getFolderContents()), 6)
        self.assertEqual(test_out, [])

        # learner not added
        path = os.path.join(testpath, 'test_classlist_5.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        contents = myUpload.read()
        book = xlrd.open_workbook(file_contents=contents)
        sheet = book.sheet_by_index(0)
        self.assertEqual(len(self.classlist1.getFolderContents()), 6)
        test_out = view.add_learners(self.classlist1, sheet)
        self.assertEqual(len(self.classlist1.getFolderContents()), 6)
        self.assertEqual(test_out, ['Skipping existing learner: John'])
Code example #6
    def __init__(self, infile, worksheet, encoding='xlsx'):
        InstrumentResultsFileParser.__init__(self, infile, encoding.upper())
        # Convert xls to csv
        self._delimiter = "|"
        if encoding == 'xlsx':
            csv_data = xlsx_to_csv(
                infile, worksheet=worksheet, delimiter=self._delimiter)
            # adapt csv_data into a FileUpload for parse method
            self._infile = infile
            stub = FileStub(file=csv_data, name=str(infile.filename))
            self._csvfile = FileUpload(stub)
        elif encoding == 'xls':
            csv_data = xls_to_csv(
                infile, worksheet=worksheet, delimiter=self._delimiter)
            # adapt csv_data into a FileUpload for parse method
            self._infile = infile
            stub = FileStub(file=csv_data, name=str(infile.filename))
            self._csvfile = FileUpload(stub)
        elif encoding == 'csv':
            self._csvfile = infile

        self._encoding = encoding
        self._end_header = False
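
Note the pipe delimiter: converting the worksheet to CSV with '|' keeps any
commas inside cell values intact, but whatever later parses self._csvfile
has to be told about it, since the csv module defaults to ','. A standalone
sketch with made-up rows:

    import csv
    from io import StringIO

    csvfile = StringIO(u'Sample Id|Result\nWB-0001|0.111\n')
    for row in csv.DictReader(csvfile, delimiter='|'):
        print(row['Sample Id'], row['Result'])  # WB-0001 0.111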
Code example #7
    def test__call__5(self):

        view = self.classlist1.restrictedTraverse(
            '@@upload-classlist-spreadsheet')
        testpath = os.path.dirname(__file__)

        # nothing went wrong
        self.request.set('classlist_uid', 'classlist1')
        # good file
        path = os.path.join(testpath, 'test_classlist_1.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view()
        self.assertEqual(test_out, True)
        test = IStatusMessage(self.request).show()
        self.assertEqual(test, [])
Code example #8
    def test__call__4(self):

        view = self.classlist1.restrictedTraverse(
            '@@upload-classlist-spreadsheet')
        testpath = os.path.dirname(__file__)

        # some learner errors redirect to new (or existing) classlist
        self.request.set('classlist_uid', 'classlist1')
        # file that contains same learner as is already in the system
        path = os.path.join(testpath, 'test_classlist_5.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view()
        self.assertEqual(test_out, True)
        test = IStatusMessage(self.request).show()
        self.assertEqual(test[0].type, 'error')
        self.assertEqual(test[0].message, 'Skipping existing learner: John')
Code example #9
    def test__call__(self):

        view = self.classlist1.restrictedTraverse(
            '@@upload-classlist-spreadsheet')
        testpath = os.path.dirname(__file__)

        # fail: the uploaded file is not a valid spreadsheet
        self.request.set('classlist_uid', 'classlist1')
        path = os.path.join(testpath, 'blank_file.txt')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view()
        self.assertEqual(test_out, False)
        test = IStatusMessage(self.request).show()
        self.assertEqual(test[0].type, 'error')
        self.assertEqual(test[0].message, 'Please supply a valid file.')
Code example #10
    def test__call__2(self):

        view = self.classlist1.restrictedTraverse(
            '@@upload-classlist-spreadsheet')
        testpath = os.path.dirname(__file__)

        # fail: classlist_uid is never set on the request
        path = os.path.join(testpath, 'test_classlist_1.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view()
        self.assertEqual(test_out, False)
        test = IStatusMessage(self.request).show()
        self.assertEqual(test[0].type, 'error')
        self.assertEqual(test[0].message,
                         'Please indicate which class to use.')
Code example #11
    def test__call__3(self):

        # use topictrees as the incorrect context
        view = self.topictrees.restrictedTraverse(
            '@@upload-classlist-spreadsheet')
        testpath = os.path.dirname(__file__)

        # get classlist failed
        self.request.set('classlist_uid', 'classlist1')
        path = os.path.join(testpath, 'test_classlist_1.xls')
        spreadsheet_file = open(path, 'rb')
        aFieldStorage = FieldStorageStub(spreadsheet_file)
        myUpload = FileUpload(aFieldStorage)
        self.request['csv_file'] = myUpload
        test_out = view()
        self.assertEqual(test_out, False)
        test = IStatusMessage(self.request).show()
        self.assertEqual(test[0].type, 'error')
        self.assertEqual(test[0].message,
                         'import-learners called from incorrect context')
Code example #12
 def test_import_csv_without_filename_suffix(self):
     ar = self.add_analysisrequest(
         self.client,
         dict(Client=self.client.UID(),
              Contact=self.contact.UID(),
              DateSampled=datetime.now().date().isoformat(),
              SampleType=self.sampletype.UID()),
         [srv.UID() for srv in self.services])
     api.do_transition_for(ar, 'receive')
     data = open(fn, 'r').read()
     import_file = FileUpload(TestFile(cStringIO.StringIO(data), fn))
     request = TestRequest(
         form=dict(submitted=True,
                   artoapply='received_tobeverified',
                   results_override='override',
                   instrument_results_file=import_file,
                   instrument=api.get_uid(self.instrument)))
     results = importer.Import(self.portal, request)
     ag = ar.getAnalyses(full_objects=True, getKeyword='Ag107')[0]
     al = ar.getAnalyses(full_objects=True, getKeyword='Al27')[0]
     test_results = eval(results)  # noqa
     self.assertEqual(ag.getResult(), '0.111')
     self.assertEqual(al.getResult(), '0.222')
Code example #13
    def test_import_xlsx(self):
        ar1 = self.add_analysisrequest(
            self.client,
            dict(Client=self.client.UID(),
                 Contact=self.contact.UID(),
                 DateSampled=datetime.now().date().isoformat(),
                 SampleType=self.sampletype.UID()),
            [srv.UID() for srv in self.services])
        ar2 = self.add_analysisrequest(
            self.client,
            dict(Client=self.client.UID(),
                 Contact=self.contact.UID(),
                 DateSampled=datetime.now().date().isoformat(),
                 SampleType=self.sampletype.UID()),
            [srv.UID() for srv in self.services])
        api.do_transition_for(ar1, 'receive')
        api.do_transition_for(ar2, 'receive')

        data = open(fn, 'rb').read()
        import_file = FileUpload(TestFile(cStringIO.StringIO(data), fn))
        request = TestRequest(
            form=dict(submitted=True,
                      artoapply='received_tobeverified',
                      results_override='override',
                      instrument_results_file=import_file,
                      worksheet='Concentrations',
                      instrument=api.get_uid(self.instrument)))
        results = importer.Import(self.portal, request)
        test_results = eval(results)  # noqa
        ag1 = ar1.getAnalyses(full_objects=True, getKeyword='Ag107')[0]
        al1 = ar1.getAnalyses(full_objects=True, getKeyword='Al27')[0]
        ag2 = ar2.getAnalyses(full_objects=True, getKeyword='Ag107')[0]
        al2 = ar2.getAnalyses(full_objects=True, getKeyword='Al27')[0]
        self.assertEqual(ag1.getResult(), '0.111')
        self.assertEqual(al1.getResult(), '0.555')
        self.assertEqual(ag2.getResult(), '0.222')
        self.assertEqual(al2.getResult(), '0.666')
Code example #14
class Winlab32(InstrumentResultsFileParser):
    ar = None

    def __init__(self, infile, worksheet=None, encoding=None, delimiter=None):
        self.delimiter = delimiter if delimiter else ','
        self.encoding = encoding
        self.infile = infile
        self.csv_data = None
        self.worksheet = worksheet if worksheet else 0
        self.sample_id = None
        mimetype, encoding = guess_type(self.infile.filename)
        InstrumentResultsFileParser.__init__(self, infile, mimetype)

    def parse(self):
        order = []
        ext = splitext(self.infile.filename.lower())[-1]
        if ext == '.xlsx':
            order = (xlsx_to_csv, xls_to_csv)
        elif ext == '.xls':
            order = (xls_to_csv, xlsx_to_csv)
        elif ext == '.csv':
            self.csv_data = self.infile
        if order:
            for importer in order:
                try:
                    self.csv_data = importer(
                        infile=self.infile,
                        worksheet=self.worksheet,
                        delimiter=self.delimiter)
                    break
                except SheetNotFound:
                    self.err("Sheet not found in workbook: %s" % self.worksheet)
                    return -1
                except Exception:
                    # swallow and try the next importer in the list
                    pass
            else:
                self.warn("Can't parse input file as XLS, XLSX, or CSV.")
                return -1
        stub = FileStub(file=self.csv_data, name=str(self.infile.filename))
        self.csv_data = FileUpload(stub)

        lines = self.csv_data.readlines()
        reader = csv.DictReader(lines)
        for row in reader:
            self.parse_row(reader.line_num, row)
        return 0

    def parse_row(self, row_nr, row):
        # coerce the reported concentration to a float where possible
        try:
            value = float(row['Reported Conc (Calib)'])
        except (TypeError, ValueError):
            value = row['Reported Conc (Calib)']
        parsed = {'reading': value, 'DefaultResult': 'reading'}

        sample_id = subn(r'[^\w\d\-_]*', '', row.get('Sample ID', ""))[0]
        kw = subn(r"[^\w\d]*", "", row.get('Analyte Name', ""))[0]
        if not sample_id or not kw:
            return 0

        try:
            ar = self.get_ar(sample_id)
            brain = self.get_analysis(ar, kw)
            new_kw = brain.getKeyword
        except Exception as e:
            self.warn(msg="Error getting analysis for '${s}/${kw}': ${e}",
                      mapping={'s': sample_id, 'kw': kw, 'e': repr(e)},
                      numline=row_nr, line=str(row))
            return

        self._addRawResult(sample_id, {new_kw: parsed})
        return 0

    @staticmethod
    def get_ar(sample_id):
        query = dict(portal_type="AnalysisRequest", getId=sample_id)
        brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
        try:
            return api.get_object(brains[0])
        except IndexError:
            pass

    @staticmethod
    def get_analyses(ar):
        brains = ar.getAnalyses()
        return dict((a.getKeyword, a) for a in brains)

    def get_analysis(self, ar, kw):
        brains = self.get_analyses(ar)
        brains = [v for k, v in brains.items() if k.startswith(kw)]
        if len(brains) < 1:
            msg = "No analysis found matching Keyword '${kw}'",
            raise AnalysisNotFound(msg, kw=kw)
        if len(brains) > 1:
            msg = "Multiple brains found matching Keyword '${kw}'",
            raise MultipleAnalysesFound(msg, kw=kw)
        return brains[0]
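
parse_row above sanitises both the sample ID and the analyte keyword with
re.subn, which returns a (new_string, substitution_count) tuple; the
trailing [0] keeps only the cleaned string. A quick illustration with a
made-up sample ID:

    from re import subn

    # strips everything that is not a word character, digit, '-' or '_'
    print(subn(r'[^\w\d\-_]*', '', 'QC 17-0001*')[0])  # QC17-0001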
Code example #15
class Nexion350xParser(InstrumentResultsFileParser):
    ar = None

    def __init__(self, infile, worksheet=0, encoding=None, delimiter=None):
        self.delimiter = delimiter if delimiter else ','
        self.encoding = encoding
        self.infile = infile
        self.worksheet = worksheet
        self.csv_data = None
        self.sample_id = None
        mimetype, encoding = guess_type(self.infile.filename)
        InstrumentResultsFileParser.__init__(self, infile, mimetype)

    def parse(self):
        order = []
        ext = splitext(self.infile.filename.lower())[-1]
        if ext == '.xlsx':
            order = (xlsx_to_csv, xls_to_csv)
        elif ext == '.xls':
            order = (xls_to_csv, xlsx_to_csv)
        elif ext == '.csv':
            self.csv_data = self.infile
        if order:
            for importer in order:
                try:
                    self.csv_data = importer(infile=self.infile,
                                             worksheet=self.worksheet,
                                             delimiter=self.delimiter)
                    break
                except SheetNotFound:
                    self.err("Sheet not found in workbook: %s" %
                             self.worksheet)
                    return -1
                except Exception:
                    # swallow and try the next importer in the list
                    pass
            else:
                self.warn("Can't parse input file as XLS, XLSX, or CSV.")
                return -1
        stub = FileStub(file=self.csv_data, name=str(self.infile.filename))
        self.csv_data = FileUpload(stub)

        lines = self.csv_data.readlines()
        reader = csv.DictReader(lines)
        for row in reader:
            self.parse_row(reader.line_num, row)
        return 0

    def parse_row(self, row_nr, row):
        if row['Sample Id'].lower().strip() in ('', 'sample id', 'blk', 'rblk',
                                                'calibration curves'):
            return 0

        # Get sample for this row
        sample_id = subn(r'[^\w\d\-_]*', '', row.get('Sample Id', ''))[0]
        ar = self.get_ar(sample_id)
        if not ar:
            msg = 'Sample not found for {}'.format(sample_id)
            self.warn(msg, numline=row_nr, line=str(row))
            return 0
        # Search for columns whose headers are analyte keys
        for key in row.keys():
            if key in non_analyte_row_headers:
                continue
            kw = subn(r'[^\w\d]*', '', key)[0]
            if not kw:
                continue
            try:
                brain = self.get_analysis(ar, kw, row_nr=row_nr, row=row)
                if not brain:
                    continue
                new_kw = brain.getKeyword
                parsed = dict(reading=float(row[key]), DefaultResult='reading')
                self._addRawResult(sample_id, {new_kw: parsed})
            except (TypeError, ValueError):
                self.warn('Value for keyword ${kw} is not numeric',
                          mapping=dict(kw=kw),
                          numline=row_nr,
                          line=str(row))

        return 0

    @staticmethod
    def get_ar(sample_id):
        query = dict(portal_type='AnalysisRequest', getId=sample_id)
        brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
        try:
            return api.get_object(brains[0])
        except IndexError:
            pass

    @staticmethod
    def get_analyses(ar):
        analyses = ar.getAnalyses()
        return dict((a.getKeyword, a) for a in analyses)

    def get_analysis(self, ar, kw, row_nr="", row=""):
        items = self.get_analyses(ar)
        brains = [v for k, v in items.items() if k.startswith(kw)]
        if len(brains) < 1:
            return None
        if len(brains) > 1:
            msg = "Multiple brains found matching Keyword '${kw}'",
            raise MultipleAnalysesFound(msg, kw=kw)
        return brains[0]
Code example #16
    def test_BC4_Shimadzu_TQ8030Import(self):
        pc = getToolByName(self.portal, 'portal_catalog')
        workflow = getToolByName(self.portal, 'portal_workflow')
        arimport = self.addthing(self.client, 'ARImport')
        arimport.unmarkCreationFlag()
        arimport.setFilename("test1.csv")
        arimport.setOriginalFile("""
Header,      File name,  Client name,  Client ID, Contact,     CC Names - Report, CC Emails - Report, CC Names - Invoice, CC Emails - Invoice, No of Samples, Client Order Number, Client Reference,,
Header Data, test1.csv,  Happy Hills,  HH,        Rita Mohale,                  ,                   ,                    ,                    , 10,            HHPO-001,                            ,,
Batch Header, id,       title,     description,    ClientBatchID, ClientBatchComment, BatchLabels, ReturnSampleToClient,,,
Batch Data,   B15-0123, New Batch, Optional descr, CC 201506,     Just a batch,                  , TRUE                ,,,
Samples,    ClientSampleID,    SamplingDate,DateSampled,Sampler,SamplePoint,SampleMatrix,SampleType,ContainerType,ReportDryMatter,Priority,Total number of Analyses or Profiles,Price excl Tax,,,,,MicroBio,,
Analysis price,,,,,,,,,,,,,,
"Total Analyses or Profiles",,,,,,,,,,,,,9,,,
Total price excl Tax,,,,,,,,,,,,,,
"Sample 1", HHS14001,          3/9/2014,    3/9/2014,,Toilet,     Liquids,     Water,     Cup,          0,              Normal,  1,                                   0,             0,0,0,0,0,1
        """)

        # check that values are saved without errors
        arimport.setErrors([])
        arimport.save_header_data()
        arimport.save_sample_data()
        arimport.create_or_reference_batch()
        errors = arimport.getErrors()
        if errors:
            self.fail("Unexpected errors while saving data: " + str(errors))
        # check that batch was created and linked to arimport without errors
        if not pc(portal_type='Batch'):
            self.fail("Batch was not created!")
        if not arimport.schema['Batch'].get(arimport):
            self.fail("Batch was created, but not linked to ARImport.")

        # the workflow scripts use response.write(); silence them
        arimport.REQUEST.response.write = lambda x: x

        # check that validation succeeds without any errors
        workflow.doActionFor(arimport, 'validate')
        state = workflow.getInfoFor(arimport, 'review_state')
        if state != 'valid':
            errors = arimport.getErrors()
            self.fail(
                'Validation failed! %s. Errors: %s' % (arimport.id, errors))

        # Import objects and verify that they exist
        workflow.doActionFor(arimport, 'import')
        state = workflow.getInfoFor(arimport, 'review_state')
        if state != 'imported':
            errors = arimport.getErrors()
            self.fail(
                'Importation failed! %s. Errors: %s' % (arimport.id, errors))

        bc = getToolByName(self.portal, 'bika_catalog')
        ars = bc(portal_type='AnalysisRequest')
        ar = ars[0]
        api.content.transition(obj=ar.getObject(), transition='receive')
        transaction.commit()
        # Testing import for the instrument
        path = os.path.dirname(__file__)
        filename = '%s/files/GC-MS output.txt' % path
        if not os.path.isfile(filename):
            self.fail("File %s not found" % filename)
        data = open(filename, 'r').read()
        import_file = FileUpload(TestFile(cStringIO.StringIO(data)))
        request = TestRequest(form=dict(
            submitted=True,
            artoapply='received',
            override='nooverride',
            file=import_file,
            sample='requestid',
            instrument=''))
        context = self.portal
        results = Import(context, request)
        transaction.commit()
        text = 'Import finished successfully: 1 ARs and 2 results updated'
        if text not in results:
            self.fail("AR Import failed")
        browser = self.getBrowser(loggedIn=True)
        browser.open(ar.getObject().absolute_url() + "/manage_results")
        content = browser.contents
        if '0.02604' not in content:
            self.fail("AR:alphaPinene Result did not get updated")

        if '0.02603' not in content:
            self.fail("AR: Ca  Result did not get updated")
Code example #17
class S8TigerParser(InstrumentResultsFileParser):
    ar = None

    def __init__(self,
                 infile,
                 worksheet=None,
                 encoding=None,
                 default_unit=None,
                 delimiter=None):
        self.delimiter = delimiter if delimiter else ','
        self.unit = default_unit if default_unit else "pct"
        self.encoding = encoding
        self.ar = None
        self.analyses = None
        self.worksheet = worksheet if worksheet else 0
        self.infile = infile
        self.csv_data = None
        self.sample_id = None
        mimetype, encoding = guess_type(self.infile.filename)
        InstrumentResultsFileParser.__init__(self, infile, mimetype)

    def parse(self):
        order = []
        ext = splitext(self.infile.filename.lower())[-1]
        if ext == '.xlsx':
            order = (xlsx_to_csv, xls_to_csv)
        elif ext == '.xls':
            order = (xls_to_csv, xlsx_to_csv)
        elif ext == '.csv':
            self.csv_data = self.infile
        if order:
            for importer in order:
                try:
                    self.csv_data = importer(infile=self.infile,
                                             worksheet=self.worksheet,
                                             delimiter=self.delimiter)
                    break
                except SheetNotFound:
                    self.err("Sheet not found in workbook: %s" %
                             self.worksheet)
                    return -1
                except Exception:
                    # swallow and try the next importer in the list
                    pass
            else:
                self.warn("Can't parse input file as XLS, XLSX, or CSV.")
                return -1
        stub = FileStub(file=self.csv_data, name=str(self.infile.filename))
        self.csv_data = FileUpload(stub)

        try:
            sample_id, ext = splitext(basename(self.infile.filename))
            # maybe the filename is a sample ID, just the way it is
            ar = self.get_ar(sample_id)
            if not ar:
                # maybe we need to chop off its -9-digit suffix
                sample_id = '-'.join(sample_id.split('-')[:-1])
                ar = self.get_ar(sample_id)
                if not ar:
                    # or we are out of luck
                    msg = "Can't find sample for " + self.infile.filename
                    self.warn(msg)
                    return -1
            self.ar = ar
            self.sample_id = sample_id
            self.analyses = self.get_analyses(ar)
        except Exception as e:
            self.err(repr(e))
            return False
        lines = self.csv_data.readlines()
        reader = csv.DictReader(lines)
        for row in reader:
            self.parse_row(ar, reader.line_num, row)
        return 0

    def parse_row(self, ar, row_nr, row):
        # convert row to use interim field names
        if 'reading' not in field_interim_map.values():
            self.err("Missing 'reading' interim field.")
            return -1
        parsed = {field_interim_map.get(k, ''): v for k, v in row.items()}

        formula = parsed.get('formula')
        kw = subn(r'[^\w\d\-_]*', '', formula)[0]
        kw = kw.lower()
        try:
            analysis = self.get_analysis(ar, kw)
            if not analysis:
                return 0
            keyword = analysis.getKeyword
        except Exception as e:
            self.warn(msg="Error getting analysis for '${kw}': ${e}",
                      mapping={
                          'kw': kw,
                          'e': repr(e)
                      },
                      numline=row_nr,
                      line=str(row))
            return

        # Concentration may be reported as ppm or %; save both readings.
        concentration = parsed['concentration']
        try:
            val = float(subn(r'[^.\d]', '', str(concentration))[0])
        except (TypeError, ValueError, IndexError):
            self.warn(msg="Can't extract numerical value from `concentration`",
                      numline=row_nr,
                      line=str(row))
            parsed['reading_pct'] = ''
            parsed['reading_ppm'] = ''
            return 0
        else:
            if 'ppm' in concentration.lower():
                parsed['reading_pct'] = val * 0.0001
                parsed['reading_ppm'] = val
            elif '%' in concentration:
                parsed['reading_pct'] = val
                parsed['reading_ppm'] = val * 10000  # pct -> ppm
            else:
                self.warn("Can't decide if reading units are PPM or %",
                          numline=row_nr,
                          line=str(row))
                return 0

        if self.unit == 'ppm':
            reading = parsed['reading_ppm']
        else:
            reading = parsed['reading_pct']
        parsed['reading'] = reading
        parsed.update({'DefaultResult': 'reading'})

        self._addRawResult(self.sample_id, {keyword: parsed})
        return 0

    @staticmethod
    def get_ar(sample_id):
        query = dict(portal_type="AnalysisRequest", getId=sample_id)
        brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
        try:
            return api.get_object(brains[0])
        except IndexError:
            pass

    @staticmethod
    def get_analyses(ar):
        analyses = ar.getAnalyses()
        return dict((a.getKeyword, a) for a in analyses)

    def get_analysis(self, ar, kw):
        analyses = self.get_analyses(ar)
        analyses = [v for k, v in analyses.items() if k.startswith(kw)]
        if len(analyses) < 1:
            self.log('No analysis found matching keyword "${kw}"',
                     mapping=dict(kw=kw))
            return None
        if len(analyses) > 1:
            self.warn('Multiple analyses found matching Keyword "${kw}"',
                      mapping=dict(kw=kw))
            return None
        return analyses[0]
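
The unit bookkeeping in parse_row relies on 1 ppm == 0.0001 %, which is why
the two readings are derived from each other with factors of 0.0001 and
10000. A quick sanity check with a made-up reading:

    val_pct = 0.5                       # instrument reported '0.5 %'
    val_ppm = val_pct * 10000           # 5000.0 ppm
    print(val_ppm * 0.0001 == val_pct)  # True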