    def test_include_04(self):

        with codec_open('include4.bdf', 'w') as f:
            f.write('$ pyNastran: punch=True\n')
            f.write('$ pyNastran: dumplines=True\n')
            f.write("INCLUDE 'include4b.inc'\n\n")

        with codec_open('include4b.inc', 'w') as f:
            f.write('$ GRID comment\n')
            f.write('GRID,2,,2.0\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('include4.bdf')
        model.write_bdf('include4.out.bdf')

        os.remove('include4.out.bdf')
        os.remove('include4b.inc')
        #os.remove('include4.inc')
        # os.remove('c.bdf')
        # os.remove('executive_control.inc')
        # os.remove('case_control.inc')

        self.assertEqual(len(model.nodes), 1)
        self.assertEqual(model.nnodes, 1, 'nnodes=%s' % model.nnodes)
Example #2
    def test_include_05(self):
        with codec_open('include5.bdf', 'w') as bdf_file:
            bdf_file.write('$ pyNastran: punch=True\n')
            bdf_file.write('$ pyNastran: dumplines=True\n')
            bdf_file.write("INCLUDE 'include5b.inc'\n\n")

        with codec_open('include5b.inc', 'w') as bdf_file:
            bdf_file.write('ECHOON\n')
            bdf_file.write('$ GRID comment\n')
            bdf_file.write('GRID,2,,2.0\n')
            bdf_file.write('ECHOOFF\n')
            bdf_file.write('GRID,3,,3.0\n')
            bdf_file.write('grid,4,,4.0\n')
            bdf_file.write('grid ,5,,5.0\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('include5.bdf')
        assert model.echo is False, model.echo
        #model.write_bdf('include5.out.bdf')

        # os.remove('c.bdf')
        # os.remove('executive_control.inc')
        # os.remove('case_control.inc')

        self.assertEqual(len(model.nodes), 4)
        self.assertEqual(model.nnodes, 4, 'nnodes=%s' % model.nnodes)

        model2 = read_bdf(bdf_filename='include5.bdf', xref=True, punch=False,
                          log=log, encoding=None)
        self.assertEqual(len(model2.nodes), 4)
        self.assertEqual(model2.nnodes, 4, 'nnodes=%s' % model2.nnodes)
        os.remove('include5.bdf')
        #os.remove('include5.out.bdf')
        os.remove('include5b.inc')
    def test_include_end_02(self):
        with codec_open('a.bdf', 'w') as bdf_file:
            bdf_file.write('CEND\n')
            bdf_file.write('BEGIN BULK\n')
            bdf_file.write('GRID,1,,1.0\n')
            bdf_file.write("INCLUDE 'b.bdf'\n\n")
            bdf_file.write('GRID,4,,4.0\n')

        with codec_open('b.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,2,,2.0\n')
            bdf_file.write("INCLUDE 'c.bdf'\n\n")
            bdf_file.write('GRID,5,,5.0\n')

        with codec_open('c.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,3,,3.0\n\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('a.bdf')
        model.write_bdf('a.out.bdf')

        os.remove('a.bdf')
        os.remove('b.bdf')
        os.remove('c.bdf')
        os.remove('a.out.bdf')
        self.assertEqual(len(model.nodes), 5)
        self.assertEqual(model.nnodes, 5, 'nnodes=%s' % model.nnodes)
Example #4
def Initialize():
    #key = chunk_id of first chunk in hit 
    #value = Row object
    fragment_dict = defaultdict(list)

    #key = video_id
    #value = Video object
    video_dict = defaultdict(Video)

    with codec_open(fragment_HIT, 'rb', 'utf-8') as fragment_csv:
        f = list(csv.reader(fragment_csv))
        for row in f[1:]:
            fragment_list = []
            total_clip_time = 0

            chunks = [row[i:i + 5] for i in range(0, len(row), 5)]
            for c in chunks:
                temp = Fragment(c[0], c[1], c[2], c[3])
                total_clip_time += temp.duration
                fragment_list.append(temp)

            cur_row = Row(total_clip_time, fragment_list)
            fragment_dict[fragment_list[0].id] = cur_row

    with codec_open(full_CSV, 'rb', 'utf-8') as full_csv:
        f = list(csv.reader(full_csv))
        for row in f[1:]:
            temp = Video(row[0], row[5])
            video_dict[temp.id] = temp

    return fragment_dict, video_dict
Example #5
    def test_include_04(self):
        """tests pyNastran: punch=True with includes"""
        with codec_open('include4.bdf', 'w') as bdf_file:
            bdf_file.write('$ pyNastran: punch=True\n')
            bdf_file.write('$ pyNastran: dumplines=True\n')
            bdf_file.write("INCLUDE 'include4b.inc'\n\n")

        with codec_open('include4b.inc', 'w') as bdf_file:
            bdf_file.write('$ GRID comment\n')
            bdf_file.write('GRID,2,,2.0\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('include4.bdf')
        model.write_bdf('include4.out.bdf')

        os.remove('include4.out.bdf')
        os.remove('include4b.inc')
        os.remove('include4.bdf')
        #os.remove('include4.inc')
        # os.remove('c.bdf')
        # os.remove('executive_control.inc')
        # os.remove('case_control.inc')

        self.assertEqual(len(model.nodes), 1)
        self.assertEqual(model.nnodes, 1, 'nnodes=%s' % model.nnodes)
Example #6
def Initialize():
    #key = chunk_id of first chunk in hit
    #value = Row object
    fragment_dict = defaultdict(list)

    #key = video_id
    #value = Video object
    video_dict = defaultdict(Video)

    with codec_open(fragment_HIT, 'rb', 'utf-8') as fragment_csv:
        f = list(csv.reader(fragment_csv))
        for row in f[1:]:
            fragment_list = []
            total_clip_time = 0

            chunks = [row[i:i + 5] for i in range(0, len(row), 5)]
            for c in chunks:
                temp = Fragment(c[0], c[1], c[2], c[3])
                total_clip_time += temp.duration
                fragment_list.append(temp)

            cur_row = Row(total_clip_time, fragment_list)
            fragment_dict[fragment_list[0].id] = cur_row

    with codec_open(full_CSV, 'rb', 'utf-8') as full_csv:
        f = list(csv.reader(full_csv))
        for row in f[1:]:
            temp = Video(row[0], row[5])
            video_dict[temp.id] = temp

    return fragment_dict, video_dict
Example #7
    def test_include_end(self):
        """tests multiple levels of includes"""
        with codec_open('a.bdf', 'w') as bdf_file:
            bdf_file.write('CEND\n')
            bdf_file.write('BEGIN BULK\n')
            bdf_file.write('GRID,1,,1.0\n')
            bdf_file.write("INCLUDE 'b.bdf'\n\n")

        with codec_open('b.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,2,,2.0\n')
            bdf_file.write("INCLUDE 'c.bdf'\n\n")

        with codec_open('c.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,3,,3.0\n\n')
            bdf_file.write("ENDDATA\n")

        model = BDF(log=log, debug=False)
        model.read_bdf('a.bdf')
        model.write_bdf('a.out.bdf')

        os.remove('a.bdf')
        os.remove('b.bdf')
        os.remove('c.bdf')
        os.remove('a.out.bdf')
        self.assertEqual(len(model.nodes), 3)
        self.assertEqual(model.nnodes, 3, 'nnodes=%s' % model.nnodes)
    def test_include_05(self):

        with codec_open('include5.bdf', 'w') as f:
            f.write('$ pyNastran: punch=True\n')
            f.write('$ pyNastran: dumplines=True\n')
            f.write("INCLUDE 'include5b.inc'\n\n")

        with codec_open('include5b.inc', 'w') as f:
            f.write('ECHOON\n')
            f.write('$ GRID comment\n')
            f.write('GRID,2,,2.0\n')
            f.write('ECHOOFF\n')
            f.write('GRID,3,,3.0\n')
            f.write('grid,4,,4.0\n')
            f.write('grid ,5,,5.0\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('include5.bdf')
        assert model.echo is False, model.echo
        #model.write_bdf('include5.out.bdf')

        os.remove('include5.bdf')
        #os.remove('include5.out.bdf')
        os.remove('include5b.inc')
        # os.remove('c.bdf')
        # os.remove('executive_control.inc')
        # os.remove('case_control.inc')

        self.assertEqual(len(model.nodes), 4)
        self.assertEqual(model.nnodes, 4, 'nnodes=%s' % model.nnodes)
Example #9
    def test_include_end(self):
        with codec_open('a.bdf', 'w') as bdf_file:
            bdf_file.write('CEND\n')
            bdf_file.write('BEGIN BULK\n')
            bdf_file.write('GRID,1,,1.0\n')
            bdf_file.write("INCLUDE 'b.bdf'\n\n")

        with codec_open('b.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,2,,2.0\n')
            bdf_file.write("INCLUDE 'c.bdf'\n\n")

        with codec_open('c.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,3,,3.0\n\n')
            bdf_file.write("ENDDATA\n")

        model = BDF(log=log, debug=False)
        model.read_bdf('a.bdf')
        model.write_bdf('a.out.bdf')

        os.remove('a.bdf')
        os.remove('b.bdf')
        os.remove('c.bdf')
        os.remove('a.out.bdf')
        self.assertEqual(len(model.nodes), 3)
        self.assertEqual(model.nnodes, 3, 'nnodes=%s' % model.nnodes)
Example #10
    def UpdateMturkCSV(self, name):
        csv_original = list(
            csv.reader(
                codec_open(os.path.join(MTURK_DIR, name + "_results.csv"),
                           'rb', 'utf-8')))

        filtered_dir = os.getcwd() + '/filtered'
        with codec_open(
                os.path.join(filtered_dir, name + "_results_filtered.csv"),
                'w', 'utf-8') as csv_filtered:
            csv_writer = csv.writer(csv_filtered)
            for i in range(len(csv_original)):
                if i == 0:
                    #                     AssignmentStatus = csv_original[0].index('AssignmentStatus')
                    #                     RequesterFeedback = csv_original[0].index('RequesterFeedback')
                    #                     Reject = csv_original[0].index('Reject')
                    pass

                else:
                    hit = self.HIT_list[i - 1]
                    if hit.reject_flag:
                        #csv_original[i][AssignmentStatus] = 'Rejected'
                        #csv_original[i][RequesterFeedback] = hit.reject_reason
                        csv_original[i].append('')
                        csv_original[i].append(hit.reject_reason)
                csv_writer.writerow(csv_original[i])
    def test_case_control_08(self):
        lines_expected = [
            '$pyNastran: version=msc\n',
            '$pyNastran: punch=True\n',
            '$pyNastran: encoding=ascii\n',
            '$NODES\n',
            'GRID,100000,,43.91715,-29.,.8712984\n',
        ]
        bdf_filename = 'test7.bdf'
        bdf_filename2 = 'test7_bad.bdf'
        with codec_open(bdf_filename, 'w', encoding='ascii') as f:
            for line in lines_expected:
                f.write(line)
        bdf = BDF()
        bdf.read_bdf(bdf_filename)
        bdf.write_bdf(bdf_filename2)

        with codec_open(bdf_filename, 'r', encoding='ascii') as f:
            lines = f.readlines()
            i = 0
            for line, line_expected in zip(lines, lines_expected):
                line = line.rstrip()
                line_expected = line_expected.rstrip()
                msg = 'The lines are not the same...i=%s\n' % i
                msg += 'line     = %r\n' % line
                msg += 'expected = %r\n' % line_expected
                msg += '-------------\n--Actual--\n%s' % ''.join(lines)
                msg += '-------------\n--Expected--\n%s' % ''.join(lines_expected)
                self.assertEqual(line, line_expected, msg)
                i += 1
    def test_include_end_02(self):
        with codec_open('a.bdf', 'w') as f:
            f.write('CEND\n')
            f.write('BEGIN BULK\n')
            f.write('GRID,1,,1.0\n')
            f.write("INCLUDE 'b.bdf'\n\n")
            f.write('GRID,4,,4.0\n')

        with codec_open('b.bdf', 'w') as f:
            f.write('GRID,2,,2.0\n')
            f.write("INCLUDE 'c.bdf'\n\n")
            f.write('GRID,5,,5.0\n')

        with codec_open('c.bdf', 'w') as f:
            f.write('GRID,3,,3.0\n\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('a.bdf')
        model.write_bdf('a.out.bdf')

        os.remove('a.bdf')
        os.remove('b.bdf')
        os.remove('c.bdf')
        os.remove('a.out.bdf')
        self.assertEqual(len(model.nodes), 5)
        self.assertEqual(model.nnodes, 5, 'nnodes=%s' % model.nnodes)
Example #13
    def test_tri_180_01(self):
        r"""
        Identify a reasonable tri with super tight tolerances

        y
        ^         4
        |       / /
        |     /   /
        |   /    /
        | /      /
        /       /
        1------2-------> x
        """
        msg = (
            'CEND\n'
            'BEGIN BULK\n'
            'GRID,1,,0.,0.,0.\n'
            'GRID,2,,1.,0.,0.\n'
            'GRID,4,,2., 1.,0.\n'

            'CTRIA3,100,1, 1,2,4\n'
            'PSHELL,1,1,0.1\n'
            'MAT1,1,3.0,, 0.3\n'
            'ENDDATA'
        )
        bdf_filename = 'ctria3.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        model = read_bdf(bdf_filename, xref=True)
        eids_to_delete = get_bad_shells(model, max_theta=180.,
                                        max_skew=1000., max_aspect_ratio=1000.)
        assert eids_to_delete == [100], eids_to_delete
        os.remove(bdf_filename)
def get_content(article):
    source_content = None
    with codec_open(article.source_path, 'r', 'utf-8') as source_file:
        source_content = source_file.read()
    if path.splitext(article.source_path)[1] == '.rst':
        return parse_rst_content(article.source_path, source_content)
    return source_content
Example #15
def AVFull(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.id
    #[28] = Input.transcription
    #[33] = Answer.partial_transcription
    #[34] = Answer."polarity"
    HIT_list = []

    with codec_open(mturk_csv, 'rb', 'utf-8') as audio_full_csv:
        f = list(csv.reader(audio_full_csv))
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            vid_id = [row[27]]
            vid_transcription = [row[33]]
            answer_polarity = [row[34]]
            age = row[30]
            location = row[31]
            gender = row[32]

            temp = HIT(hit_id, worker_id, work_time, vid_id, answer_polarity,
                       age, location, gender)
            temp.transcriptions = set(vid_transcription)

            for field in [age, location, gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'

            HIT_list.append(temp)

    return HIT_list
Example #16
def TextFull(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.id
    #[33] = Answer."polarity"
    HIT_list = []

    with codec_open(mturk_csv, 'rb', 'utf-8') as text_full_csv:
        f = list(csv.reader(text_full_csv))
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            vid_id = [row[27]]
            answer_polarity = [row[32]]
            age, location = row[30].split('|')
            gender = row[31]

            temp = HIT(hit_id, worker_id, work_time, vid_id, answer_polarity,
                       age, location, gender)

            for field in [age, location, gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'

            HIT_list.append(temp)

    return HIT_list
Example #18
def AVFull(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.id
    #[28] = Input.transcription
    #[33] = Answer.partial_transcription
    #[34] = Answer."polarity"
    HIT_list = []
    
    with codec_open(mturk_csv, 'rb', 'utf-8') as audio_full_csv:
        f = list(csv.reader(audio_full_csv))
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            vid_id = [row[27]]
            vid_transcription = [row[33]]
            answer_polarity = [row[34]]
            age = row[30]
            location = row[31]
            gender = row[32]

            temp = HIT(hit_id, worker_id, work_time, vid_id, answer_polarity,
                       age, location, gender)
            temp.transcriptions = set(vid_transcription)

            for field in [age, location, gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'

            HIT_list.append(temp)

    return HIT_list
Example #19
def TextFull(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.id
    #[33] = Answer."polarity"
    HIT_list = []
    
    with codec_open(mturk_csv, 'rb', 'utf-8') as text_full_csv:
        f = list(csv.reader(text_full_csv))
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            vid_id = [row[27]]
            answer_polarity = [row[32]]
            age, location = row[30].split('|')
            gender = row[31]

            temp = HIT(hit_id, worker_id, work_time, vid_id, answer_polarity,
                       age, location, gender)

            for field in [age, location, gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'

            HIT_list.append(temp)

    return HIT_list
    def test_enddata_1(self):
        """
        There is an ENDDATA in the baseline BDF, so None -> ENDDATA
        """
        model = BDF(debug=False)
        full_path = os.path.join(test_path, 'include_dir')
        model2 = BDF(debug=False)

        bdf_filename = 'test_include.bdf'
        if not os.path.exists(bdf_filename):
            bdf_filename = os.path.join(test_path, bdf_filename)
        model2.read_bdf(bdf_filename, xref=True, punch=False)
        for out_filename, is_enddata, write_flag in [
            ('enddata1.bdf', True, None),
            ('enddata2.bdf', True, True),
            ('enddata3.bdf', False, False)]:
            out_filename = os.path.join(test_path, out_filename)
            model2.write_bdf(out_filename=out_filename+'.out', interspersed=True, size=8,
                             is_double=False, enddata=write_flag)

            with codec_open(out_filename + '.out', 'r') as f:
                data = f.read()

            if is_enddata:
                self.assertTrue('ENDDATA' in data)
            else:
                self.assertFalse('ENDDATA' in data)
            os.remove(out_filename + '.out')
Example #21
def load_csv(out_filename, encoding='latin1'):
    """
    The GUI CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError(
            'extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with codec_open(_filename(out_filename), 'r',
                    encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(
            file_obj, ext, force_float=False)

        try:
            #A = loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, dtype=dtype, delimiter=delimiter)
        except:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)
    return A, fmt_dict, names
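
A minimal usage sketch (the file name is hypothetical; the required header line is whatever _load_format_header parses from the first rows of the file):

    # hypothetical call: parse the header line, then load the body into an array
    A, fmt_dict, names = load_csv('results.csv', encoding='latin1')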
Example #22
    def write_cart3d(self, outfilename, is_binary=False, float_fmt='%6.7f'):
        assert len(self.points) > 0, 'len(self.points)=%s' % len(self.points)

        if self.loads is None or self.loads == {}:
            loads = {}
            is_loads = False
            # print("no loads")
        else:
            is_loads = True

        self.log.info("---writing cart3d file...%r---" % outfilename)
        if is_binary:
            form = 'wb'
        else:
            # ASCII output must be written in text mode
            form = 'w'
        with codec_open(outfilename, form) as outfile:
            int_fmt = self._write_header(outfile, self.points, self.elements, is_loads, is_binary)
            self._write_points(outfile, self.points, is_binary, float_fmt)
            self._write_elements(outfile, self.elements, is_binary, int_fmt)
            self._write_regions(outfile, self.regions, is_binary)

            if is_loads:
                assert is_binary is False, 'is_binary=%r is not supported for loads' % is_binary
                self._write_loads(outfile, self.loads, is_binary, float_fmt)
Example #23
    def test_encoding_write(self):
        from pyNastran.bdf.bdf import BDF

        mesh = BDF()
        mesh.add_card(['GRID', 100000, 0, 43.91715, -29., .8712984], 'GRID')
        mesh.write_bdf('out.bdf')
        lines_expected = [
            '$pyNastran: version=msc',
            '$pyNastran: punch=False',
            '$pyNastran: encoding=ascii',
            '$NODES',
            'GRID      100000        43.91715    -29..8712984',
        ]
        bdf_filename = 'out.bdf'
        with codec_open(bdf_filename, 'r', encoding='ascii') as f:
            lines = f.readlines()
            i = 0
            for line, line_expected in zip(lines, lines_expected):
                line = line.rstrip()
                line_expected = line_expected.rstrip()
                msg = 'The lines are not the same...i=%s\n' % i
                msg += 'line     = %r\n' % line
                msg += 'expected = %r\n' % line_expected
                msg += '-------------\n--Actual--\n%s' % ''.join(lines)
                msg += '-------------\n--Expected--\n%s' % ''.join(lines_expected)
                self.assertEqual(line, line_expected, msg)
                i += 1
Example #24
def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file.  Assume UTF-8 encoding.
    """
    with codec_open(joinpath(HERE, *parts), 'rb', 'utf-8') as dfp:
        return dfp.read()
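
A minimal usage sketch (assuming HERE is the directory containing setup.py and joinpath is os.path.join, as is conventional for this kind of helper):

    # hypothetical usage: read the README for a setup() long_description
    long_description = read('README.rst')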
Example #25
    def write_cart3d(self, outfilename, is_binary=False, float_fmt='%6.7f'):
        """
        writes a cart3d file
        """
        assert len(self.points) > 0, 'len(self.points)=%s' % len(self.points)

        if self.loads is None or self.loads == {}:
            loads = {}
            is_loads = False
        else:
            is_loads = True

        self.log.info("---writing cart3d...%r---" % outfilename)
        if is_binary:
            form = 'wb'
        else:
            # ASCII output must be written in text mode
            form = 'w'

        with codec_open(outfilename, form) as outfile:
            int_fmt = self._write_header(outfile, self.points, self.elements,
                                         is_loads, is_binary)
            self._write_points(outfile, self.points, is_binary, float_fmt)
            self._write_elements(outfile, self.elements, is_binary, int_fmt)
            self._write_regions(outfile, self.regions, is_binary)

            if is_loads:
                assert is_binary is False, 'is_binary=%r is not supported for loads' % is_binary
                self._write_loads(outfile, self.loads, is_binary, float_fmt)
Example #26
    def _open_file(self, bdf_filename, basename=False, check=True):
        """
        Opens a new bdf_filename with the proper encoding and include directory

        Parameters
        ----------
        bdf_filename : str
            the filename to open
        basename : bool (default=False)
            should the basename of bdf_filename be appended to the include directory
        """
        if basename:
            bdf_filename_inc = os.path.join(self.include_dir,
                                            os.path.basename(bdf_filename))
        else:
            bdf_filename_inc = os.path.join(self.include_dir, bdf_filename)

        self._validate_open_file(bdf_filename, bdf_filename_inc, check)

        self.log.debug('opening %r' % bdf_filename_inc)
        self.active_filenames.append(bdf_filename_inc)

        #print('ENCODING - _open_file=%r' % self.encoding)
        bdf_file = codec_open(_filename(bdf_filename_inc),
                              'r',
                              encoding=self.encoding)
        return bdf_file
Example #27
def load_file_contents(file_path, as_list=True):
    """Load file as string or list"""
    abs_file_path = path.join(HERE, file_path)
    with codec_open(abs_file_path, encoding='utf-8') as file_pointer:
        if as_list:
            return file_pointer.read().splitlines()
        return file_pointer.read()
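
A minimal usage sketch (the file name is hypothetical; paths are resolved relative to the module-level HERE):

    # hypothetical usage: load a text file as a list of lines
    requirements = load_file_contents('requirements.txt', as_list=True)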
Example #28
    def test_include_stop(self):
        with codec_open('a.bdf', 'w') as bdf_file:
            bdf_file.write('CEND\n')
            bdf_file.write('BEGIN BULK\n')
            bdf_file.write("INCLUDE 'b.bdf'\n\n")
            bdf_file.write('GRID,1,,1.0\n')
        model = BDF(log=log, debug=False)
        with self.assertRaises(IOError):
            model.read_bdf(bdf_filename='a.bdf',
                           xref=True,
                           punch=False,
                           read_includes=True,
                           encoding=None)
        with self.assertRaises(IOError):
            read_bdf(bdf_filename='a.bdf',
                     xref=True,
                     punch=False,
                     encoding=None,
                     log=log)
        model.read_bdf(bdf_filename='a.bdf',
                       xref=True,
                       punch=False,
                       read_includes=False,
                       encoding=None)
        model.write_bdf('out.bdf')
        os.remove('a.bdf')
        os.remove('out.bdf')
Example #29
    def test_enddata_1(self):
        """
        There is an ENDDATA in the baseline BDF, so None -> ENDDATA
        """
        model2 = BDF(log=log, debug=False)

        bdf_filename = 'test_include.bdf'
        if not os.path.exists(bdf_filename):
            bdf_filename = os.path.join(test_path, bdf_filename)
        model2.read_bdf(bdf_filename, xref=True, punch=False)

        cases = [
            ('enddata1.bdf', True, None),
            ('enddata2.bdf', True, True),
            ('enddata3.bdf', False, False),
        ]
        for out_filename, is_enddata, write_flag in cases:
            out_filename = os.path.join(test_path, out_filename)
            model2.write_bdf(out_filename=out_filename + '.out',
                             interspersed=True,
                             size=8,
                             is_double=False,
                             enddata=write_flag)

            with codec_open(out_filename + '.out', 'r') as bdf_file:
                data = bdf_file.read()

            if is_enddata:
                self.assertTrue('ENDDATA' in data)
            else:
                self.assertFalse('ENDDATA' in data)
            os.remove(out_filename + '.out')
    def test_enddata_2(self):
        """
        There is no ENDDATA in the baseline BDF, so None -> no ENDDATA
        """
        model = BDF(debug=False)
        full_path = os.path.join(test_path, 'include_dir')
        model2 = BDF(debug=False)
        bdf_name = os.path.join(test_path, 'test_mass.dat')
        model2.read_bdf(bdf_name, xref=True, punch=False)
        for out_filename, is_enddata, write_flag in [
            ('test_mass1.dat', False, None),
            ('test_mass2.dat', True, True),
            ('test_mass3.dat', False, False)]:
            model2.write_bdf(out_filename=out_filename, interspersed=True, size=8,
                             is_double=False, enddata=write_flag)

            with codec_open(out_filename, 'r') as f:
                data = f.read()

            msg = 'outfilename=%r expected=%r write_flag=%s card_count=%r' % (out_filename, is_enddata, write_flag, model2.card_count.keys())
            if is_enddata:
                self.assertTrue('ENDDATA' in data, msg)
            else:
                self.assertFalse('ENDDATA' in data, msg)
            os.remove(out_filename)
Example #31
    def test_tri_180_01(self):
        r"""
        Identify a reasonable tri with super tight tolerances

        y
        ^         4
        |       / /
        |     /   /
        |   /    /
        | /      /
        /       /
        1------2-------> x
        """
        msg = (
            'CEND\n'
            'BEGIN BULK\n'
            'GRID,1,,0.,0.,0.\n'
            'GRID,2,,1.,0.,0.\n'
            'GRID,4,,2., 1.,0.\n'

            'CTRIA3,100,1, 1,2,4\n'
            'PSHELL,1,1,0.1\n'
            'MAT1,1,3.0,, 0.3\n'
            'ENDDATA'
        )
        bdf_filename = 'ctria3.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        model = read_bdf(bdf_filename, xref=True)
        eids_to_delete = get_bad_shells(model, max_theta=180.,
                                        max_skew=1000., max_aspect_ratio=1000.)
        assert eids_to_delete == [100], eids_to_delete
        os.remove(bdf_filename)
Example #32
    def read_cart3d(self, infilename, result_names=None):
        """extracts the points, elements, and Cp"""
        self.infilename = infilename
        self.log.info("---starting reading cart3d file...%r---" % self.infilename)

        self.infilename = infilename
        if is_binary_file(infilename):
            with open(infilename, 'rb') as self.infile:
                npoints, nelements, nresults = self._read_header_binary()
                self.points = self._read_points_binary(npoints)
                self.elements = self._read_elements_binary(nelements)
                self.regions = self._read_regions_binary(nelements)
                # TODO: loads
        else:
            with codec_open(_filename(infilename), 'r', encoding=self._encoding) as self.infile:
                npoints, nelements, nresults = self._read_header_ascii()
                self.points = self._read_points_ascii(npoints)
                self.elements = self._read_elements_ascii(nelements)
                self.regions = self._read_regions_ascii(nelements)
                self._read_results_ascii(0, self.infile, nresults, result_names=result_names)

        self.log.debug("npoints=%s nelements=%s" % (self.npoints, self.nelements))
        self.log.info("---finished reading cart3d file...%r---" % self.infilename)
        assert self.npoints > 0, 'npoints=%s' % self.npoints
        assert self.nelements > 0, 'nelements=%s' % self.nelements
Example #33
    def test_enddata_2(self):
        """
        There is no ENDDATA in the baseline BDF, so None -> no ENDDATA
        """
        model2 = BDF(log=log, debug=False)
        bdf_name = os.path.join(test_path, 'test_mass.dat')
        model2.read_bdf(bdf_name, xref=True, punch=False)

        cases = [('test_mass1.dat', False, None),
                 ('test_mass2.dat', True, True),
                 ('test_mass3.dat', False, False)]
        for out_filename, is_enddata, write_flag in cases:
            model2.write_bdf(out_filename=out_filename,
                             interspersed=True,
                             size=8,
                             is_double=False,
                             enddata=write_flag)

            with codec_open(out_filename, 'r') as bdf_file:
                data = bdf_file.read()

            msg = 'outfilename=%r expected=%r write_flag=%s card_count=%r' % (
                out_filename, is_enddata, write_flag, model2.card_count.keys())
            if is_enddata:
                self.assertTrue('ENDDATA' in data, msg)
            else:
                self.assertFalse('ENDDATA' in data, msg)
            os.remove(out_filename)
Example #34
    def test_eq4(self):
        r"""
          5
        6 *-------* 40
          | \     |
          |   \   |
          |     \ |
          *-------* 3
          1       20
        """
        msg = 'CEND\n'
        msg += 'BEGIN BULK\n'
        msg += 'GRID,1, , 0.,   0.,   0.\n'
        msg += 'GRID,20,, 1.,   0.,   0.\n'
        msg += 'GRID,3, , 1.01, 0.,   0.\n'

        msg += 'GRID,41,, 1.,   1.,   0.\n'  # eq
        msg += 'GRID,4,, 1.,   1.,   0.\n'  # eq
        msg += 'GRID,40,, 1.,   1.,   0.\n'  # eq
        msg += 'GRID,4,, 1.,   1.,   0.\n'  # eq

        msg += 'GRID,5, , 0.,   1.,   0.\n'
        msg += 'GRID,6, , 0.,   1.01, 0.\n'
        msg += 'CTRIA3,1, 100,1,20,6\n'
        msg += 'CTRIA3,10,100,3,40,5\n'
        msg += 'PSHELL,100,1000,0.1\n'
        msg += 'MAT1,1000,3.0,, 0.3\n'
        msg += 'ENDDATA'
        bdf_filename = 'nonunique.bdf'
        bdf_filename_out = 'unique.bdf'

        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        tol = 0.2
        node_set = [4, 40, 41]
        # Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False,
                              log=log, debug=False)

        model = BDF(log=log, debug=False)
        model.read_bdf(bdf_filename_out)
        nids = model.nodes.keys()
        assert len(model.nodes) == 6, 'nnodes=%s nodes=%s' % (len(model.nodes), nids)
        assert 1 in nids, nids
        assert 20 in nids, nids
        assert 3 in nids, nids
        assert 4 in nids, nids
        assert 5 in nids, nids
        assert 6 in nids, nids
        assert 40 not in nids, nids
        assert 41 not in nids, nids
        #print(nids)
        os.remove(bdf_filename)
        os.remove(bdf_filename_out)
Example #35
def writePage(path, page):
    (parent, file) = os.path.split(path)
    if settings.verbose:
        print('writing ' + path)
    if (os.path.isfile(path)):
        os.remove(path)
    ensure_dir(parent)
    with codec_open(path, mode="a", encoding="utf-8") as htmlFile:
        htmlFile.write(page)
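
A minimal usage sketch (the path and page string are hypothetical; ensure_dir and settings are module-level helpers from the surrounding code):

    # hypothetical usage: write a rendered HTML string, replacing any existing file
    writePage(os.path.join('out', 'index.html'), u'<html><body>Hello</body></html>')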
Example #36
    def test_eq4(self):
        r"""
          5
        6 *-------* 40
          | \     |
          |   \   |
          |     \ |
          *-------* 3
          1       20
        """
        msg = 'CEND\n'
        msg += 'BEGIN BULK\n'
        msg += 'GRID,1, , 0.,   0.,   0.\n'
        msg += 'GRID,20,, 1.,   0.,   0.\n'
        msg += 'GRID,3, , 1.01, 0.,   0.\n'

        msg += 'GRID,41,, 1.,   1.,   0.\n'  # eq
        msg += 'GRID,4,, 1.,   1.,   0.\n'  # eq
        msg += 'GRID,40,, 1.,   1.,   0.\n'  # eq
        msg += 'GRID,4,, 1.,   1.,   0.\n'  # eq

        msg += 'GRID,5, , 0.,   1.,   0.\n'
        msg += 'GRID,6, , 0.,   1.01, 0.\n'
        msg += 'CTRIA3,1, 100,1,20,6\n'
        msg += 'CTRIA3,10,100,3,40,5\n'
        msg += 'PSHELL,100,1000,0.1\n'
        msg += 'MAT1,1000,3.0,, 0.3\n'
        msg += 'ENDDATA'
        bdf_filename = 'nonunique.bdf'
        bdf_filename_out = 'unique.bdf'

        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        tol = 0.2
        node_set = [4, 40, 41]
        # Collapse 5/6 and 20/3; Put a 40 and 20 to test non-sequential IDs
        bdf_equivalence_nodes(bdf_filename, bdf_filename_out, tol,
                              renumber_nodes=False, neq_max=4, xref=True,
                              node_set=node_set, crash_on_collapse=False, debug=False)

        model = BDF(debug=False)
        model.read_bdf(bdf_filename_out)
        nids = model.nodes.keys()
        assert len(model.nodes) == 6, 'nnodes=%s nodes=%s' % (len(model.nodes), nids)
        assert 1 in nids, nids
        assert 20 in nids, nids
        assert 3 in nids, nids
        assert 4 in nids, nids
        assert 5 in nids, nids
        assert 6 in nids, nids
        assert 40 not in nids, nids
        assert 41 not in nids, nids
        #print(nids)
        os.remove(bdf_filename)
        os.remove(bdf_filename_out)
Example #37
def writePage(path, page):
    (parent, file) = os.path.split(path)
    if settings.verbose:
        print('writing ' + path)
    if os.path.isfile(path):
        os.remove(path)
    ensure_dir(parent)
    with codec_open(path, mode="a", encoding="utf-8") as htmlFile:
        htmlFile.write(page)
def nastran_to_cart3d_filename(bdf_filename, cart3d_filename, log=None, debug=False):
    """
    Creates a Cart3D file from a Nastran BDF.

    Parameters
    ----------
    bdf_filename : str
        the path to the bdf file
    cart3d_filename : str
        the path to the cart3d output file
    log : log; default=None -> dummyLogger
        a logger object
    debug : bool; default=False
        True/False (used if log is not defined)
    """
    model = BDF(log=log, debug=debug)
    model.read_bdf(bdf_filename)
    nnodes = len(model.nodes)
    nelements = len(model.elements)

    f = codec_open(cart3d_filename, 'w', encoding='utf8')
    f.write('%s %s\n' % (nnodes, nelements))
    node_id_shift = {}
    i = 1
    for node_id, node in sorted(iteritems(model.nodes)):
        node_id_shift[node_id] = i
        x, y, z = node.get_position()
        f.write('%s %s %s\n' % (x, y, z))
        i += 1
    mids = ''
    j = 0
    for element_id, element in sorted(iteritems(model.elements)):
        if element.type in ['CQUADR', 'CQUAD4', 'CONM2']:
            print('element type=%s is not supported' % element.type)
            continue
        assert element.type in ['CTRIA3', 'CTRIAR'], element.type


        out = element.node_ids
        try:
            n1, n2, n3 = out
        except:
            print("type =", element.type)
            raise
        #print out
        n1 = node_id_shift[n1]
        n2 = node_id_shift[n2]
        n3 = node_id_shift[n3]
        mid = element.Mid()
        f.write('%i %i %i\n' % (n1, n2, n3))
        mids += '%i ' % mid
        if j != 0 and j % 20 == 0:
            mids += '\n'
        j += 1
    f.write(mids + '\n')
    f.close()
def nastran_to_cart3d_filename(bdf_filename,
                               cart3d_filename,
                               log=None,
                               debug=False):
    """
    Creates a Cart3D file from a Nastran BDF.

    Parameters
    ----------
    bdf_filename : str
        the path to the bdf file
    cart3d_filename : str
        the path to the cart3d output file
    log : log; default=None -> dummyLogger
        a logger object
    debug : bool; default=False
        True/False (used if log is not defined)
    """
    model = BDF(log=log, debug=debug)
    model.read_bdf(bdf_filename)
    nnodes = len(model.nodes)
    nelements = len(model.elements)

    with codec_open(cart3d_filename, 'w', encoding='utf8') as cart3d:
        cart3d.write('%s %s\n' % (nnodes, nelements))
        node_id_shift = {}
        i = 1
        for node_id, node in sorted(iteritems(model.nodes)):
            node_id_shift[node_id] = i
            x, y, z = node.get_position()
            cart3d.write('%s %s %s\n' % (x, y, z))
            i += 1
        mids = ''
        j = 0
        for element_id, element in sorted(iteritems(model.elements)):
            if element.type in ['CQUADR', 'CQUAD4', 'CONM2']:
                print('element type=%s is not supported' % element.type)
                continue
            assert element.type in ['CTRIA3', 'CTRIAR'], element.type

            out = element.node_ids
            try:
                n1, n2, n3 = out
            except:
                print("type =", element.type)
                raise
            n1 = node_id_shift[n1]
            n2 = node_id_shift[n2]
            n3 = node_id_shift[n3]
            mid = element.Mid()
            cart3d.write('%i %i %i\n' % (n1, n2, n3))
            mids += '%i ' % mid
            if j != 0 and j % 20 == 0:
                mids += '\n'
            j += 1
        cart3d.write(mids + '\n')
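
A minimal usage sketch (file names are hypothetical; the BDF should contain only CTRIA3/CTRIAR shells, since other element types are skipped):

    # hypothetical usage: convert a triangulated Nastran model to an ASCII Cart3D file
    nastran_to_cart3d_filename('wing.bdf', 'wing.tri')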
    def test_include_03(self):

        with codec_open('a.bdf', 'w') as f:
            f.write("INCLUDE 'executive_control.inc'\n\n")
            f.write('CEND\n')
            f.write("INCLUDE 'case_control.inc'\n\n")
            f.write('BEGIN BULK\n')
            f.write('GRID,1,,1.0\n')
            f.write("INCLUDE 'b.bdf'\n\n")
            f.write('GRID,4,,4.0\n')

        with codec_open('executive_control.inc', 'w') as f:
            f.write('SOL = 103\n')

        with codec_open('case_control.inc', 'w') as f:
            f.write('DISP = ALL\n')

        with codec_open('b.bdf', 'w') as f:
            f.write('GRID,2,,2.0\n')
            f.write("INCLUDE 'c.bdf'\n\n")
            f.write('GRID,5,,5.0\n')

        with codec_open('c.bdf', 'w') as f:
            f.write('GRID,3,,3.0\n\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('a.bdf')
        model.write_bdf('a.out.bdf')

        os.remove('a.bdf')
        os.remove('b.bdf')
        os.remove('c.bdf')
        os.remove('executive_control.inc')
        os.remove('case_control.inc')

        os.remove('a.out.bdf')
        self.assertEqual(len(model.nodes), 5)
        self.assertEqual(model.nnodes, 5, 'nnodes=%s' % model.nnodes)
    def test_case_control_08(self):
        lines_expected = [
            '$pyNastran: version=msc\n',
            '$pyNastran: punch=True\n',
            '$pyNastran: encoding=ascii\n' if PY2 else '$pyNastran: encoding=utf-8\n',
            '$NODES\n',
            'GRID,100000,,43.91715,-29.,.8712984\n',
        ]
        bdf_filename = 'test7.bdf'
        bdf_filename2 = 'test7_bad.bdf'
        with codec_open(bdf_filename, 'w', encoding='ascii') as f:
            for line in lines_expected:
                f.write(line)
        bdf = BDF()
        bdf.read_bdf(bdf_filename)
        bdf.write_bdf(bdf_filename2)

        with codec_open(bdf_filename, 'r', encoding='ascii') as f:
            lines = f.readlines()
            compare_lines(self, lines, lines_expected, has_endline=True)
Example #42
def MakeCSV(transcriptions_list):

    with codec_open(OUTFILE, 'w', 'utf-8-sig') as outfile:
        outfile.write(','.join(LABELS.split()) + os.linesep)
        while len(transcriptions_list) > 0:
            HIT = random.sample(transcriptions_list, min(5, len(transcriptions_list)))

            hit = [x.comma_delimited for x in HIT]
            outfile.write(",".join(hit) + os.linesep)

            transcriptions_list = set(transcriptions_list).difference(set(HIT))
Example #43
    def UpdateMturkCSV(self, name):
        csv_original = list(
            csv.reader(
                codec_open(os.path.join(MTURK_DIR, name + "_results.csv"),
                           'rb', 'utf-8')))

        filtered_dir = os.getcwd() + '/filtered'
        with codec_open(
                os.path.join(filtered_dir, name + "_results_filtered.csv"),
                'w', 'utf-8') as csv_filtered:
            csv_writer = csv.writer(csv_filtered)
            for i in range(len(csv_original)):
                if i == 0:
                    #AssignmentStatus = csv_original[0].index('AssignmentStatus')
                    #RequesterFeedback = csv_original[0].index('RequesterFeedback')
                    #Reject = csv_original[0].index('Reject')
                    pass
                else:
                    hit = self.HIT_list[i - 1]
                    if hit.reject_flag:
                        #csv_original[i][AssignmentStatus] = 'Rejected'
                        #csv_original[i][RequesterFeedback] = hit.reject_reason
                        csv_original[i].append('')
                        csv_original[i].append(hit.reject_reason)
                csv_writer.writerow(csv_original[i])
    def test_case_control_08(self):
        lines_expected = [
            '$pyNastran: version=msc\n',
            '$pyNastran: punch=True\n',
            '$pyNastran: encoding=ascii\n' if PY2 else '$pyNastran: encoding=utf-8\n',
            '$NODES\n',
            'GRID,100000,,43.91715,-29.,.8712984\n',
        ]
        bdf_filename = 'test7.bdf'
        bdf_filename2 = 'test7_bad.bdf'
        with codec_open(bdf_filename, 'w', encoding='ascii') as f:
            for line in lines_expected:
                f.write(line)
        bdf = BDF(debug=False)
        bdf.read_bdf(bdf_filename)
        bdf.write_bdf(bdf_filename2)

        with codec_open(bdf_filename, 'r', encoding='ascii') as f:
            lines = f.readlines()
            compare_lines(self, lines, lines_expected, has_endline=True)
        os.remove(bdf_filename)
        os.remove(bdf_filename2)
Example #45
    def test_include_03(self):
        """tests executive/case control includes"""
        with codec_open('a.bdf', 'w') as bdf_file:
            bdf_file.write("INCLUDE 'executive_control.inc'\n\n")
            bdf_file.write('CEND\n')
            bdf_file.write("INCLUDE 'case_control.inc'\n\n")
            bdf_file.write('BEGIN BULK\n')
            bdf_file.write('GRID,1,,1.0\n')
            bdf_file.write("INCLUDE 'b.bdf'\n\n")
            bdf_file.write('GRID,4,,4.0\n')

        with codec_open('executive_control.inc', 'w') as bdf_file:
            bdf_file.write('SOL = 103\n')

        with codec_open('case_control.inc', 'w') as bdf_file:
            bdf_file.write('DISP = ALL\n')

        with codec_open('b.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,2,,2.0\n')
            bdf_file.write("INCLUDE 'c.bdf'\n\n")
            bdf_file.write('GRID,5,,5.0\n')

        with codec_open('c.bdf', 'w') as bdf_file:
            bdf_file.write('GRID,3,,3.0\n\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('a.bdf')
        model.write_bdf('a.out.bdf')

        os.remove('a.bdf')
        os.remove('b.bdf')
        os.remove('c.bdf')
        os.remove('executive_control.inc')
        os.remove('case_control.inc')

        os.remove('a.out.bdf')
        self.assertEqual(len(model.nodes), 5)
        self.assertEqual(model.nnodes, 5, 'nnodes=%s' % model.nnodes)
Example #46
def TextFragment(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.chunk_1_id
    #[32] = Input.chunk_2_id
    #[37] = Input.chunk_3_id
    #[42] = Input.chunk_4_id
    #[47] = Input.chunk_5_id
    #[60] = Answer."chunk_1_polarity"
    #[61] = Answer."chunk_2_polarity"
    #[62] = Answer."chunk_3_polarity"
    #[63] = Answer."chunk_4_polarity"
    #[64] = Answer."chunk_5_polarity"
    HIT_list = []

    with codec_open(mturk_csv, 'rb', 'utf-8') as text_fragment_csv:
        f = list(csv.reader(text_fragment_csv))
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            chunk_ids = row[27:52:5]
            answer_polarities = row[54:59]

            #this checks for the two cases (fragments 5.1 and 14.1) where a
            #text transcription was mistakenly omitted from the experiment,
            #ensuring that HITs with these fragments are not rejected for
            #being incomplete
            if chunk_ids[0] == ('5.1'):
                chunk_ids = chunk_ids[1:]
                answer_polarities = answer_polarities[1:]
            elif chunk_ids[2] == ('14.1'):
                chunk_ids.pop(2)
                answer_polarities.pop(2)

            age, location = row[52].split('|')
            gender = row[53]

            temp = HIT(hit_id, worker_id, work_time, chunk_ids,
                       answer_polarities, age, location, gender)

            for field in [age, location, gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'

            HIT_list.append(temp)

    return HIT_list
Example #47
def load_deflection_csv(out_filename, encoding='latin1'):
    """
    The GUI deflection CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError('extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with codec_open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(file_obj, ext, force_float=False)

        try:
            #A = np.loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, delimiter=delimiter)
        except:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)

    names_without_index = names[1:]
    fmt_dict_without_index = {key:fmt_dict[key] for key in names_without_index}

    nnames_without_index = len(names_without_index)
    nexpected_results = 1 + 3 * nnames_without_index

    try:
        _nrows, ncols = A.shape
    except ValueError:
        msg = ('A should be (nnodes, 1+ndeflection_results); '
               'A.shape=%s nexpected_results=%s names=%s' % (
                   str(A.shape), nexpected_results, names))
        raise ValueError(msg)

    if ncols != nexpected_results:
        msg = 'A.shape=%s ncols=%s nexpected_results=%s names=%s nnames_without_index=%s' % (
            str(A.shape), ncols, nexpected_results, names, nnames_without_index)
        raise ValueError(msg)

    B = {}
    for i, name in enumerate(names_without_index):
        B[name] = A[:, 3*i:3*i+3]

    assert len(B) == len(fmt_dict_without_index), 'B.keys()=%s fmt_dict.keys()=%s' % (list(B.keys()), list(fmt_dict_without_index.keys()))
    assert len(B) == len(names_without_index), 'B.keys()=%s names.keys()=%s' % (list(B.keys()), names_without_index)
    return B, fmt_dict_without_index, names_without_index
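
A minimal usage sketch (the file name is hypothetical; each returned entry of B is an (nnodes, 3) array of x/y/z components keyed by result name):

    # hypothetical usage: pull the first deflection result out of the returned dict
    B, fmt_dict, names = load_deflection_csv('deflections.csv')
    dxyz = B[names[0]]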
Example #48
    def test_include_05(self):
        with codec_open('include5.bdf', 'w') as bdf_file:
            bdf_file.write('$ pyNastran: punch=True\n')
            bdf_file.write('$ pyNastran: dumplines=True\n')
            bdf_file.write("INCLUDE 'include5b.inc'\n\n")

        with codec_open('include5b.inc', 'w') as bdf_file:
            bdf_file.write('ECHOON\n')
            bdf_file.write('$ GRID comment\n')
            bdf_file.write('GRID,2,,2.0\n')
            bdf_file.write('ECHOOFF\n')
            bdf_file.write('GRID,3,,3.0\n')
            bdf_file.write('grid,4,,4.0\n')
            bdf_file.write('grid ,5,,5.0\n')

        model = BDF(log=log, debug=False)
        model.read_bdf('include5.bdf')
        assert model.echo is False, model.echo
        #model.write_bdf('include5.out.bdf')

        # os.remove('c.bdf')
        # os.remove('executive_control.inc')
        # os.remove('case_control.inc')

        self.assertEqual(len(model.nodes), 4)
        self.assertEqual(model.nnodes, 4, 'nnodes=%s' % model.nnodes)

        model2 = read_bdf(bdf_filename='include5.bdf',
                          xref=True,
                          punch=False,
                          log=log,
                          encoding=None)
        self.assertEqual(len(model2.nodes), 4)
        self.assertEqual(model2.nnodes, 4, 'nnodes=%s' % model2.nnodes)
        os.remove('include5.bdf')
        #os.remove('include5.out.bdf')
        os.remove('include5b.inc')
Example #49
def TextFragment(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.chunk_1_id
    #[32] = Input.chunk_2_id
    #[37] = Input.chunk_3_id
    #[42] = Input.chunk_4_id
    #[47] = Input.chunk_5_id
    #[60] = Answer."chunk_1_polarity"
    #[61] = Answer."chunk_2_polarity"
    #[62] = Answer."chunk_3_polarity"
    #[63] = Answer."chunk_4_polarity"
    #[64] = Answer."chunk_5_polarity"
    HIT_list = []
    
    with codec_open(mturk_csv, 'rb', 'utf-8') as text_fragment_csv:
        f = list(csv.reader(text_fragment_csv))
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            chunk_ids = row[27:52:5]
            answer_polarities = row[54:59]

            #this checks for the two cases (fragments 5.1 and 14.1) where a
            #text transcription was mistakenly omitted from the experiment,
            #ensuring that HITs with these fragments are not rejected for
            #being incomplete
            if chunk_ids[0] == '5.1':
                chunk_ids = chunk_ids[1:]
                answer_polarities = answer_polarities[1:]
            elif chunk_ids[2] == '14.1':
                chunk_ids.pop(2)
                answer_polarities.pop(2)

            age, location = row[52].split('|')
            gender = row[53]

            temp = HIT(hit_id, worker_id, work_time, chunk_ids,
                       answer_polarities, age, location, gender)

            for field in [age, location, gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'

            HIT_list.append(temp)

    return HIT_list
Example #50
    def test_encoding_write(self):
        mesh = BDF(log=log, debug=False)
        mesh.add_card(['GRID', 100000, 0, 43.91715, -29., .8712984], 'GRID')
        mesh.write_bdf('out.bdf')
        lines_expected = [
            '$pyNastran: version=msc',
            '$pyNastran: punch=True',
            '$pyNastran: encoding=ascii' if PY2 else '$pyNastran: encoding=utf-8',
            '$pyNastran: nnodes=1',
            '$pyNastran: nelements=0',
            '$NODES',
            'GRID      100000        43.91715    -29..8712984',
        ]
        bdf_filename = 'out.bdf'
        with codec_open(bdf_filename, 'r', encoding='ascii') as bdf_file:
            lines = bdf_file.readlines()
            compare_lines(self, lines, lines_expected, has_endline=False)
Example #51
def AVFragment(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.chunk_1_id
    #[32] = Input.chunk_2_id
    #[37] = Input.chunk_3_id
    #[42] = Input.chunk_4_id
    #[47] = Input.chunk_5_id
    #[55] = Answer.chunk_1_transcription
    #[56] = Answer.chunk_2_transcription
    #[57] = Answer.chunk_3_transcription
    #[58] = Answer.chunk_4_transcription
    #[59] = Answer.chunk_5_transcription
    #[60] = Answer."chunk_1_polarity"
    #[61] = Answer."chunk_2_polarity"
    #[62] = Answer."chunk_3_polarity"
    #[63] = Answer."chunk_4_polarity"
    #[64] = Answer."chunk_5_polarity"
    HIT_list = []

    with codec_open(mturk_csv, 'rb', 'utf-8') as audio_fragment_csv:
        f = list(csv.reader(audio_fragment_csv))
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            chunk_ids = row[27:52:5]
            chunk_transcriptions = row[55:60]
            answer_polarities = row[60:65]
            age = row[52]
            location = row[53]
            gender = row[54]

            temp = HIT(hit_id, worker_id, work_time, chunk_ids,
                       answer_polarities, age, location, gender)
            temp.transcriptions = set(chunk_transcriptions)

            for field in [age, location, gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'

            HIT_list.append(temp)

    return HIT_list
Example #52
def AVFragment(mturk_csv, experiment):
    #[0] = HITId
    #[15] = WorkerId
    #[23] = WorkTimeInSeconds
    #[27] = Input.chunk_1_id
    #[32] = Input.chunk_2_id
    #[37] = Input.chunk_3_id
    #[42] = Input.chunk_4_id
    #[47] = Input.chunk_5_id
    #[55] = Answer.chunk_1_transcription
    #[56] = Answer.chunk_2_transcription
    #[57] = Answer.chunk_3_transcription
    #[58] = Answer.chunk_4_transcription
    #[59] = Answer.chunk_5_transcription
    #[60] = Answer.“chunk_1_polarityâ€
    #[61] = Answer.“chunk_2_polarityâ€
    #[62] = Answer.“chunk_3_polarityâ€
    #[63] = Answer.“chunk_4_polarityâ€
    #[64] = Answer.“chunk_5_polarityâ€
    HIT_list = []
    
    with codec_open(mturk_csv, 'rb', 'utf-8') as audio_fragment_csv:
        f = list( csv.reader(audio_fragment_csv) )
        for row in f[1:]:
            hit_id = row[0]
            worker_id = row[15]
            work_time = row[23]
            chunk_ids = row[27:52:5]
            chunk_transcriptions = row[55:60]
            answer_polarities = row[60:65]
            age = row[52]
            location = row[53]
            gender = row[54]
            
            temp = HIT(hit_id, worker_id, work_time, chunk_ids, answer_polarities, age, location, gender)
            temp.transcriptions = set(chunk_transcriptions)
            
            for field in [age,location,gender]:
                if 'select one' in field:
                    temp.reject_flag = True
                    temp.reject_reason = 'Pre-survey was left incomplete'
                    
            HIT_list.append( temp )
    
    return HIT_list
Beispiel #53
    def test_quad_180_01(self):
        r"""
        Identify a 180+ degree quad

        y
        ^         4
        |       / |
        |     /   |
        |   /     |
        | /       |
        /         |
        1------2  |----> x
                \ |
                 \|
                  3
        """
        msg = ('CEND\n'
               'BEGIN BULK\n'
               'GRID,1,,0.,0.,0.\n'
               'GRID,2,,1.,0.,0.\n'
               'GRID,3,,2.,-1.,0.\n'
               'GRID,4,,2., 1.,0.\n'
               'CQUAD4,100,1, 1,2,3,4\n'
               'PSHELL,1,1,0.1\n'
               'MAT1,1,3.0,, 0.3\n'
               'ENDDATA')
        bdf_filename = 'cquad4.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        model = read_bdf(bdf_filename, log=log, xref=True)
        xyz_cid0 = model.get_xyz_in_coord(cid=0, fdtype='float32')
        nid_map = {}
        for i, (nid, node) in enumerate(sorted(iteritems(model.nodes))):
            #xyz = node.get_position()
            #xyz_cid0[i, :] = xyz
            nid_map[nid] = i
        eids_to_delete = get_bad_shells(model,
                                        xyz_cid0,
                                        nid_map,
                                        max_theta=180.,
                                        max_skew=1000.,
                                        max_aspect_ratio=1000.)
        assert eids_to_delete == [100], eids_to_delete
        os.remove(bdf_filename)
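
Why this quad trips `max_theta=180.`: the nodes 1-2-3-4 above trace a counterclockwise polygon whose interior angle at node 2 is reflex. A minimal check, independent of pyNastran:

import numpy as np

pts = np.array([[0., 0.], [1., 0.], [2., -1.], [2., 1.]])  # nodes 1-4, z dropped
u = pts[1] - pts[0]  # edge 1->2
v = pts[2] - pts[1]  # edge 2->3
cross = u[0]*v[1] - u[1]*v[0]  # z-component of the 2D cross product
turn = np.degrees(np.arctan2(cross, np.dot(u, v)))  # signed turn at node 2
print(180.0 - turn)  # 225.0 -> interior angle > 180, so eid 100 is flagged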
Beispiel #55
    def test_include_stop(self):
        with codec_open('a.bdf', 'w') as bdf_file:
            bdf_file.write('CEND\n')
            bdf_file.write('BEGIN BULK\n')
            bdf_file.write("INCLUDE 'b.bdf'\n\n")
            bdf_file.write('GRID,1,,1.0\n')
        model = BDF(log=log, debug=False)
        with self.assertRaises(IOError):
            model.read_bdf(bdf_filename='a.bdf', xref=True, punch=False,
                           read_includes=True, encoding=None)
        with self.assertRaises(IOError):
            read_bdf(bdf_filename='a.bdf', xref=True, punch=False,
                     encoding=None, log=log)
        model.read_bdf(bdf_filename='a.bdf', xref=True, punch=False,
                       read_includes=False, encoding=None)
        model.write_bdf('out.bdf')
        os.remove('a.bdf')
        os.remove('out.bdf')
Beispiel #56
    def test_read_bad_02(self):
        """tests when users don't add punch=True to read_bdf(...)"""
        lines = [
            'GRID     1000177       0      1.      0.      0.       0\n',
            'GRID     1000178       0      0.      1.      0.       0\n',
            'GRID     1000186       0      0.      0.      1.       0\n',
            'GRID     1000187       0      1.      1.      1.       0\n',
            'GRID    15000014       0      2.      1.      1.       0\n',
            'RBE2    1500002215000014  123456 1000177 1000178 1000186 1000187\n',
        ]
        bdf_filename = 'xref_test.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.writelines(lines)
        with self.assertRaises(RuntimeError):
            read_bdf(bdf_filename, validate=False, xref=False,
                     punch=False, encoding=None,
                     log=log, debug=True, mode='msc')
        os.remove(bdf_filename)
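
For contrast, the same deck reads cleanly once it is declared as a punch file (bulk data only, with no executive or case control decks) — a sketch:

# punch=True tells the reader to expect bulk-data cards immediately,
# so the GRID/RBE2 lines above parse without raising.
model = read_bdf(bdf_filename, punch=True, xref=False, log=log)
assert len(model.nodes) == 5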
Beispiel #57
    def test_quad_180_01(self):
        r"""
        Identify a 180+ degree quad

        y
        ^         4
        |       / |
        |     /   |
        |   /     |
        | /       |
        /         |
        1------2  |----> x
                \ |
                 \|
                  3
        """
        msg = (
            'CEND\n'
            'BEGIN BULK\n'
            'GRID,1,,0.,0.,0.\n'
            'GRID,2,,1.,0.,0.\n'
            'GRID,3,,2.,-1.,0.\n'
            'GRID,4,,2., 1.,0.\n'
            'CQUAD4,100,1, 1,2,3,4\n'
            'PSHELL,1,1,0.1\n'
            'MAT1,1,3.0,, 0.3\n'
            'ENDDATA'
        )
        bdf_filename = 'cquad4.bdf'
        with codec_open(bdf_filename, 'w') as bdf_file:
            bdf_file.write(msg)

        model = read_bdf(bdf_filename, xref=True)
        xyz_cid0 = model.get_xyz_in_coord(cid=0, dtype='float32')
        nid_map = {}
        for i, (nid, node) in enumerate(sorted(iteritems(model.nodes))):
            #xyz = node.get_position()
            #xyz_cid0[i, :] = xyz
            nid_map[nid] = i
        eids_to_delete = get_bad_shells(model, xyz_cid0, nid_map, max_theta=180.,
                                        max_skew=1000., max_aspect_ratio=1000.)
        assert eids_to_delete == [100], eids_to_delete
        os.remove(bdf_filename)
Beispiel #58
    def _dump_file(self, bdf_dump_filename, lines, i):
        # type: (str, List[str], int) -> None
        """
        Writes a BDF up to some failed line index

        Parameters
        ----------
        bdf_dump_filename : str
            the bdf filename to dump
        lines : List[str]
            the entire list of lines
        i : int
            the last index to write
        """
        with codec_open(_filename(bdf_dump_filename),
                        'w',
                        encoding=self.encoding) as crash_file:
            for line in lines[:i]:
                crash_file.write(line)
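
A hypothetical call (the reader instance, filenames, and failing index are placeholders; this helper appears to back the `dumplines=True` pragma seen in the include tests, firing when a deck fails to parse):

# reader: a BDF instance whose encoding is already set
with open('model.bdf') as f:
    lines = f.readlines()
reader._dump_file('model.dump.bdf', lines, 42)  # writes lines[:42]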
Beispiel #59
def load_deflection_csv(out_filename, encoding='latin1'):
    """
    The GUI deflection CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError('extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with codec_open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(file_obj, ext, force_float=False)
        nnames = len(names)

        try:
            #A = np.loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, delimiter=delimiter)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)

        try:
            nrows, ncols = A.shape
        except ValueError:
            msg = 'A should be (nnodes, 3); A.shape=%s nnames*3=%s names=%s' % (
                str(A.shape), nnames*3, names)
            raise ValueError(msg)

        if ncols != (nnames * 3):
            msg = 'A.shape=%s ncols=%s nnames*3=%s names=%s' % (
                str(A.shape), ncols, nnames*3, names)
            raise RuntimeError(msg)
    B = {}
    for i, name in enumerate(names):
        B[name] = A[:, 3*i:3*i+3]
    return B, fmt_dict, names
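
A consumption sketch, assuming 'deflections.csv' already follows the expected header format (the filename and result names are hypothetical):

B, fmt_dict, names = load_deflection_csv('deflections.csv')
for name in names:
    dxyz = B[name]  # (nnodes, 3) array of dx/dy/dz for this result
    print(name, dxyz.shape)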