Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--rm', action='store_true')
    parser.add_argument('--out-dir')
    args = parser.parse_args()

    # Build editor schema.
    the_editor = aprinter_config_editor.editor()
    editor_schema = the_editor._json_schema()

    # Determine directories.
    src_dir = file_utils.file_dir(__file__)
    libs_dir = os.path.join(src_dir, 'libs')
    jsoneditor_dir = os.path.join(src_dir, '..', 'json-editor')
    if args.out_dir is not None:
        dist_dir = args.out_dir
    else:
        dist_dir = os.path.join(src_dir, 'dist')

    # Remove dist dir.
    if args.rm and os.path.isdir(dist_dir):
        shutil.rmtree(dist_dir)

    # Create dist dir.
    os.mkdir(dist_dir)

    # Copy JS libraries.
    shutil.copyfile(os.path.join(jsoneditor_dir, 'dist', 'jsoneditor.min.js'),
                    os.path.join(dist_dir, 'jsoneditor.js'))
    shutil.copyfile(os.path.join(libs_dir, 'FileSaver.min.js'),
                    os.path.join(dist_dir, 'FileSaver.js'))
    shutil.copyfile(os.path.join(libs_dir, 'jquery-1.11.2.min.js'),
                    os.path.join(dist_dir, 'jquery.js'))

    # Copy Bootstrap.
    subprocess.call([
        'unzip', '-q',
        os.path.join(libs_dir, 'bootstrap-3.3.2-dist.zip'), '-d', dist_dir
    ])
    os.rename(os.path.join(dist_dir, 'bootstrap-3.3.2-dist'),
              os.path.join(dist_dir, 'bootstrap'))

    # Copy files.
    for filename in ['index.html', 'Ajax-loader.gif']:
        shutil.copyfile(os.path.join(src_dir, filename),
                        os.path.join(dist_dir, filename))

    # Read default configuration.
    default_config = json.loads(
        file_utils.read_file(os.path.join(src_dir, 'default_config.json')))

    # Build and write init.js.
    init_js_template = file_utils.read_file(os.path.join(src_dir, 'init.js'))
    init_js = rich_template.RichTemplate(init_js_template).substitute({
        'SCHEMA':
        json.dumps(editor_schema, separators=(',', ':'), sort_keys=True),
        'DEFAULT':
        json.dumps(default_config, separators=(',', ':'), sort_keys=True)
    })
    file_utils.write_file(os.path.join(dist_dir, 'init.js'), init_js)
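A possible command-line invocation of the build script above (the script filename is an assumption; only --rm and --out-dir are defined by the parser):

#   python build_dist.py --rm --out-dir /tmp/aprinter-editor-dist
#   python build_dist.py                 # builds into <src_dir>/dist by default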
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--rm', action='store_true')
    parser.add_argument('--out-dir')
    args = parser.parse_args()
    
    # Build editor schema.
    the_editor = aprinter_config_editor.editor()
    editor_schema = the_editor.json_schema()
    
    # Determine directories.
    src_dir = file_utils.file_dir(__file__)
    libs_dir = os.path.join(src_dir, 'libs')
    jsoneditor_dir = os.path.join(src_dir, '..', 'json-editor')
    if args.out_dir is not None:
        dist_dir = args.out_dir
    else:
        dist_dir = os.path.join(src_dir, 'dist')
    
    # Remove dist dir.
    if args.rm and os.path.isdir(dist_dir):
        shutil.rmtree(dist_dir)
    
    # The temp dir is where we will prepare contents before hashification.
    temp_dir = os.path.join(dist_dir, 'TEMP')

    # Create directories.
    os.mkdir(dist_dir)
    os.mkdir(temp_dir)
    
    # Copy JS libraries.
    shutil.copyfile(os.path.join(jsoneditor_dir, 'dist', 'jsoneditor.min.js'), os.path.join(temp_dir, 'jsoneditor.js'))
    shutil.copyfile(os.path.join(libs_dir, 'FileSaver.min.js'), os.path.join(temp_dir, 'FileSaver.js'))
    shutil.copyfile(os.path.join(libs_dir, 'jquery-1.11.2.min.js'), os.path.join(temp_dir, 'jquery.js'))
    
    # Copy Bootstrap.
    subprocess.call(['unzip', '-q', os.path.join(libs_dir, 'bootstrap-3.3.2-dist.zip'), '-d', temp_dir])
    os.rename(os.path.join(temp_dir, 'bootstrap-3.3.2-dist'), os.path.join(temp_dir, 'bootstrap'))

    # Copy files.
    for filename in ['index.html', 'Ajax-loader.gif']:
        shutil.copyfile(os.path.join(src_dir, filename), os.path.join(temp_dir, filename))
    
    # Read default configuration.
    default_config = json.loads(file_utils.read_file(os.path.join(src_dir, 'default_config.json')))
    
    # Build and write init.js.
    init_js_template = file_utils.read_file(os.path.join(src_dir, 'init.js'))
    init_js = rich_template.RichTemplate(init_js_template).substitute({
        'SCHEMA': json.dumps(editor_schema, separators=(',',':'), sort_keys=True),
        'DEFAULT': json.dumps(default_config, separators=(',',':'), sort_keys=True)
    })
    file_utils.write_file(os.path.join(temp_dir, 'init.js'), init_js)

    # Run hashify to produce the final contents.
    resource_hashifier.hashify(temp_dir, _HASHIFY_CONFIG, dist_dir)

    # Remove the temp dir.
    shutil.rmtree(temp_dir)
Example #3
def assert_json_files_equal(unit_test, path1, path2):
    """
    Asserts that two files containing JSON are equal.
    :param unit_test: the unit test instance used for assertions
    :param path1: path to the first JSON file
    :param path2: path to the second JSON file
    :return:
    """
    obj1 = json.loads(read_file(path1))
    obj2 = json.loads(read_file(path2))
    assert_object_equals(unit_test, obj1, obj2)
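A minimal usage sketch for the helper above, assuming it is importable in a test module and that both paths point at existing JSON files (the paths here are illustrative):

import unittest

class ManifestTest(unittest.TestCase):
    def test_generated_manifest_matches_expected(self):
        # Both files are parsed with json.loads first, so formatting differences are ignored.
        assert_json_files_equal(self, 'expected/manifest.json', 'build/manifest.json')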
Example #4
def index_obs(lid, rid, format, temp_dir=None, downloader=None):
    """
    Generates a JSON index of an OBS RC.
    The resulting content can be written to a file and uploaded for use in the uW 2.0 and tS 2.0 APIs
    This should contain a single file per chapter.
    :param lid:
    :param rid:
    :param format:
    :param temp_dir: The temporary directory where files will be generated
    :param downloader: This is exposed to allow mocking the downloader
    :return: the obs json blob
    """
    obs_sources = {}
    format_str = format['format']
    if rid == 'obs' and 'type=book' in format_str:
        rc_dir = download_rc(lid, rid, format['url'], temp_dir, downloader)
        if not rc_dir: return obs_sources

        manifest = yaml.load(read_file(os.path.join(rc_dir, 'manifest.yaml')))
        dc = manifest['dublin_core']

        for project in manifest['projects']:
            pid = project['identifier']
            content_dir = os.path.join(rc_dir, project['path'])
            key = '$'.join([pid, lid, rid])
            chapters_json = _obs_chapters_to_json(
                os.path.normpath(content_dir))

            # app words
            app_words = {}
            app_words_file = os.path.join(rc_dir, '.apps', 'uw',
                                          'app_words.json')
            if os.path.exists(app_words_file):
                try:
                    app_words = json.loads(read_file(app_words_file))
                except Exception as e:
                    print('ERROR: failed to load app words: {}'.format(e))

            remove_tree(rc_dir, True)

            # TRICKY: OBS has a single project so we don't need to continue looping
            return {
                'app_words': app_words,
                'chapters': chapters_json,
                'date_modified': dc['modified'].replace('-', '').split('T')[0],
                'direction': dc['language']['direction'],
                'language': dc['language']['identifier']
            }
Example #5
    def gal_model_calcs(self):
        '''
        Read in galaxy SEDs, regrid them, calculate the model flux in each
        filter for each redshift.
        '''
        print('Using %ld gal templates over %ld redshifts'
              % (self._n_gal_template, self.nz))

        self.zstep = (self.zmax - self.zmin) / (self.nz - 1)

        model_flux_gals = np.zeros(
            (self.n_filter, self._n_gal_template * self.nz))

        for i in xrange(self._n_gal_template):

            sed_length = get_file_length(i, self.gals_sed_input)

            p_lam, p_val = read_file(i, self.gals_sed_input)

            for j in xrange(self.nz):
                z = self.zmin + j * self.zstep
                for k in xrange(self.n_filter):
                    p_val_fine = regrid_sed(
                        z, p_lam, p_val, self.filter_lgth_fine[k],
                        sed_length, self.filter_lamb_fine[k])
                    mod_val = integrate_sed(
                        self.filter_lgth_fine[k], p_val_fine,
                        self.filter_lamb_fine[k], self.filter_thru_fine[k])

                    model_flux_gals[k][j + i * self.nz] = mod_val

        self.model_flux_gals = model_flux_gals
Example #6
def interp_training_vec(train, val1, val2, n):
    alpha_range = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]

    # arrays of arrays of ngram indices up to n
    train_ngram_configs = [
        data_processing.doc_to_ngram_indices(train, i + 1, char_to_index)
        for i in range(n)
    ]
    val1_ngram_configs = [
        data_processing.doc_to_ngram_indices(val1, i + 1, char_to_index)
        for i in range(n)
    ]

    # this is an array of ngram indices
    val2_ngram_idxs = data_processing.doc_to_ngram_indices(
        val2, n, char_to_index)

    probs, lambdas, alpha = lang_model.train_interp(train_ngram_configs,
                                                    val1_ngram_configs,
                                                    val2_ngram_idxs,
                                                    alpha_range, n)

    test = file_utils.read_file('data/test', n)
    test_ngram_idxs = data_processing.doc_to_ngram_indices(
        test, n, char_to_index)

    test_perp = lang_model.perplexity(test_ngram_idxs, probs)
    print('******** RESULT ********')
    print(f'Lambdas:          {lambdas}')
    print(f'Alphas:           {alpha}')
    print(f'Test perplexity:  {test_perp}')
    print('************************')

    return probs
Example #7
    def star_model_calcs(self):
        '''
        Read in star SEDs, regrid them, calculate the model flux in each filter
        for each redshift.
        '''
        print('Using %ld star templates' % self._n_star_template)

        model_flux_stars = np.zeros((self.n_filter, self._n_star_template))

        for i in xrange(self._n_star_template):
            sed_length = get_file_length(i, self.stars_sed_input)

            p_lam, p_val = read_file(i, self.stars_sed_input)

            for k in xrange(self.n_filter):
                p_val_fine = regrid_sed(
                    0.0, p_lam, p_val, self.filter_lgth_fine[k],
                    sed_length, self.filter_lamb_fine[k])

                mod_val = integrate_sed(
                    self.filter_lgth_fine[k], p_val_fine,
                    self.filter_lamb_fine[k], self.filter_thru_fine[k])

                model_flux_stars[k][i] = mod_val

        self.model_flux_stars = model_flux_stars
Example #8
def create_doctors(intent):
    content = read_file('doctors.txt')
    doctors = content[intent]
    elements = []
    for doctor in doctors:
        values = doctor.split("|")
        elements.append({
            "title":
            values[0],
            "image_url":
            DOCTOR_IMG,
            "subtitle":
            values[2],
            "buttons": [{
                "type": "web_url",
                "url": "https://petersfancybrownhats.com",
                "title": "Revisar Curriculum"
            }, {
                "type": "postback",
                "payload": values[0] + '_' + values[1],
                "title": "Agendar"
            }]
        })
    return elements


#endregion
Example #9
def _obs_chapters_to_json(dir):
    """
    Converts OBS chapter markdown into JSON.
    :param dir: the OBS book content directory
    :return: a list of chapter dicts sorted by chapter number
    """
    obs_title_re = re.compile(r'^\s*#+\s*(.*)', re.UNICODE)
    obs_footer_re = re.compile(r'\_+([^\_]*)\_+$', re.UNICODE)
    obs_image_re = re.compile(r'.*!\[[^\]]*\]\(.*\).*', re.IGNORECASE | re.UNICODE)
    chapters = []
    for chapter_file in os.listdir(dir):
        if chapter_file == 'config.yaml' or chapter_file == 'toc.yaml':
            continue
        chapter_slug = chapter_file.split('.md')[0]
        path = os.path.join(dir, chapter_file)
        if os.path.isfile(path):
            chapter_file = os.path.join(dir, path)
            chapter_str = read_file(chapter_file).strip()

            title_match = obs_title_re.match(chapter_str)
            if title_match:
                title = title_match.group(1)
            else:
                print('ERROR: missing title in {}'.format(chapter_file))
                continue
            chapter_str = obs_title_re.sub('', chapter_str).strip()
            lines = chapter_str.split('\n')
            reference_match = obs_footer_re.match(lines[-1])
            if reference_match:
                reference = reference_match.group(1)
            else:
                print('ERROR: missing reference in {}'.format(chapter_file))
                continue
            chapter_str = '\n'.join(lines[0:-1]).strip()
            chunks = obs_image_re.split(chapter_str)

            frames = []
            chunk_index = 0
            for chunk in chunks:
                chunk = chunk.strip()
                if not chunk:
                    continue
                chunk_index += 1
                id = '{}-{}'.format(chapter_slug, '{}'.format(chunk_index).zfill(2))
                frames.append({
                    'id': id,
                    'img': 'https://cdn.door43.org/obs/jpg/360px/obs-en-{}.jpg'.format(id),
                    'text': chunk
                })
            frames.sort(key=__extract_frame_id, reverse=False)
            chapters.append({
                'frames': frames,
                'number': chapter_slug,
                'ref': reference,
                'title': title
            })

    chapters.sort(key=__extract_chapter_number, reverse=False)
    return chapters
Example #10
def assert_object_equals_file(unit_test, obj, path):
    """
    Asserts that an object equals the contents of a JSON file.
    :param unit_test: the unit test instance used for assertions
    :param obj: the object to compare
    :param path: path to the JSON file containing the expected object
    :return:
    """
    expected_obj = json.loads(read_file(path))
    assert_object_equals(unit_test, expected_obj, obj)
Example #11
def login():
    if file_utils.read_file("teacher_data.json"):
        teacher_name = input("请输入你的帐号姓名:")
        teacher_pwd = input("请输入你的帐号密码:")
        teacher_dict = file_utils.read_json("teacher_data.json", {})
        if teacher_dict.get(teacher_name) == teacher_pwd:
            student_utils.manager_stu()
        else:
            print("账户名字或者密码不正确!")
    else:
        print("当前teacher账户为空,请先注册")
Example #12
def _compare_files(expected, generated, sort=False):
    """Compares file contents of generated file with expected file."""
    def _sort_lines(data):
        """Sort all lines of data using newline as delimiter."""
        return '\n'.join(sorted(data.split('\n')))

    expected_data = fu.read_file(expected)
    generated_data = _normalize(fu.read_file(generated))
    if sort:
        expected_data = _sort_lines(expected_data)
        generated_data = _sort_lines(_normalize(fu.read_file(generated)))

    matches = _sort_lines(expected_data) == _sort_lines(generated_data)
    if not matches and UPDATE_TEST_DATA:
        fu.write_file(expected, generated_data)
    if not matches and not UPDATE_TEST_DATA:
        print 'Files {} and {} differ!'.format(expected, generated)
        print '{}\nExpected output: {}'.format('~' * 80, expected_data)
        print '{}\nGenerated output: {}'.format('~' * 80, generated_data)
    return matches
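A short usage sketch for _compare_files (the file paths are illustrative):

if not _compare_files('testdata/expected_rules.txt', 'out/generated_rules.txt', sort=True):
    raise AssertionError('generated file does not match the expected file')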
Example #13
def main():
    while True:
        print(file_utils.read_file("teacher_ui.txt"))
        num = input("请选择(1-3):")
        if num == "1":
            teacher_utils.login()
        elif num == "2":
            teacher_utils.register()
        elif num == "3":
            teacher_utils.exit()
        else:
            print("输入错误")
Example #14
def assert_s3_equals_api_text(unit_test, mock_s3, mock_api, key):
    """
    Asserts that the s3 file identified by the given key matches
    the equivalent file in the api
    :param unit_test: an instance of UnitTest
    :param mock_s3:
    :param mock_api:
    :param key: the relative path to the key
    :return:
    """
    unit_test.assertIn(key, mock_s3._recent_uploads)
    s3_text = read_file(mock_s3._recent_uploads[key])
    api_text = mock_api.get_url(key)
    unit_test.assertEquals(s3_text, api_text)
Example #15
    def test_serialize(self):
        # Arrange
        test_output_file_name = './test_serialize_pom.xml'
        expected = self.test_xmlp.tostring()

        # Act
        self.test_xmlp.serialize(test_output_file_name)
        actual = xml_utils.XML_Parser(file_utils.read_file(test_output_file_name)).tostring()

        # Assert
        self.assertEqual(expected, actual)

        # Cleanup
        os.remove(test_output_file_name)
Example #16
def show_references(name):
    from file_utils import read_file

    X = df[df.name == name].itertuples()
    while True:
        try:
            resp = next(X)[1:]
            line_no, col_no, _, _, file_path = resp
            data = read_file(file_path)
            readlines = data.splitlines()
            for ix, xs in enumerate(readlines):
                if ix == line_no - 1:
                    print(xs.strip(), "\n\n", "*" * 50, "\n")
        except StopIteration:
            break
Example #17
def assert_s3_equals_api_json(unit_test, mock_s3, mock_api, key):
    """
    Asserts that the s3 file identified by the given key matches
    the equivalent file in the api
    :param unit_test: an instance of UnitTest
    :param mock_s3:
    :param mock_api:
    :param key: the relative path to the key
    :return:
    """
    unit_test.assertIn(key, mock_s3._recent_uploads)
    s3_str = read_file(mock_s3._recent_uploads[key])
    api_str = mock_api.get_url(key)
    s3_obj = json.loads(s3_str)
    api_obj = json.loads(api_str)
    assert_object_equals(unit_test, s3_obj, api_obj)
Example #18
def search_stu(data: list):
    while True:
        print(file_utils.read_file("student_search_ui.txt"))
        num = input("请选择(1-5):")
        if num == "1":
            search_byname(data)
        elif num == "2":
            search_bysex(data)
        elif num == "3":
            search_byage(data)
        elif num == "4":
            search_all(data)
        elif num == "5":
            break
        else:
            print("输入错误")
Example #19
def manager_stu():
    while True:
        print(file_utils.read_file("student_ui.txt"))
        num = input("请选择(1-5):")
        student_list = file_utils.read_json("student_data.json", [])
        if num == "1":
            add_stu(student_list, "student_data.json")
        elif num == "2":
            search_stu(student_list)
        elif num == "3":
            mod_stu(student_list)
        elif num == "4":
            del_stu(student_list)
        elif num == "5":
            break
        else:
            print("输入错误")
Example #20
def validate_binary_output(binary_path, grep_items):
    """Check that given binary has all the list of patterns."""
    LOGGER.debug('Checking output of %s binary.', binary_path)
    try:
        output_file = fu.get_temp_file()
        with open(output_file, 'w') as stdout_obj:
            subprocess.check_call(binary_path, stdout=stdout_obj)
        stdout_lines = fu.read_file(output_file).split('\n')
        for grep_item in grep_items:
            matched_lines = [line for line in stdout_lines if grep_item in line]
            if not matched_lines:
                LOGGER.error('Expected pattern: %s', grep_item)
                LOGGER.error('   Actual output: %s', '\n'.join(stdout_lines))
                msg = 'Binary output validation failed for %s!!' % binary_path
                raise Error(msg)
    finally:
        os.remove(output_file)
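A usage sketch for validate_binary_output, with a made-up binary path and expected output substrings:

validate_binary_output('/path/to/my_tool', ['my_tool version', 'initialization complete'])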
Example #21
def _read_files_and_collect_refs(root_dir_path, file_paths, refmatcher_index):
    file_entries = {}

    for file in file_paths:
        file_content = file_utils.read_file(os.path.join(root_dir_path, file))
        
        refmatcher = refmatcher_index.get_matcher_for_file(file)
        if refmatcher is not None:
            file_refs = [_normalize_ref(file, ref_val)
                for ref_val in refmatcher.find_refs(file_content)]
        else:
            file_refs = []
        
        file_entries[file] = {
            'content': file_content,
            'refmatcher': refmatcher,
            'refs': file_refs
        }
    
    return file_entries
Example #22
def _read_files_and_collect_refs(root_dir_path, file_paths, refmatcher_index):
    file_entries = {}

    for file in file_paths:
        file_content = file_utils.read_file(os.path.join(root_dir_path, file))

        refmatcher = refmatcher_index.get_matcher_for_file(file)
        if refmatcher is not None:
            file_refs = [
                _normalize_ref(file, ref_val)
                for ref_val in refmatcher.find_refs(file_content)
            ]
        else:
            file_refs = []

        file_entries[file] = {
            'content': file_content,
            'refmatcher': refmatcher,
            'refs': file_refs
        }

    return file_entries
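A usage sketch for _read_files_and_collect_refs (the root directory, file list and refmatcher_index object are placeholders); each returned entry maps a file path to its content, matcher and normalized refs:

entries = _read_files_and_collect_refs('/repo', ['src/app.py', 'docs/readme.md'], refmatcher_index)
for path, entry in entries.items():
    print('{}: {} refs'.format(path, len(entry['refs'])))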
Example #23
def _obs_chapters_to_json(dir):
    """
    Converts obs chapter markdown into json
    :param dir: the obs book content directory
    :return:
    """
    chapters = []
    if os.path.isdir(dir):
        for chapter_file in os.listdir(dir):
            if chapter_file == 'config.yaml' or chapter_file == 'toc.yaml':
                continue
            chapter_slug = chapter_file.split('.md')[0]
            path = os.path.join(dir, chapter_file)
            if os.path.isfile(path):
                chapter_file = os.path.join(dir, path)
                chapter_str = read_file(chapter_file).strip()

                chapter_json = _convert_obs_chapter_to_json(chapter_str, chapter_slug, chapter_file)
                chapters.append(chapter_json)

    chapters.sort(key=__extract_chapter_number, reverse=False)
    return chapters
Example #24
def check_submitq_test(expected_tests, changed_files):
    """Check if submit queue outputs expected tests for given changed files."""
    LOGGER.debug('Running SUBMITQ tests on: [%s].', ', '.join(changed_files))
    try:
        # In debug mode, the tests are only listed. Otherwise the tests are
        # actually executed.
        os.environ['SUBMITQ_DEBUG_MODE'] = 'true'
        input_file = fu.get_temp_file()
        output_file = fu.get_temp_file()
        input_data = [os.path.join(BUILD_ROOT, f) for f in changed_files]
        fu.write_file(input_file, '\n'.join(input_data))
        with open(output_file, 'w') as stdout_obj:
            subprocess.check_call([BU_SCRIPT, 'do_test_changes', input_file],
                                  stdout=stdout_obj)
        actual_tests = [l for l in fu.read_file(output_file).split('\n') if l]
        expected_tests.sort()
        actual_tests.sort()
        if expected_tests != actual_tests:
            LOGGER.error('Expected tests: %s', ' '.join(expected_tests))
            LOGGER.error('  Actual tests: %s', ' '.join(actual_tests))
            raise Error('SUBMITQ tests failed!!')
    finally:
        os.remove(input_file)
        os.remove(output_file)
Example #25
    def filter_calcs(self):
        '''
        Reads in the models seds and filter throughput response curves,
        regrids the filters onto a finer grid for interpolation,
        and calls a routine to calculate the filter flux zero-points.
        Testing indicates details of regridding and interpolation scheme
        is not very important.
        '''
        sed_length = 0

        self.n_filter = get_num_files(self.filters_input)

        if self.n_filter < 2:
            raise ValueError

        print('Found %d Filters' % self.n_filter)

        self._n_star_template = get_num_files(self.stars_sed_input)
        self._n_gal_template = get_num_files(self.gals_sed_input)

        # find the longest SED file among the bunch
        # first the star SEDs
        for i in xrange(self._n_star_template):
            n = get_file_length(i, self.stars_sed_input)
            if n * 2 > sed_length:
                sed_length = n * 2

        # do the same for the galaxies
        for i in xrange(self._n_gal_template):
            n = get_file_length(i, self.gals_sed_input)
            if n * 2 > sed_length:
                sed_length = n * 2

        self.filter_lgth_fine = np.zeros(self.n_filter)
        self.norm = np.zeros(self.n_filter)
        self.filter_lamb_fine = {}
        self.filter_thru_fine = {}

        for i in xrange(self.n_filter):
            n = get_file_length(i, self.filters_input)
            filter_length = n
            regrid_factor = np.round(float(sed_length) / float(n))

            filter_lgth_fine = n * regrid_factor

            filt_lamb, filt_thru = read_file(i, self.filters_input)

            filter_lamb_fine, filter_thru_fine = \
                regrid_filter(filt_lamb, filt_thru,
                              filter_length, filter_lgth_fine)

            norm = calc_normalization(filter_lamb_fine, filter_thru_fine,
                                      filter_lgth_fine)

            print("Filter %ld has (AB) zeropoint flux normalization: %f"
                  % (i, norm))

            self.filter_lgth_fine[i] = filter_lgth_fine
            self.norm[i] = norm
            self.filter_lamb_fine[i] = filter_lamb_fine
            self.filter_thru_fine[i] = filter_thru_fine
Example #26
from redbaron import RedBaron
from file_utils import listfiles
from file_utils import read_file
from sys import argv

project_path = argv[1]

for file_name in listfiles(project_path, '.py'):
    source_code = read_file(file_name)
    red = RedBaron(source_code)
    resp = red.find_all('ClassNode')
    while resp:
        if resp[0].inherit_from.find('namenode', value='object'):
            print(file_name)
            print(resp[0].name)
        resp.pop(0)
Example #27
    def setUp(self):
        self.sample_rekap_akhir_pekan = utils.read_file('rekap_harian.txt')
        self.sample_rekap_ip = utils.read_file('rekap_pekanan.txt')
Example #28
else:
    target = None

module_names = {}
for path in sources:
    module_names[path] = re.sub(r'\.py$', '', os.path.basename(path))

if options.main:
    main = options.main
else:
    main = module_names[sources[0]]

text = ''
for source in sources:
    module = module_names[source]
    content = fix_convert_text_callers(file_utils.read_file(source))
    if module == main:
        text += content
    else:
        text += rename_convert_text(strip_main(content), module)
text = move_main(strip_modules(text, module_names.values()))

if target is None:
    print text
else:
    f = open(target, 'w')
    try:
        f.write(text)
    finally:
        f.close()
Example #29
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--aprinter-src-dir')
    parser.add_argument('--request-file')
    parser.add_argument('--response-file')
    parser.add_argument('--temp-dir')
    parser.add_argument('--stderr-truncate-bytes')
    parser.add_argument('--python')
    parser.add_argument('--nix-build')
    parser.add_argument('--mkdir')
    parser.add_argument('--rsync')
    parser.add_argument('--p7za')
    parser.add_argument('--bash')
    parser.add_argument('--head')
    parser.add_argument('--cat')
    args = parser.parse_args()
    
    # Read the request.
    with file_utils.use_input_file(args.request_file) as input_stream:
        request = input_stream.read()
    
    # The response will be built from these variables.
    response_success = False
    response_message = ''
    response_error = None
    response_filename = None
    response_data = None

    try:
        # Create a subfolder which we will archive.
        build_path = os.path.join(args.temp_dir, 'aprinter-build')
        run_process_limited(args, [args.mkdir, build_path], '', 'The mkdir failed!?')
        
        # Write the configuration to the build folder.
        config_path = os.path.join(build_path, 'config.json')
        file_utils.write_file(config_path, request)
        
        # Do the build.
        result_path = os.path.join(args.temp_dir, 'result')
        nixbuild_cmd = [args.nix_build, args.aprinter_src_dir, '-A', 'aprinterBuild',
            '-o', result_path, '--argstr', 'aprinterConfigFile', config_path]
        run_process_limited(args, nixbuild_cmd, '', 'Failed to build APrinter.')
        
        # Copy the build to the build_path.
        run_process_limited(args, [args.rsync, '-rL', '--chmod=ugo=rwX', '{}/'.format(result_path), '{}/'.format(build_path)], '', 'The rsync failed!?')
        
        # Produce the archive.
        archive_filename = 'aprinter-build.zip'
        archive_path = os.path.join(args.temp_dir, archive_filename)
        archive_cmd = [args.p7za, 'a', archive_path, build_path]
        run_process_limited(args, archive_cmd, '', 'The p7za failed!?')
        
        # Read the archive contents.
        archive_contents = file_utils.read_file(archive_path)
        
        response_success = True
        response_message = 'Compilation successful.'
        response_filename = archive_filename
        response_data = archive_contents
        
    except ProcessError as e:
        response_message = str(e)
        response_error = e.stderr_output
    except Exception as e:
        response_message = str(e)
    
    # Build the response.
    response = collections.OrderedDict({})
    response['success'] = response_success
    response['message'] = response_message
    if response_error is not None:
        response['error'] = response_error
    if response_filename is not None:
        response['filename'] = response_filename
        response['data'] = base64.b64encode(response_data)
    
    # Write the response.
    with file_utils.use_output_file(args.response_file) as output_stream:
        json.dump(response, output_stream)
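For reference, the response written to --response-file above is a JSON object of roughly this shape (the values are illustrative; 'error', 'filename' and 'data' only appear when they were set):

# {
#     "success": true,
#     "message": "Compilation successful.",
#     "filename": "aprinter-build.zip",
#     "data": "<base64-encoded archive>"
# }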
Example #30
    def __init__(self):
        self.head = file_utils.read_file("resources/head.html")
        self.tail = file_utils.read_file("resources/tail.html")
        self.body = ""
Example #31
save_state(STATE_WELCOME)
"""Fin de Estado"""
"""Cargamos los modelos y Stemmer"""
nltk.download('punkt')
nltk.download('stopwords')
model = load_model()
target = load_encoder()
encoder = load_encoder()
vector = load_vector()
stemmer = get_stemmer()
"""Fin de carga de modelos y Stemmer"""
#endregion

#region Create Bot class
bot = create_bot()
utterances = read_file('utterances.txt')
#endregion

app = Flask(__name__)


@app.route("/")
@app.route("/index")
def index():
    return render_template("index.html", title="Chatbot Medical")


@app.route("/webhook", methods=['POST', 'GET'])
def webhook():
    if request.method == 'GET':
        return verify_webhook(request)
Example #32
    def setUp(self):
        self.weekly_report = WeeklyReport(utils.read_file('rekap_harian.txt'), utils.read_file('rekap_pekanan.txt'))
Example #33
def iexec(cmd, **kwargs):
    """
    Perform a command on local machine with subprocess.Popen
    contains many conveniences and logging capabilities
    returns an ExecResult object which also contains many conveniences
    :param cmd: the command
    :param kwargs: any kwargs
    :return: ExecResult Object
    """
    show_log = kwargs.pop('show_log', True)
    to_console = kwargs.pop('to_console', True)
    print_to_console = kwargs.pop('print_to_console', False)
    redirect_output = kwargs.pop('redirect_output', False)
    redirect_file_name = kwargs.pop('redirect_file_name', None)
    log_as_debug = kwargs.pop('log_as_debug', False)
    log_as_trace = kwargs.pop('log_as_trace', False)
    log_as_level = kwargs.pop('log_as_level', None)
    pickle_result = kwargs.pop('pickle_result', '')
    dump_file = kwargs.pop('dump_file', None)
    trace_file = kwargs.pop('trace_file', None)
    timeout = kwargs.pop('timeout', 0)
    dump_file_rotate = kwargs.pop('dump_file_rotate', False)
    alt_out = kwargs.pop('alt_out', None)
    alt_err = kwargs.pop('alt_err', alt_out)
    iexec_communicate = kwargs.pop('iexec_communicate', None)
    iexec_communicate_input = kwargs.pop('iexec_communicate_input', None)
    dump_kwargs = kwargs.pop('dump_kwargs', False)

    if not isinstance(cmd, str):
        cmd = subprocess.list2cmdline(cmd)

    if redirect_output and running_on_windows:
        if redirect_file_name is None:
            redirect_file = tempfile.NamedTemporaryFile(
                suffix=".txt",
                prefix="gstmp.{}.redirect.".format(get_datestring()),
                dir=ir_artifact_dir,
                delete=False)
            # closing the file, since we just need its name and it must be closed before using redirect the output
            redirect_file.close()
            redirect_file_name = redirect_file.name
        cmd += ' > {} 2>&1'.format(redirect_file_name)

    if print_to_console:
        print cmd

    if show_log:
        msg = 'exec: {}'.format(cmd)
        if log_as_level:
            get_log_func(log_as_level)(msg)
        elif log_as_trace:
            log.trace(msg)
        elif log_as_debug:
            log.debug(msg)
        else:
            log.info(msg)

    pkwargs = {
        'shell': True,
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE
    }
    subprocess_kwargs = {}
    for arg in SUBPROCESS_KWARGS:
        if arg in kwargs and arg not in pkwargs:
            pkwargs[arg] = kwargs[
                arg]  # kwargs to actually pass to the subprocess
            subprocess_kwargs[arg] = kwargs[
                arg]  # the kwargs the user supplied

    stdout = []
    stderr = []
    ordered_out = []
    start_time = time.time()

    proc = subprocess.Popen(args=cmd, **pkwargs)

    def _write_to_stdout(line):
        if to_console:
            sys.stdout.write(line)
        if print_to_console:
            print line
        if alt_out is not None and callable(alt_out):
            alt_out(contents=line)
        stdout.append(line)
        ordered_out.append(line)

    def _write_to_stderr(line):
        if to_console:
            sys.stderr.write(line)
        if print_to_console:
            print line
        if alt_err is not None and callable(alt_err):
            alt_err(contents=line)
        stderr.append(line)
        ordered_out.append(line)

    if running_on_windows:
        if iexec_communicate:
            stdout_buffer, stderr_buffer = proc.communicate(
                iexec_communicate_input)
            if redirect_output:
                stdout_buffer = read_file(redirect_file_name)
            for stdout_line in stdout_buffer:
                _write_to_stdout(stdout_line)
            for stderr_line in stderr_buffer:
                _write_to_stderr(stderr_line)
            rc = proc.wait()
        else:

            def _enqueue_stream(stream, queue):
                for line in iter(stream.readline, b''):
                    queue.put(line)
                stream.close()

            qo = Queue()
            to = Thread(target=_enqueue_stream, args=(proc.stdout, qo))
            to.daemon = True  # thread dies with the program
            to.start()

            qe = Queue()
            te = Thread(target=_enqueue_stream, args=(proc.stderr, qe))
            te.daemon = True  # thread dies with the program
            te.start()

            while True:
                try:
                    stdout_line = qo.get_nowait()  # or q.get(timeout=.1)
                except Empty:
                    pass
                else:
                    _write_to_stdout(stdout_line)
                    sys.stdout.flush()
                try:
                    stderr_line = qe.get_nowait()  # or q.get(timeout=.1)
                except Empty:
                    pass
                else:
                    _write_to_stderr(stderr_line)
                    sys.stderr.flush()

                rc = proc.poll()
                if rc is not None:
                    # finished proc, read all the rest of the lines from the buffer
                    try:
                        while True:
                            stdout_line = qo.get_nowait(
                            )  # or q.get(timeout=.1)
                            _write_to_stdout(stdout_line)
                            sys.stdout.flush()
                    except Empty:
                        pass
                    try:
                        while True:
                            stderr_line = qe.get_nowait(
                            )  # or q.get(timeout=.1)
                            _write_to_stderr(stderr_line)
                            sys.stderr.flush()
                    except Empty:
                        pass
                    if redirect_output:
                        stdout_buffer = read_file(redirect_file_name)
                        for stdout_line in stdout_buffer:
                            _write_to_stdout(stdout_line)
                    break
    else:
        reads = [proc.stdout.fileno(), proc.stderr.fileno()]
        while True:
            ret = select.select(reads, [], [])

            for fd in ret[0]:
                if fd == proc.stdout.fileno():
                    stdout_line = proc.stdout.readline()
                    _write_to_stdout(stdout_line)
                if fd == proc.stderr.fileno():
                    stderr_line = proc.stderr.readline()
                    _write_to_stderr(stderr_line)

            rc = proc.poll()
            if rc is not None:
                # finished proc, read all the rest of the lines from the buffer
                stdout_buffer = proc.stdout.readlines()
                for stdout_line in stdout_buffer:
                    _write_to_stdout(stdout_line)
                stderr_buffer = proc.stderr.readlines()
                for stderr_line in stderr_buffer:
                    _write_to_stderr(stderr_line)
                break

            if timeout and time.time() - start_time > timeout:
                raise RuntimeError('Timeout executing cmd on linux')

    time_taken = time.time() - start_time
    result = ExecResult(stdout, stderr, rc, time_taken, cmd, ordered_out,
                        start_time, timeout, subprocess_kwargs)

    if dump_file:
        result.to_dump_file(dump_file,
                            dump_file_rotate,
                            dump_kwargs=dump_kwargs)

    if trace_file:
        write_file(trace_file, contents=result.append_output(), mode='a')

    if pickle_result:
        with open(pickle_result, 'wb') as f:
            pickle.dump(result, f, protocol=1)

    return result
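A usage sketch for iexec; the keyword arguments shown are real options handled above, while the exact attributes of the returned ExecResult are not shown in this snippet and are therefore left unspecified:

result = iexec(['ls', '-la', '/tmp'], timeout=60, to_console=False, dump_file='/tmp/ls_dump.txt')
# ExecResult is constructed from (stdout, stderr, rc, time_taken, cmd, ordered_out, ...),
# so the exit code and captured output can be inspected on the returned object.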
Example #34
def main(args):
    logging.basicConfig(format="%(levelname)s: %(message)s",
                        level=logging.INFO)
    logging.getLogger('matplotlib.font_manager').disabled = True

    for file in args.dataset_file:
        logging.info("Reading file: %s." % file)
        pairs, dataset_similarities = file_utils.read_file(file)
        cache_file_id = hashlib.md5(open(file, 'rb').read()).hexdigest() + file

        models = [
            BertSentenceTester(pairs, version='bert-large-nli-mean-tokens'),
            BertSentenceTester(pairs, version='bert-base-nli-mean-tokens'),
            BertSentenceTester(pairs, version='roberta-base-nli-mean-tokens'),
            BertSentenceTester(pairs,
                               version='distilbert-base-nli-mean-tokens'),
            BertTester(pairs,
                       similarity_type='head',
                       version='bert-large-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='head',
                       version='bert-base-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='head',
                       version='roberta-base',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='head',
                       version='distilbert-base-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='bert-large-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='bert-base-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='roberta-base',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='distilbert-base-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='bert-large-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='bert-base-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='roberta-base',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='distilbert-base-uncased',
                       combine_method='sum'),
            BertTester(pairs,
                       similarity_type='head',
                       version='bert-large-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='head',
                       version='bert-base-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='head',
                       version='roberta-base',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='head',
                       version='distilbert-base-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='bert-large-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='bert-base-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='roberta-base',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-subj',
                       version='distilbert-base-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='bert-large-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='bert-base-uncased',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='roberta-base',
                       combine_method='concat'),
            BertTester(pairs,
                       similarity_type='dep-obj',
                       version='distilbert-base-uncased',
                       combine_method='concat'),
        ]

        for model in models:
            # Do not launch dep-obj focused models in NV datasets.
            if len(pairs[0][0].split('@')) < 3 and \
                    hasattr(model, 'similarity_type') and model.similarity_type == 'dep-obj':
                continue
            print("Model: " + str(model) + ", dataset: " + file)
            cache = 'cache/' + str(model) + "-" + cache_file_id
            if os.path.isfile(
                    cache):  # We look for the file of cached similarities.
                with open(cache, "r") as outfile:
                    model_similarities = ast.literal_eval(outfile.read())
            else:
                model_similarities = model.process_pairs()
                with open(
                        cache,
                        "w") as outfile:  # Write the cache file for the future
                    outfile.write(str(model_similarities))

            pearson_corr, _ = pearsonr(dataset_similarities,
                                       model_similarities)
            spearman_corr, _ = spearmanr(dataset_similarities,
                                         model_similarities)
            mean = "{:.2f}".format(100.0 * 2.0 * pearson_corr * spearman_corr /
                                   (pearson_corr + spearman_corr))
            if pearson_corr * spearman_corr < 0:  # If not both positive or negative, standard mean instead of harmonic
                mean = "{:.2f}".format(
                    100.0 * (pearson_corr + spearman_corr) / 2.0) + '*'
            print("\r\tPearson=" + "{:.2f}".format(100.0 * pearson_corr) +
                  ", Spearman=" + "{:.2f}".format(100.0 * spearman_corr) +
                  ", Mean=" + mean)

            # plt.ylim(0.1, 1.1)  # Scale for the plots
            # plt.scatter(dataset_similarities, model_similarities)
            # plt.savefig('plots/' + str(model) + '-' + file + '.png')
            # plt.close()

            dataset_standarized = np.array(dataset_similarities)
            dataset_standarized = (
                dataset_standarized -
                np.mean(dataset_standarized)) / np.std(dataset_standarized)
            model_standarized = np.array(model_similarities)
            model_standarized = (model_standarized - np.mean(model_standarized)
                                 ) / np.std(model_standarized)
            plt.xlim(-1.5, 2.15)  # Scale for the plots
            plt.ylim(-2.5, 2.5)  # Scale for the plots
            plt.scatter(dataset_standarized, model_standarized)
            plt.savefig('plots_standarized/' + str(model) + '-' + file +
                        '.png')
            plt.close()
Example #35
    'add_alpha': add_alpha_training_vec,
    'interpolation': interp_training_vec
}

args = get_args()
task = args.task

if task == 'train':

    infile = args.training_file
    lang = args.language
    train_type = args.train_type
    n = args.n
    format = args.format

    docs = file_utils.read_file(infile, n)
    N = len(docs)

    np.random.shuffle(docs)

    # split data
    tr_i, val_i = int(N * .8), int(N * .9)
    train, val1, val2 = docs[:tr_i], docs[tr_i:val_i], docs[val_i:]

    training_routine = training_routines_dict[train_type]
    probs = training_routine(train, val1, val2, n)

    # save model in specified format
    file_utils.save_model(format, probs, lang, n)
    print('\'{}\' model saved to data folder.'.format(format))
Example #36
    def run_test(self, name):
        input = file_utils.read_file(os.path.join(input_dir, name + '.erb.shpaml'))
        actual_output = aml_erb.convert_text(input)
        expected_output = file_utils.read_file(os.path.join(output_dir, name + '.erb'))
        self.assertEqual(expected_output, actual_output)
Example #37
def get_experiment(task: str):
    if task == "depth":
        return DepthExperiment
    else:
        raise ValueError(f"Unknown task {task}")


# %%
if __name__ == "__main__":
    exp_name = sys.argv[1] if len(sys.argv) > 1 else "default"
    resume_from_checkpoint = bool(
        sys.argv[2] == "1") if len(sys.argv) > 2 else None
    fast_dev_run = int(sys.argv[3]) if len(sys.argv) > 3 else False

    config = read_file(exp_name)
    hparams = dict_to_args(config)

    experiment_folder = "experiment_data"
    checkpoint_path = (config["experiment"]["checkpoint_path"]
                       if "checkpoint_path" in config["experiment"] else None)
    if resume_from_checkpoint and not checkpoint_path:
        path = f"./{experiment_folder}/{config['name']}/{config['version']}/checkpoints"
        try:
            checkpoint = "last.ckpt"
            checkpoint_path = os.path.join(path, checkpoint)
            print(f"RESUMING FROM {checkpoint_path}")
        except Exception:
            print("FAILED TO LOAD")

    tb_logger = pl.loggers.TensorBoardLogger(
Example #38
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--aprinter-src-dir')
    parser.add_argument('--request-file')
    parser.add_argument('--response-file')
    parser.add_argument('--temp-dir')
    parser.add_argument('--stderr-truncate-bytes')
    parser.add_argument('--python')
    parser.add_argument('--nix-build')
    parser.add_argument('--mkdir')
    parser.add_argument('--rsync')
    parser.add_argument('--p7za')
    parser.add_argument('--bash')
    parser.add_argument('--head')
    parser.add_argument('--cat')
    args = parser.parse_args()

    # Read the request.
    with file_utils.use_input_file(args.request_file) as input_stream:
        request = input_stream.read()

    # The response will be built from these variables.
    response_success = False
    response_message = ''
    response_error = None
    response_filename = None
    response_data = None

    try:
        # Create a subfolder which we will archive.
        build_path = os.path.join(args.temp_dir, 'aprinter-build')
        run_process_limited(args, [args.mkdir, build_path], '',
                            'The mkdir failed!?')

        # Write the configuration to the build folder.
        config_path = os.path.join(build_path, 'config.json')
        file_utils.write_file(config_path, request)

        # Do the build.
        result_path = os.path.join(args.temp_dir, 'result')
        nixbuild_cmd = [
            args.nix_build, args.aprinter_src_dir, '-A', 'aprinterBuild', '-o',
            result_path, '--argstr', 'aprinterConfigFile', config_path
        ]
        run_process_limited(args, nixbuild_cmd, '',
                            'Failed to build APrinter.')

        # Copy the build to the build_path.
        run_process_limited(args, [
            args.rsync, '-rL', '--chmod=ugo=rwX', '{}/'.format(result_path),
            '{}/'.format(build_path)
        ], '', 'The rsync failed!?')

        # Produce the archive.
        archive_filename = 'aprinter-build.zip'
        archive_path = os.path.join(args.temp_dir, archive_filename)
        archive_cmd = [args.p7za, 'a', archive_path, build_path]
        run_process_limited(args, archive_cmd, '', 'The p7za failed!?')

        # Read the archive contents.
        archive_contents = file_utils.read_file(archive_path)

        response_success = True
        response_message = 'Compilation successful.'
        response_filename = archive_filename
        response_data = archive_contents

    except ProcessError as e:
        response_message = str(e)
        response_error = e.stderr_output
    except Exception as e:
        response_message = str(e)

    # Build the response.
    response = collections.OrderedDict({})
    response['success'] = response_success
    response['message'] = response_message
    if response_error is not None:
        response['error'] = response_error
    if response_filename is not None:
        response['filename'] = response_filename
        response['data'] = base64.b64encode(response_data)

    # Write the response.
    with file_utils.use_output_file(args.response_file) as output_stream:
        json.dump(response, output_stream)
Example #39
class Fields(object):
    CASE = read_file("resources/fields/casefields.txt").split("\n")[:-1]
Example #40
from file_utils import read_csv, write_csv, read_file, write_file
from generate import _render_template, preprocess
from image import pass_gen
from mail import sendmail
import json

# Scraping the webpage and storing the data in a csv
data = scraper('http://scrape.kjscecodecell.com/')
write_csv(data)

# Reading the scraped data from the csv and preprocessing the data
participants = read_csv()
participants = preprocess(participants)

# Getting the list of mails to whom mails have already been sent
sent_mails = read_file()

# Looping over all participants
for participant in participants:
    # Checking if the participant was sent a mail previously
    if participant['email'] not in sent_mails:
        name = participant['name']
        email = participant['email']
        phone = participant['phone']
        payment_status = participant['payment']

        # Generating a message from the template
        message = _render_template(name, payment_status)

        # Generating a custom image
        image_path = pass_gen(name, email, phone)