Example #1
def build(parts, params):
    parser = optparse.OptionParser()
    parser.add_option("-i",
                      "--input",
                      help="list of input documents",
                      default=PARAMS['input'])
    parser.add_option("-p",
                      "--prior",
                      help="file to output metadata to",
                      default=PARAMS['prior'])
    parser.add_option("-l",
                      "--langs",
                      help="read list of languages from file",
                      default=PARAMS['langs'])
    parser.add_option("-e",
                      "--exclude",
                      help="paths to exclude",
                      action='append',
                      default=PARAMS['exclude'])
    parser.add_option("-o",
                      "--outdir",
                      help="directory to output to",
                      default=PARAMS["outdir"])
    opts, args = parser.parse_args()

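    # Generate each part with a cloned copy of the CLI options; every part's
    # "used" file is appended to --exclude so later parts skip documents that
    # earlier parts already consumed.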
    for path, count, order in parts:
        p_opts = copy(opts)
        p_opts.count = count
        p_opts.segments = order
        p_opts.output = os.path.join(opts.outdir, path)
        p_opts.metadata = os.path.join(opts.outdir, path + '-meta')
        p_opts.used = os.path.join(opts.outdir, path + '-used')

        generate.main(p_opts, args)
        opts.exclude.append(p_opts.used)
Example #2
def generate_main(data_dir, extra_flags=None):
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(
        generate_parser,
        [
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--beam', '3',
            '--batch-size', '64',
            '--max-len-b', '5',
            '--gen-subset', 'valid',
            '--no-progress-bar',
            '--print-alignment',
        ] + (extra_flags or []),
    )

    # evaluate model in batch mode
    generate.main(generate_args)

    # evaluate model interactively
    generate_args.buffer_size = 0
    generate_args.max_sentences = None
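    # Replace stdin with an in-memory buffer so interactive.main() reads a
    # single space-tokenized sentence, then restore the real stdin.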
    orig_stdin = sys.stdin
    sys.stdin = StringIO('h e l l o\n')
    interactive.main(generate_args)
    sys.stdin = orig_stdin
Example #3
def generate_main(data_dir, extra_flags=None):
    if extra_flags is None:
        extra_flags = [
            "--print-alignment",
        ]
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(
        generate_parser,
        [
            data_dir,
            "--path",
            os.path.join(data_dir, "checkpoint_last.pt"),
            "--beam",
            "3",
            "--batch-size",
            "64",
            "--max-len-b",
            "5",
            "--gen-subset",
            "valid",
            "--no-progress-bar",
        ] + (extra_flags or []),
    )

    # evaluate model in batch mode
    generate.main(generate_args)

    # evaluate model interactively
    generate_args.buffer_size = 0
    generate_args.input = "-"
    generate_args.max_sentences = None
    orig_stdin = sys.stdin
    sys.stdin = StringIO("h e l l o\n")
    interactive.main(generate_args)
    sys.stdin = orig_stdin
Example #4
def main(argv):
    if len(argv) == 1:
        print('Usage: {0} GEN_PATH [TOX_ARGS...]'.format(argv[0]),
              file=sys.stderr)
        return 1

    # Get the project root directory.
    project_root = os.path.dirname(os.path.dirname(
        os.path.realpath(__file__)))

    temp_dir = argv[1]
    print('Copying files to ', temp_dir)
    shutil.copytree(project_root, temp_dir)
    os.chdir(temp_dir)

    # Run generation.
    sys.path.insert(0, os.path.realpath('internal'))
    import generate
    generate.main()

    # Run tox.
    import tox
    # tox will raise SystemExit() and try to exit. We don't want that.
    try:
        tox.cmdline(argv[2:])
    except SystemExit:
        pass

    # Print out the directory name for the shell script.
    print(temp_dir)
Example #5
def main(argv):
    if len(argv) == 1:
        print('Usage: {0} GEN_PATH [TOX_ARGS...]'.format(argv[0]),
              file=sys.stderr)
        return 1

    # Get the project root directory.
    project_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

    temp_dir = argv[1]
    print('Copying files to ', temp_dir)
    shutil.copytree(project_root, temp_dir)
    os.chdir(temp_dir)

    # Run generation.
    sys.path.insert(0, os.path.realpath('internal'))
    import generate
    generate.main()

    # Run tox.
    import tox
    # tox will raise SystemExit() and try to exit. We don't want that.
    try:
        tox.cmdline(argv[2:])
    except SystemExit:
        pass

    # Print out the directory name for the shell script.
    print(temp_dir)
Example #6
def generate_main(data_dir, extra_flags=None):
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(
        generate_parser,
        [
            data_dir,
            '--path',
            os.path.join(data_dir, 'checkpoint_last.pt'),
            '--beam',
            '3',
            '--batch-size',
            '64',
            '--max-len-b',
            '5',
            '--gen-subset',
            'valid',
            '--no-progress-bar',
            '--print-alignment',
        ] + (extra_flags or []),
    )

    # evaluate model in batch mode
    generate.main(generate_args)

    # evaluate model interactively
    generate_args.buffer_size = 0
    generate_args.input = '-'
    generate_args.max_sentences = None
    orig_stdin = sys.stdin
    sys.stdin = StringIO('h e l l o\n')
    interactive.main(generate_args)
    sys.stdin = orig_stdin
Example #7
def generate_():
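    # Flask view: reads JSON {"text": ..., "model": ...} from the request and
    # returns the generated continuation, using the wiki model when requested.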
    context = request.get_json(force=True)
    phrase = context.get('text', '')
    if context.get('model') == 'wiki' and wiki_model:
        generated = generate_wiki.main(wiki_model, enc, phrase)
    else:
        generated = generate.main(model, enc, phrase)
    return jsonify({"response": generated})
Example #8
def main():
    args = parse_args()
    word_dic = read_word_list(args.words_in)
    wordcloud = generate.main(word_dic, args.image, args.font,
                              args.use_image_colors)
    save_wordcloud(args.wordcloud_out, wordcloud)
    if args.display:
        display_wordcloud(wordcloud)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('new_version', type=str)
    args = parser.parse_args()

    version = Version.parse(args.new_version)
    assert not version.dev

    print(f"Bumping to {version}")
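    # A beta version regenerates only the beta channel; a stable release
    # updates both the stable and beta channels.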
    if version.beta:
        write_version('beta', version)
        generate.main(['beta'])
    else:
        assert not version.beta
        write_version('stable', version)
        write_version('beta', version)
        generate.main(['stable', 'beta'])
    return 0
Example #10
def test_main(self):
    """
    Runs generate.main() which should merge source files,
    then compile all sources in all configured languages.
    Validates output by checking all .mo files in all configured languages.
    .mo files should exist, and be recently created (modified
    after start of test suite)
    """
    generate.main()
    for locale in CONFIGURATION.locales:
        for filename in ('django', 'djangojs'):
            mofile = filename + '.mo'
            path = os.path.join(CONFIGURATION.get_messages_dir(locale), mofile)
            exists = os.path.exists(path)
            self.assertTrue(exists, msg='Missing file in locale %s: %s' % (locale, mofile))
            self.assertTrue(datetime.fromtimestamp(os.path.getmtime(path)) >= self.start_time,
                            msg='File not recently modified: %s' % path)
        self.assert_merge_headers(locale)
Example #11
    def generate(self, data_dir):
        generate_parser = options.get_generation_parser()
        generate_args = generate_parser.parse_args([
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_best.pt'),
            '--beam', '5',
            '--batch-size', '32',
            '--gen-subset', 'valid',
            '--no-progress-bar',
        ])

        # evaluate model in batch mode
        generate.main(generate_args)

        # evaluate model interactively
        orig_stdin = sys.stdin
        sys.stdin = StringIO('h e l l o\n')
        interactive.main(generate_args)
        sys.stdin = orig_stdin
Example #12
def homepage():
    form = makeCrossword()
    if form.validate_on_submit():
        pic_id = uuid1().hex
        words = form.words.data.upper().split()
        shuffle(words)
        structure = f'structure{form.structure.data}'
        if main(f'data/{structure}.txt', words, f'static/{pic_id}.png'):
            return render_template('crossword.html', id=pic_id)
        else:
            return render_template('crossword.html',
                                   message="No solution, try using one of the sample word lists!")
    return render_template('main.html', form=form)
Example #13
def test_main(self):
    """
    Runs generate.main() which should merge source files,
    then compile all sources in all configured languages.
    Validates output by checking all .mo files in all configured languages.
    .mo files should exist, and be recently created (modified
    after start of test suite)
    """
    generate.main()
    for locale in CONFIGURATION.locales:
        for filename in ('django', 'djangojs'):
            mofile = filename + '.mo'
            path = os.path.join(CONFIGURATION.get_messages_dir(locale),
                                mofile)
            exists = os.path.exists(path)
            self.assertTrue(exists,
                            msg='Missing file in locale %s: %s' %
                            (locale, mofile))
            self.assertTrue(datetime.fromtimestamp(os.path.getmtime(path))
                            >= self.start_time,
                            msg='File not recently modified: %s' % path)
        self.assert_merge_headers(locale)
Example #14
def get_inventory(clean=True, extra_args=None):
    "Return the inventory mapping in a dict."
    # Use the list argument to more closely mirror
    # Ansible's use of the callable.
    args = {'config': TARGET_DIR, 'list': True, 'environment': BASE_ENV_DIR}
    if extra_args:
        args.update(extra_args)
    try:
        inventory_string = di.main(**args)
        inventory = json.loads(inventory_string)
        return inventory
    finally:
        if clean:
            # Remove the file system artifacts since we want to force
            # fresh runs
            cleanup()
Example #15
def get_inventory(clean=True, extra_args=None):
    "Return the inventory mapping in a dict."
    # Use the list argument to more closely mirror
    # Ansible's use of the callable.
    args = {'config': TARGET_DIR, 'list': True,
            'environment': BASE_ENV_DIR}
    if extra_args:
        args.update(extra_args)
    try:
        inventory_string = di.main(**args)
        inventory = json.loads(inventory_string)
        return inventory
    finally:
        if clean:
            # Remove the file system artifacts since we want to force
            # fresh runs
            cleanup()
Example #16
#!/usr/bin/env python3

import os
import argparse
from pathlib import Path
import toml
import generate

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Testcase Generator for Matrix build')
    parser.add_argument('--show-list', action='store_true', help='Show problem list')
    parser.add_argument('num', type=int, help='# of server')
    parser.add_argument('id', type=int, help='server ID(1 <= id <= num)')
    parser.add_argument('args', nargs=argparse.REMAINDER)
    args = parser.parse_args()

    tomls = list(filter(lambda p: not p.match('test/**/info.toml'),
                        Path('.').glob('**/info.toml')))
    tomls = sorted(tomls, key=lambda x: x.parent.name)

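    # Shard the problem list across servers: server id of num takes every
    # num-th problem, starting at index id - 1.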
    tomls = [tomls[i] for i in range(args.id - 1, len(tomls), args.num)]

    if args.show_list:
        print('Server ID: {} / {}'.format(args.id, args.num))
        print('Problem List:')
        for x in tomls:
            print('  {}'.format(x))
    else:
        generate.main(['--verify'] + list(map(str, tomls)) + args.args)
Example #17
  def handleDelivery(self, selection, spec):
    delivery_method = selection['delivery_method']
    delivery_value = spec['referrer_policy']

    meta = ''
    headers = []
    if delivery_value is not None:
        if delivery_method == 'meta-referrer':
            meta = \
                '<meta name="referrer" content="%s">' % delivery_value
        elif delivery_method == 'http-rp':
            meta = \
                "<!-- No meta: Referrer policy delivered via HTTP headers. -->"
            headers.append('Referrer-Policy: ' + '%s' % delivery_value)
            # TODO(kristijanburnik): Limit to WPT origins.
            headers.append('Access-Control-Allow-Origin: *')
        elif delivery_method == 'attr-referrer':
            # attr-referrer is supported by the JS test wrapper.
            pass
        elif delivery_method == 'rel-noreferrer':
            # rel=noreferrer is supported by the JS test wrapper.
            pass
        else:
            raise ValueError('Not implemented delivery_method: '
                             + delivery_method)
    return {"meta": meta, "headers": headers}


if __name__ == '__main__':
    generate.main(ReferrerPolicyConfig())
Example #18
LINKS = (
    ('Pelican', 'http://getpelican.com/'),
    ('Python.org', 'http://python.org/'),
    ('Jinja2', 'http://jinja.pocoo.org/'),
    ('You can modify those links in your config file', '#'),
)

# Social widget
SOCIAL = (
    ('You can add links in your config file', '#'),
    ('Another social link', '#'),
)

DEFAULT_PAGINATION = 10
#encrypted
PLUGINS = []
ENCRYPT_CONTENT = {
    'title_prefix': '[Encrypted]',
    'summary': 'This content is encrypted.'
}
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True

THEME = "myTheme"

import sys

sys.path.insert(0, './')
from generate import main

main()
Example #19
            self.log_per_N_batch = 0
        """
        {
            'jaso' : 10,
            'char' : 10,
            'word' : 10,
            'pos_Twitter' : 10,
            'pos_Mecab' : 10,
            'pos_Hannanum' : 10,
            'test' : 1,
            'penn' : 1,
        }[self.data_type]
        """


if __name__ == "__main__":
    import sys
    import train
    import generate

    setting = SETTING()

    if len(sys.argv) == 1:
        print("***** train or generate *****")
    elif sys.argv[1] == 'train':
        train.main(setting)
    elif sys.argv[1] == 'generate':
        generate.main(setting)
    else:
        print("***** train or generate *****")
Example #20
def test_duplicated_ip(self):
    self.duplicate_ip()
    self.write_config()
    with self.assertRaises(di.MultipleHostsWithOneIPError) as context:
        di.main(config=TARGET_DIR, check=True, environment=BASE_ENV_DIR)
    self.assertEqual(context.exception.ip, '172.29.236.100')
Example #21
def score_bw(args):
    if args.backwards1:
        scorer1_src = args.target_lang
        scorer1_tgt = args.source_lang
    else:
        scorer1_src = args.source_lang
        scorer1_tgt = args.target_lang

    if args.score_model2 is not None:
        if args.backwards2:
            scorer2_src = args.target_lang
            scorer2_tgt = args.source_lang
        else:
            scorer2_src = args.source_lang
            scorer2_tgt = args.target_lang

    rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None
    rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None
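    # "is_gen" means the scoring model is the generation model itself (and no
    # source prefix is used), so its scores already exist in the generation
    # output and a separate scoring pass can be skipped.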

    pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
        backwards_preprocessed_dir, lm_preprocessed_dir = \
        rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
                                     args.gen_model_name, args.shard_id, args.num_shards,
                                     args.sampling, args.prefix_len, args.target_prefix_frac,
                                     args.source_prefix_frac)

    score1_file = rerank_utils.rescore_file_name(
        pre_gen,
        args.prefix_len,
        args.model1_name,
        target_prefix_frac=args.target_prefix_frac,
        source_prefix_frac=args.source_prefix_frac,
        backwards=args.backwards1)

    if args.score_model2 is not None:
        score2_file = rerank_utils.rescore_file_name(
            pre_gen,
            args.prefix_len,
            args.model2_name,
            target_prefix_frac=args.target_prefix_frac,
            source_prefix_frac=args.source_prefix_frac,
            backwards=args.backwards2)

    if args.right_to_left1:
        rerank_data1 = right_to_left_preprocessed_dir
    elif args.backwards1:
        rerank_data1 = backwards_preprocessed_dir
    else:
        rerank_data1 = left_to_right_preprocessed_dir

    gen_param = ["--batch-size", "128", "--score-reference",
                 "--gen-subset", "train"]
    if not rerank1_is_gen and not os.path.isfile(score1_file):
        print("STEP 4: score the translations for model 1")

        model_param1 = [
            "--path", args.score_model1, "--source-lang", scorer1_src,
            "--target-lang", scorer1_tgt
        ]
        gen_model1_param = [rerank_data1] + gen_param + model_param1

        gen_parser = options.get_generation_parser()
        input_args = options.parse_args_and_arch(gen_parser, gen_model1_param)

        with open(score1_file, 'w') as f:
            with redirect_stdout(f):
                generate.main(input_args)

    if args.score_model2 is not None and not os.path.isfile(
            score2_file) and not rerank2_is_gen:
        print("STEP 4: score the translations for model 2")

        if args.right_to_left2:
            rerank_data2 = right_to_left_preprocessed_dir
        elif args.backwards2:
            rerank_data2 = backwards_preprocessed_dir
        else:
            rerank_data2 = left_to_right_preprocessed_dir

        model_param2 = [
            "--path", args.score_model2, "--source-lang", scorer2_src,
            "--target-lang", scorer2_tgt
        ]
        gen_model2_param = [rerank_data2] + gen_param + model_param2

        gen_parser = options.get_generation_parser()
        input_args = options.parse_args_and_arch(gen_parser, gen_model2_param)

        with open(score2_file, 'w') as f:
            with redirect_stdout(f):
                generate.main(input_args)
Example #22
    self.sanity_checker_js = '/mixed-content/generic/sanity-checker.js'
    self.spec_json_js = '/mixed-content/spec_json.js'

    self.test_case_name = 'MixedContentTestCase'

    script_directory = os.path.dirname(os.path.abspath(__file__))
    self.spec_directory = os.path.abspath(os.path.join(script_directory, '..', '..'))

  def handleDelivery(self, selection, spec):
    opt_in_method = selection['opt_in_method']

    meta = ''
    headers = []

    # TODO(kristijanburnik): Implement the opt-in-method here.
    if opt_in_method == 'meta-csp':
        meta = '<meta http-equiv="Content-Security-Policy" ' + \
               'content="block-all-mixed-content">'
    elif opt_in_method == 'http-csp':
        headers.append("Content-Security-Policy: block-all-mixed-content")
    elif opt_in_method == 'no-opt-in':
        pass
    else:
        raise ValueError("Invalid opt_in_method %s" % opt_in_method)

    return {"meta": meta, "headers": headers}


if __name__ == '__main__':
    generate.main(MixedContentConfig())
Example #23
def run(self):
    import generate
    generate.main()
Example #24
    n_samples = 50

    #model_file = 'models/vr-le13_12_0.5_1_2l_8_1_8_.model'
    #model_name = 'adam2_2_2_b_0.01_vr-le13_12_0.5_1_2l_8_1_8__d11_12_1_1l_16_1_x'
    model_file = 'models/vr-le13_12_0.5_2_1lg_8_2_16_f.model'
    model_name = 'adam2_2_2_b_0.01_vr-le13_12_0.5_2_1lg_8_2_16_f_d11_12_2_1l_8_1_x'
    weights_file = '{}/{}.{}.0.all_gen_iter_{}.caffemodel'.format(
        model_name, model_name, data_name, iter_)

    data_root = '/home/mtr22/dvorak' + '/net/pulsar/home/koes/mtr22/gan/data/'  #dkoes/PDBbind/refined-set/'
    data_file = 'data/two_atoms.types'

    net_param = caffe_util.NetParameter.from_prototxt(model_file)
    net_param.set_molgrid_data_source(data_file, '')
    data_param = net_param.get_molgrid_data_param(caffe.TEST)
    data_param.random_rotation = True
    data_param.fix_center_to_origin = True
    resolution = data_param.resolution

    params = ast.literal_eval(net_param.layer[-2].python_param.param_str)
    params['gninatypes_file'] = '/home/mtr22/dvorak' + params['gninatypes_file']
    net_param.layer[-2].python_param.param_str = str(params)

    model_file = out_prefix + '.model'
    net_param.to_prototxt(model_file)

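    # Build a command-line style argv and hand it to generate.main() directly.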
    argv = '-m {} -w {} -B rec -b lig_gen --max_iter 3 --fit_atom_types --verbose 1 --data_file {} --data_root {} -o {} --n_samples {} --random_rotation --fix_center_to_origin'  \
            .format(model_file, weights_file, data_file, data_root, out_prefix, n_samples).split()
    generate.main(argv)
Example #25
    parser.add_argument(
        '--check',
        help="Configuration check only, don't generate inventory",
        action='store_true',
    )

    parser.add_argument(
        '-d',
        '--debug',
        help=('Output debug messages to log file. '
              'File is appended to, not overwritten'),
        action='store_true',
        default=False,
    )

    parser.add_argument(
        '-e',
        '--environment',
        help=('Directory that contains the base env.d directory.\n'
              'Defaults to <OSA_ROOT>/playbooks/inventory/.'),
        required=False,
        default=os.path.dirname(__file__),
    )

    return vars(parser.parse_args(arg_list))


if __name__ == '__main__':
    all_args = args(sys.argv[1:])
    output = generate.main(**all_args)
    print(output)
Example #26
import analysis
import generate
from data import StoredxWriter

s = generate.main(
    prep_steps=int(1e6),
    samples=int(1e7),
    save_every=1,
    temp=1. / 3,
    writer=StoredxWriter
)

analysis.main()
Example #27
#!python

import generate

if __name__ == '__main__':
    generate.main()
Example #28
    parser.add_argument(
        '--check',
        help="Configuration check only, don't generate inventory",
        action='store_true',
    )

    parser.add_argument(
        '-d',
        '--debug',
        help=('Output debug messages to log file. '
              'File is appended to, not overwritten'),
        action='store_true',
        default=False,
    )

    parser.add_argument(
        '-e',
        '--environment',
        help=('Directory that contains the base env.d directory.\n'
              'Defaults to <OSA_ROOT>/playbooks/inventory/.'),
        required=False,
        default=os.path.dirname(__file__),
    )

    return vars(parser.parse_args(arg_list))


if __name__ == '__main__':
    all_args = args(sys.argv[1:])
    output = generate.main(**all_args)
    print(output)
Example #29
def run_doc_gen():
    sys.path.append(os.path.join(THIS_DIR, "doc"))
    import generate
    generate.main()
Example #30
def run_doc_gen():
    import generate
    generate.main()
Example #31
def testEmptyArchive(self, mock_args):
    # Run the generator.
    generate.main()
    self.verify_contents(TMP_DIR_NAME)
Example #32
from generate import main

if __name__ == "__main__":
    main(in_blender_mode=True)
Example #33
def test_checking_good_config(self):
    output = di.main(config=TARGET_DIR,
                     check=True,
                     environment=BASE_ENV_DIR)
    self.assertEqual(output, 'Configuration ok!')
Example #34
def test_duplicated_ip(self):
    self.duplicate_ip()
    self.write_config()
    with self.assertRaises(di.MultipleHostsWithOneIPError) as context:
        di.main(config=TARGET_DIR, check=True, environment=BASE_ENV_DIR)
    self.assertEqual(context.exception.ip, '172.29.236.100')
Example #35
                                 '%(source_scheme)s/' + \
                                 '%(subresource)s/' + \
                                 '%(redirection)s/'

        self.test_file_path_pattern = '%(spec_name)s/' + self.selection_pattern + \
                                      '%(name)s.%(source_scheme)s.html'

        self.test_description_template = '''The referrer URL is %(expectation)s when a
document served over %(source_scheme)s requires a
sub-resource via %(subresource)s using the %(delivery_type)s
delivery method with %(redirection)s and when
the target request is %(origin)s.'''

        self.test_page_title_template = 'Referrer-Policy: %s'

        self.helper_js = '/referrer-policy/generic/referrer-policy-test-case.sub.js'

        # For debug target only.
        self.sanity_checker_js = '/referrer-policy/generic/sanity-checker.js'
        self.spec_json_js = '/referrer-policy/spec_json.js'

        self.test_case_name = 'ReferrerPolicyTestCase'

        script_directory = os.path.dirname(os.path.abspath(__file__))
        self.spec_directory = os.path.abspath(
            os.path.join(script_directory, '..', '..'))


if __name__ == '__main__':
    generate.main(ReferrerPolicyConfig())
Example #36
import generate


class UpgradeInsecureRequestsConfig(object):
    def __init__(self):
        self.selection_pattern = \
              '%(source_context_list)s.%(delivery_type)s/' + \
              '%(delivery_value)s/' + \
              '%(subresource)s/' + \
              '%(origin)s.%(redirection)s.%(source_scheme)s'

        self.test_file_path_pattern = 'gen/' + self.selection_pattern + '.html'

        self.test_description_template = 'Upgrade-Insecure-Requests: Expects %(expectation)s for %(subresource)s to %(origin)s origin and %(redirection)s redirection from %(source_scheme)s context.'

        self.test_page_title_template = 'Upgrade-Insecure-Requests: %s'

        self.helper_js = '/upgrade-insecure-requests/generic/test-case.sub.js'

        # For debug target only.
        self.sanity_checker_js = '/upgrade-insecure-requests/generic/sanity-checker.js'
        self.spec_json_js = '/upgrade-insecure-requests/spec_json.js'

        script_directory = os.path.dirname(os.path.abspath(__file__))
        self.spec_directory = os.path.abspath(
            os.path.join(script_directory, '..', '..'))


if __name__ == '__main__':
    generate.main(UpgradeInsecureRequestsConfig())
Example #37
def initialize():
    make_next_dir(0)
    path = gen_dir(0)
    for i in xrange(mp.max_num_functions()):
        filename = os.path.join(path, str(i) + mp.mcode_suffix)
        generate.main(filename)
Example #38
def run_doc_gen():
    import generate
    print()
    generate.main()
Example #39
def gen_and_reprocess_nbest(args):
    if args.score_dict_dir is None:
        args.score_dict_dir = args.data
    if args.prefix_len is not None:
        assert args.right_to_left1 is False, "prefix length not compatible with right to left models"
        assert args.right_to_left2 is False, "prefix length not compatible with right to left models"

    if args.nbest_list is not None:
        assert args.score_model2 is None

    if args.backwards1:
        scorer1_src = args.target_lang
        scorer1_tgt = args.source_lang
    else:
        scorer1_src = args.source_lang
        scorer1_tgt = args.target_lang

    store_data = os.path.join(
        os.path.dirname(__file__), "rerank_data", args.data_dir_name)
    if not os.path.exists(store_data):
        os.makedirs(store_data)

    pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
        backwards_preprocessed_dir, lm_preprocessed_dir = \
        rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
                                     args.gen_model_name, args.shard_id, args.num_shards,
                                     args.sampling, args.prefix_len, args.target_prefix_frac,
                                     args.source_prefix_frac)
    assert not (args.right_to_left1
                and args.backwards1), "backwards right to left not supported"
    assert not (args.right_to_left2
                and args.backwards2), "backwards right to left not supported"
    assert not (args.prefix_len is not None and args.target_prefix_frac is not None), \
        "target prefix frac and target prefix len incompatible"

    # make directory to store generation results
    if not os.path.exists(pre_gen):
        os.makedirs(pre_gen)

    rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None
    rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None
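    # When the scoring model equals the generation model (and no source prefix
    # is used), the generation output already contains that model's scores.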

    if args.nbest_list is not None:
        rerank2_is_gen = True

    # make directories to store preprossed nbest list for reranking
    if not os.path.exists(left_to_right_preprocessed_dir):
        os.makedirs(left_to_right_preprocessed_dir)
    if not os.path.exists(right_to_left_preprocessed_dir):
        os.makedirs(right_to_left_preprocessed_dir)
    if not os.path.exists(lm_preprocessed_dir):
        os.makedirs(lm_preprocessed_dir)
    if not os.path.exists(backwards_preprocessed_dir):
        os.makedirs(backwards_preprocessed_dir)

    score1_file = rerank_utils.rescore_file_name(
        pre_gen,
        args.prefix_len,
        args.model1_name,
        target_prefix_frac=args.target_prefix_frac,
        source_prefix_frac=args.source_prefix_frac,
        backwards=args.backwards1)
    if args.score_model2 is not None:
        score2_file = rerank_utils.rescore_file_name(
            pre_gen,
            args.prefix_len,
            args.model2_name,
            target_prefix_frac=args.target_prefix_frac,
            source_prefix_frac=args.source_prefix_frac,
            backwards=args.backwards2)

    predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"

    using_nbest = args.nbest_list is not None

    if using_nbest:
        print("Using predefined n-best list from interactive.py")
        predictions_bpe_file = args.nbest_list

    else:
        if not os.path.isfile(predictions_bpe_file):
            print(
                "STEP 1: generate predictions using the p(T|S) model with bpe")
            print(args.data)
            param1 = [
                args.data, "--path", args.gen_model, "--shard-id",
                str(args.shard_id), "--num-shards",
                str(args.num_shards), "--nbest",
                str(args.num_rescore), "--batch-size",
                str(args.batch_size), "--beam",
                str(args.num_rescore), "--max-sentences",
                str(args.num_rescore), "--gen-subset", args.gen_subset,
                "--source-lang", args.source_lang, "--target-lang",
                args.target_lang
            ]
            if args.sampling:
                param1 += ["--sampling"]

            gen_parser = options.get_generation_parser()
            input_args = options.parse_args_and_arch(gen_parser, param1)

            print(input_args)
            with open(predictions_bpe_file, 'w') as f:
                with redirect_stdout(f):
                    generate.main(input_args)

    gen_output = rerank_utils.BitextOutputFromGen(
        predictions_bpe_file,
        bpe_symbol=args.remove_bpe,
        nbest=using_nbest,
        prefix_len=args.prefix_len,
        target_prefix_frac=args.target_prefix_frac)

    if args.diff_bpe:
        rerank_utils.write_reprocessed(
            gen_output.no_bpe_source, gen_output.no_bpe_hypo,
            gen_output.no_bpe_target,
            pre_gen + "/source_gen_bpe." + args.source_lang,
            pre_gen + "/target_gen_bpe." + args.target_lang,
            pre_gen + "/reference_gen_bpe." + args.target_lang)
        bitext_bpe = args.rescore_bpe_code
        bpe_src_param = [
            "-c", bitext_bpe, "--input",
            pre_gen + "/source_gen_bpe." + args.source_lang, "--output",
            pre_gen + "/rescore_data." + args.source_lang
        ]
        bpe_tgt_param = [
            "-c", bitext_bpe, "--input",
            pre_gen + "/target_gen_bpe." + args.target_lang, "--output",
            pre_gen + "/rescore_data." + args.target_lang
        ]

        subprocess.call([
            "python",
            os.path.join(os.path.dirname(__file__),
                         "subword-nmt/subword_nmt/apply_bpe.py")
        ] + bpe_src_param,
                        shell=False)

        subprocess.call([
            "python",
            os.path.join(os.path.dirname(__file__),
                         "subword-nmt/subword_nmt/apply_bpe.py")
        ] + bpe_tgt_param,
                        shell=False)

    if (not os.path.isfile(score1_file) and not rerank1_is_gen) or \
            (args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen):
        print(
            "STEP 2: process the output of generate.py so we have clean text files with the translations"
        )

        rescore_file = "/rescore_data"
        if args.prefix_len is not None:
            prefix_len_rescore_file = rescore_file + "prefix" + str(
                args.prefix_len)
        if args.target_prefix_frac is not None:
            target_prefix_frac_rescore_file = rescore_file + "target_prefix_frac" + str(
                args.target_prefix_frac)
        if args.source_prefix_frac is not None:
            source_prefix_frac_rescore_file = rescore_file + "source_prefix_frac" + str(
                args.source_prefix_frac)

        if not args.right_to_left1 or not args.right_to_left2:
            if not args.diff_bpe:
                rerank_utils.write_reprocessed(
                    gen_output.source,
                    gen_output.hypo,
                    gen_output.target,
                    pre_gen + rescore_file + "." + args.source_lang,
                    pre_gen + rescore_file + "." + args.target_lang,
                    pre_gen + "/reference_file",
                    bpe_symbol=args.remove_bpe)
                if args.prefix_len is not None:
                    bw_rescore_file = prefix_len_rescore_file
                    rerank_utils.write_reprocessed(
                        gen_output.source,
                        gen_output.hypo,
                        gen_output.target,
                        pre_gen + prefix_len_rescore_file + "." +
                        args.source_lang,
                        pre_gen + prefix_len_rescore_file + "." +
                        args.target_lang,
                        pre_gen + "/reference_file",
                        prefix_len=args.prefix_len,
                        bpe_symbol=args.remove_bpe)
                elif args.target_prefix_frac is not None:
                    bw_rescore_file = target_prefix_frac_rescore_file
                    rerank_utils.write_reprocessed(
                        gen_output.source,
                        gen_output.hypo,
                        gen_output.target,
                        pre_gen + target_prefix_frac_rescore_file + "." +
                        args.source_lang,
                        pre_gen + target_prefix_frac_rescore_file + "." +
                        args.target_lang,
                        pre_gen + "/reference_file",
                        bpe_symbol=args.remove_bpe,
                        target_prefix_frac=args.target_prefix_frac)
                else:
                    bw_rescore_file = rescore_file

                if args.source_prefix_frac is not None:
                    fw_rescore_file = source_prefix_frac_rescore_file
                    rerank_utils.write_reprocessed(
                        gen_output.source,
                        gen_output.hypo,
                        gen_output.target,
                        pre_gen + source_prefix_frac_rescore_file + "." +
                        args.source_lang,
                        pre_gen + source_prefix_frac_rescore_file + "." +
                        args.target_lang,
                        pre_gen + "/reference_file",
                        bpe_symbol=args.remove_bpe,
                        source_prefix_frac=args.source_prefix_frac)
                else:
                    fw_rescore_file = rescore_file

        if args.right_to_left1 or args.right_to_left2:
            rerank_utils.write_reprocessed(
                gen_output.source,
                gen_output.hypo,
                gen_output.target,
                pre_gen + "/right_to_left_rescore_data." + args.source_lang,
                pre_gen + "/right_to_left_rescore_data." + args.target_lang,
                pre_gen + "/right_to_left_reference_file",
                right_to_left=True,
                bpe_symbol=args.remove_bpe)

        print("STEP 3: binarize the translations")
        if not args.right_to_left1 or args.score_model2 is not None and not args.right_to_left2 or not rerank1_is_gen:

            if args.backwards1 or args.backwards2:
                if args.backwards_score_dict_dir is not None:
                    bw_dict = args.backwards_score_dict_dir
                else:
                    bw_dict = args.score_dict_dir
                bw_preprocess_param = [
                    "--source-lang", scorer1_src, "--target-lang", scorer1_tgt,
                    "--trainpref", pre_gen + bw_rescore_file, "--srcdict",
                    bw_dict + "/dict." + scorer1_src + ".txt", "--tgtdict",
                    bw_dict + "/dict." + scorer1_tgt + ".txt", "--destdir",
                    backwards_preprocessed_dir
                ]
                preprocess_parser = options.get_preprocessing_parser()
                input_args = preprocess_parser.parse_args(bw_preprocess_param)
                preprocess.main(input_args)

            preprocess_param = [
                "--source-lang", scorer1_src, "--target-lang", scorer1_tgt,
                "--trainpref", pre_gen + fw_rescore_file, "--srcdict",
                args.score_dict_dir + "/dict." + scorer1_src + ".txt",
                "--tgtdict",
                args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
                "--destdir", left_to_right_preprocessed_dir
            ]
            preprocess_parser = options.get_preprocessing_parser()
            input_args = preprocess_parser.parse_args(preprocess_param)
            preprocess.main(input_args)

        if args.right_to_left1 or args.right_to_left2:
            preprocess_param = [
                "--source-lang", scorer1_src, "--target-lang", scorer1_tgt,
                "--trainpref", pre_gen + "/right_to_left_rescore_data",
                "--srcdict",
                args.score_dict_dir + "/dict." + scorer1_src + ".txt",
                "--tgtdict",
                args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
                "--destdir", right_to_left_preprocessed_dir
            ]
            preprocess_parser = options.get_preprocessing_parser()
            input_args = preprocess_parser.parse_args(preprocess_param)
            preprocess.main(input_args)

    return gen_output
Example #40
def run_doc_gen():
    sys.path.append(os.path.join(THIS_DIR, "doc"))
    import generate
    generate.main()
Example #41
def run_doc_gen():
    import generate
    print()
    generate.main()
Example #42
import generate


class MixedContentConfig(object):
    def __init__(self):
        self.selection_pattern = \
              '%(source_context_list)s.%(delivery_type)s/' + \
              '%(delivery_value)s/' + \
              '%(subresource)s/' + \
              '%(origin)s.%(redirection)s.%(source_scheme)s'

        self.test_file_path_pattern = 'gen/' + self.selection_pattern + '.html'

        self.test_description_template = 'Mixed-Content: Expects %(expectation)s for %(subresource)s to %(origin)s origin and %(redirection)s redirection from %(source_scheme)s context.'

        self.test_page_title_template = 'Mixed-Content: %s'

        self.helper_js = '/mixed-content/generic/test-case.sub.js'

        # For debug target only.
        self.sanity_checker_js = '/mixed-content/generic/sanity-checker.js'
        self.spec_json_js = '/mixed-content/spec_json.js'

        script_directory = os.path.dirname(os.path.abspath(__file__))
        self.spec_directory = os.path.abspath(
            os.path.join(script_directory, '..', '..'))


if __name__ == '__main__':
    generate.main(MixedContentConfig())
Example #43
def test_checking_good_config(self):
    output = di.main(config=TARGET_DIR, check=True,
                     environment=BASE_ENV_DIR)
    self.assertEqual(output, 'Configuration ok!')
Example #44
def run_doc_gen():
    import generate
    generate.main()