Example #1
    def test_deployment_parameters(self):
        tests = [
            {
                "parameter_list": [],
                "expected": {},
            },
            {
                "parameter_list": [['{"foo": "bar"}']],
                "expected": {"foo": "bar"},
            },
            {
                "parameter_list": [['{"foo": "bar"}', '{"baz": "blat"}']],
                "expected": {"foo": "bar", "baz": "blat"},
            },
            {
                "parameter_list": [['{"foo": "bar"}', '{"foo": "baz"}']],
                "expected": {"foo": "baz"},
            }
        ]

        from argparse import Namespace

        for test in tests:
            namespace = Namespace()
            namespace.parameters = test['parameter_list']
            validate_deployment_parameters(namespace)
            self.assertEqual(namespace.parameters, test['expected'])
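The expected values above pin down the validator's behavior: flatten the nested parameter lists, parse each JSON fragment, and merge the results left to right so later keys win. A minimal sketch consistent with the test (hypothetical, not the actual azure-cli implementation):

import json

def validate_deployment_parameters(namespace):
    # Merge one or more JSON parameter strings into a single dict;
    # later fragments overwrite earlier keys.
    result = {}
    for parameter_set in namespace.parameters:  # e.g. [['{"foo": "bar"}']]
        for parameter in parameter_set:
            result.update(json.loads(parameter))
    namespace.parameters = result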
Example #2
    def add_inferred_output_like(self, data_param, task_param, name):
        """ This function adds entries to parameter objects to enable
        the evaluation action to automatically read in the output of a 
        previous inference run if inference is not explicitly specified.

        This can be used in an application if there is a data section
        entry in the configuration file that matches the inference output.
        In supervised learning, the reference data section would often
        match the inference output and could be used here. Otherwise, 
        a template data section could be used.

        :param data_param:
        :param task_param:
        :param name:  name of input parameter to copy parameters from
        :return: modified data_param and task_param
        """
        print(task_param)
        # Add the data parameter
        if 'inferred' not in data_param:
            data_name = vars(task_param)[name][0]
            inferred_param = Namespace(**vars(data_param[data_name]))
            inferred_param.csv_file = os.path.join(
                self.action_param.save_seg_dir, 'inferred.csv')
            data_param['inferred'] = inferred_param
        # Add the task parameter
        if 'inferred' not in task_param or len(task_param.inferred) == 0:
            task_param.inferred = ('inferred',)
        return data_param, task_param
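A hypothetical call, using a reference data section as the template described in the docstring (app stands in for the owning application object; the section name 'label' is illustrative):

data_param, task_param = app.add_inferred_output_like(
    data_param, task_param, name='label')
# data_param now has an 'inferred' section whose csv_file points at
# <save_seg_dir>/inferred.csv, and task_param.inferred == ('inferred',)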
Example #3
    def run_b2g_test(self, context, tests=None, suite='mochitest', **kwargs):
        """Runs a b2g mochitest."""
        if context.target_out:
            host_webapps_dir = os.path.join(context.target_out, 'data', 'local', 'webapps')
            if not os.path.isdir(os.path.join(
                    host_webapps_dir, 'test-container.gaiamobile.org')):
                print(ENG_BUILD_REQUIRED.format(host_webapps_dir))
                sys.exit(1)

        # TODO without os.chdir, chained imports fail below
        os.chdir(self.mochitest_dir)

        # The imp module can spew warnings if the modules below have
        # already been imported; ignore them.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')

            import imp
            path = os.path.join(self.mochitest_dir, 'runtestsb2g.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                                ('.py', 'r', imp.PY_SOURCE))

            import mochitest

        options = Namespace(**kwargs)

        from manifestparser import TestManifest
        if tests:
            manifest = TestManifest()
            manifest.tests.extend(tests)
            options.manifestFile = manifest

        return mochitest.run_test_harness(options)
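The imp module is deprecated (and removed in Python 3.12); an equivalent file-based load with importlib, a sketch of the replacement rather than part of the original harness:

import importlib.util
import sys

spec = importlib.util.spec_from_file_location('mochitest', path)
mochitest = importlib.util.module_from_spec(spec)
sys.modules['mochitest'] = mochitest  # register so chained imports resolve
spec.loader.exec_module(mochitest)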
Example #4
def test_purge_force():
    global tempdir
    # Generate 1 backup file per day from Thu. 10th January 2013,
    # to Tue 15th January 2013
    backup_files = []
    start_date = int(datetime(2013, 1, 10, 2, 0, 0).strftime("%s"))
    for day_count in range(6):
        tstamp = start_date + (day_count * 86400)
        (fh1, path) = mkstemp(dir=tempdir)
        os.utime(path, (tstamp, tstamp))
        backup_files.append(path)
    # No file in the backup_dir will match these rules
    args = Namespace(backup_dir=tempdir,
                     days_retention=0,
                     weeks_retention=3,
                     months_retention=2,
                     dom=5,
                     dow=3,
                     force=False,
                     noop=False)
    # Will it blend?
    assert(purge.run(args) != 0)
    for backup_file in backup_files:
        assert(os.path.exists(backup_file))

    # Now, let's try with force=True
    args.force = True
    assert(purge.run(args) == 0)
    for backup_file in backup_files:
        assert(not os.path.exists(backup_file))
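Note that strftime("%s") is a non-portable glibc extension. On Python 3.3+ the same local-time epoch seconds can be obtained portably (a sketch under that assumption):

start_date = int(datetime(2013, 1, 10, 2, 0, 0).timestamp())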
Example #5
    def test_dry_run_no_changes(self, mock_file_config, mock_parse_args, initialize,
                                mocked_apply_migration, mock_entry, getLogger, mock_ensure_indexes):
        logger = MagicMock()
        getLogger.return_value = logger
        mock_args = Namespace(dry_run=True, test=False)
        mock_parse_args.return_value = mock_args

        # Test that when dry run is on, it returns 1 if migrations remain
        exit_code = manage.main()
        self.assertEqual(exit_code, 1)
        self.assertFalse(mock_ensure_indexes.called)
        initialize.assert_called_once_with(max_timeout=1)

        # Actually apply the migrations
        mock_args.dry_run = False
        mock_ensure_indexes.reset_mock()
        initialize.reset_mock()
        exit_code = manage.main()
        self.assertEqual(exit_code, 0)
        self.assertTrue(mock_ensure_indexes.called)
        initialize.assert_called_once_with(max_timeout=1)

        # Perform another dry run and check the return value is now 0
        mock_args.dry_run = True
        mock_ensure_indexes.reset_mock()
        initialize.reset_mock()
        exit_code = manage.main()
        self.assertEqual(exit_code, 0)
        self.assertFalse(mock_ensure_indexes.called)
        initialize.assert_called_once_with(max_timeout=1)
Example #6
File: tmx2.py Project: pulkomandy/bitbox
    def load_objects(self) :
        for objectgroup_elt in self.root.findall('objectgroup') :
            groupname=objectgroup_elt.get('name')
            if groupname[0]=='_' : continue # skip
            
            # only take objects with a GID, i.e. tile objects
            for o_elt in objectgroup_elt.findall('object[@gid]') : 
                obj = Namespace (
                    x=int(float(o_elt.get('x'))),
                    y=int(float(o_elt.get('y')))-int(float(o_elt.get('height'))),
                    gid=int(o_elt.get('gid')),

                    name=o_elt.get('name',None), # object name 
                    type=o_elt.get('type',None), # type
                    group=groupname,  # group
                    )

                # find tilesheet
                obj.sprite=self.find_spritesheet(obj.gid)
                obj.tid=obj.gid-obj.sprite

                # check that the tile ID of this object is one of the sprite sheet's states
                if obj.tid not in set(st.tid for st in self.spritesheets[obj.sprite].states) : 
                    error("Tile ID of object %s is not a state of sprite sheet %s"%(obj,self.spritesheets[obj.sprite]))
                
                self.objects.append(obj)

            # now group by spritesheet
            self.objects.sort(key=lambda x:x.tid)
Example #7
    def test_main(self, mock_argparse, mock_copy2):
        from argparse import Namespace
        n = Namespace()
        n.s = True
        mock_argparse.ArgumentParser.return_value.parse_args = lambda: n
        from imgurup import main
        main()
Example #8
def run_mochitest(context, **kwargs):
    from mochitest_options import ALL_FLAVORS
    flavor = kwargs.get('flavor') or 'mochitest'
    if flavor not in ALL_FLAVORS:
        for fname, fobj in ALL_FLAVORS.iteritems():
            if flavor in fobj['aliases']:
                flavor = fname
                break
    fobj = ALL_FLAVORS[flavor]
    kwargs.update(fobj.get('extra_args', {}))

    args = Namespace(**kwargs)
    args.e10s = context.mozharness_config.get('e10s', args.e10s)
    args.certPath = context.certs_dir

    if args.test_paths:
        install_subdir = fobj.get('install_subdir', fobj['suite'])
        test_root = os.path.join(context.package_root, 'mochitest', install_subdir)
        normalize = partial(context.normalize_test_path, test_root)
        args.test_paths = map(normalize, args.test_paths)

    import mozinfo
    if mozinfo.info.get('buildapp') == 'mobile/android':
        return run_mochitest_android(context, args)
    return run_mochitest_desktop(context, args)
Example #9
    def run_desktop_test(self, **kwargs):
        """Runs a reftest, in desktop Firefox."""
        import runreftest

        args = Namespace(**kwargs)
        if args.suite not in ("reftest", "crashtest", "jstestbrowser"):
            raise Exception("None or unrecognized reftest suite type.")

        default_manifest = {
            "reftest": (self.topsrcdir, "layout", "reftests", "reftest.list"),
            "crashtest": (self.topsrcdir, "testing", "crashtest", "crashtests.list"),
            "jstestbrowser": (self.topobjdir, "dist", "test-stage", "jsreftest", "tests", "jstests.list"),
        }

        args.extraProfileFiles.append(os.path.join(self.topobjdir, "dist", "plugins"))
        args.symbolsPath = os.path.join(self.topobjdir, "crashreporter-symbols")

        if not args.tests:
            args.tests = [os.path.join(*default_manifest[args.suite])]

        if args.suite == "jstestbrowser":
            args.extraProfileFiles.append(
                os.path.join(self.topobjdir, "dist", "test-stage", "jsreftest", "tests", "user.js")
            )

        self.log_manager.enable_unstructured()
        try:
            rv = runreftest.run_test_harness(parser, args)
        finally:
            self.log_manager.disable_unstructured()

        return rv
Example #10
    def test_doRealInit(self, mock_read_defaults, mock_open, mock_update_reference, mock_parse_recipe, mock_execute, mock_os, mock_path,  mock_info, mock_banner):
      fake_dist = {"repo": "alisw/alidist", "ver": "master"}
      mock_open.side_effect = lambda x: {
        "/alidist/defaults-release.sh": StringIO("package: defaults-release\nversion: v1\n---"),
        "/alidist/aliroot.sh": StringIO("package: AliRoot\nversion: master\nsource: https://github.com/alisw/AliRoot\n---")
      }[x]
      mock_execute.side_effect = can_do_git_clone
      mock_parse_recipe.side_effect = valid_recipe
      mock_path.exists.side_effect = dummy_exists
      mock_os.mkdir.return_value = None
      mock_path.join.side_effect = path.join
      mock_read_defaults.return_value = (OrderedDict({"package": "defaults-release", "disable": []}), "")
      args = Namespace(
        develPrefix = ".",
        configDir = "/alidist",
        pkgname = "AliRoot@v5-08-00",
        referenceSources = "/sw/MIRROR",
        dist = fake_dist,
        defaults = "release",
        dryRun = False,
        fetchRepos = False
      )
      doInit(args)
      mock_execute.assert_called_with("git clone https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push origin https://github.com/alisw/AliRoot")
      self.assertEqual(mock_execute.mock_calls, CLONE_EVERYTHING)
      mock_path.exists.assert_has_calls([call('.'), call('/sw/MIRROR'), call('/alidist'), call('./AliRoot')])

      # Force fetch repos
      mock_execute.reset_mock()
      mock_path.reset_mock()
      args.fetchRepos = True
      doInit(args)
      mock_execute.assert_called_with("git clone https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push origin https://github.com/alisw/AliRoot")
      self.assertEqual(mock_execute.mock_calls, CLONE_EVERYTHING)
      mock_path.exists.assert_has_calls([call('.'), call('/sw/MIRROR'), call('/alidist'), call('./AliRoot')])
Example #11
    def create_test_runner(self, module_text, parameters, parameter_text, environment):
        """Run create test for a given environment."""
        args = Namespace()
        args.name = 'test'
        args.environments = [environment]
        migrator_dir = Path(self.dir, environment)
        migrator_dir.mkdir()
        migrator.main.create(args)
        expected = """\"\"\"{0}\"\"\"


def up({1}):
    \"\"\"
    Run up migration.

    {2}
    \"\"\"
    pass


def down({1}):
    \"\"\"
    Run down migration (rollback).

    {2}
    \"\"\"
    pass
""".format(module_text, ', '.join(parameters), parameter_text)
        found_files = 0
        for entry in migrator_dir.iterdir():
            with entry.open() as open_file:
                self.assertEqual(expected, open_file.read())
            found_files += 1
        self.assertEqual(1, found_files)
Example #12
    def test_migrate_master(self):
        environment = 'master'
        self.setup_test(environment)
        args = Namespace()
        args.direction = 'up'
        args.initial = False
        args.single = False
        args.config = None

        create_migration(self.database, self.dir, environment, '01_test2.py', 0)
        create_migration(self.database, self.dir, environment, '02_test2.py', 0)
        migrator.main.migrate_environment(self.database, environment, args)
        self.assertEqual("""Running up migrations for master...  01_test2
  02_test2
DONE
""", sys.stdout.getvalue())
        rows = self.database.session.query(self.database.migration_table).all()
        expected_rows = ['01_test2', '02_test2']
        self.assertEqual(len(rows), len(expected_rows))
        for i in range(len(rows)):
            row = rows[i]
            self.assertEqual(expected_rows[i], row.id)
            self.assertEqual(1, row.status)
            self.assertIsNotNone(row.commit_time)
            self.assertTrue(Path(self.dir, expected_rows[i] + '.py.up.txt').exists())
Example #13
    def get_url(self, obj, view_name, request, url_format):
        new_obj = Namespace()
        new_obj.pk = self.get_cipher().encode(obj.pk)

        parent = super(EncryptedLookupHyperlinkedRelatedField, self)

        return parent.get_url(new_obj, view_name, request, url_format)
Example #14
    def test_ConfigFileTask(self):
        """Simple test case for config overrides in file
        """
        # make simple pipeline
        actions = [cmdLineParser._ACTION_ADD_TASK("TaskOne:task"),
                   cmdLineParser._ACTION_CONFIG("task.field=value")]
        args = Namespace(instrument_overrides=False,
                         pipeline=None,
                         pipeline_actions=actions,
                         order_pipeline=False)
        pipeline = self.builder.makePipeline(args)
        self.assertEqual(pipeline[0].config.field, "value")

        # save config to file for next test
        overrides = NamedTemporaryFile(mode="wt", delete=False)
        pipeline[0].config.saveToStream(overrides)
        fname = overrides.name
        del overrides

        args.pipeline_actions = [cmdLineParser._ACTION_ADD_TASK("TaskOne:task"),
                                 cmdLineParser._ACTION_CONFIG_FILE("task:" + fname)]
        pipeline = self.builder.makePipeline(args)
        self.assertEqual(pipeline[0].config.field, "value")

        os.unlink(fname)

        # unknown label should raise LookupError
        args.pipeline_actions = [cmdLineParser._ACTION_ADD_TASK("TaskOne:task"),
                                 cmdLineParser._ACTION_CONFIG_FILE("label:/dev/null")]
        with self.assertRaises(LookupError):
            self.builder.makePipeline(args)
Example #15
    def test_DeleteTask(self):
        """Simple test case removing tasks
        """
        # make short pipeline
        actions = [cmdLineParser._ACTION_ADD_TASK("TaskOne"),
                   cmdLineParser._ACTION_ADD_TASK("TaskTwo"),
                   cmdLineParser._ACTION_ADD_TASK("TaskOne:label"),
                   cmdLineParser._ACTION_ADD_TASK("TaskTwo:label2")]
        args = Namespace(instrument_overrides=False,
                         pipeline=None,
                         pipeline_actions=actions,
                         order_pipeline=False)
        pipeline = self.builder.makePipeline(args)
        self.assertEqual(len(pipeline), 4)

        args.pipeline_actions += [cmdLineParser._ACTION_DELETE_TASK("label")]
        pipeline = self.builder.makePipeline(args)
        self.assertEqual(len(pipeline), 3)

        args.pipeline_actions += [cmdLineParser._ACTION_DELETE_TASK("TaskTwo")]
        pipeline = self.builder.makePipeline(args)
        self.assertEqual(len(pipeline), 2)

        args.pipeline_actions += [cmdLineParser._ACTION_DELETE_TASK("TaskOne"),
                                  cmdLineParser._ACTION_DELETE_TASK("label2")]
        pipeline = self.builder.makePipeline(args)
        self.assertEqual(len(pipeline), 0)

        # unknown label should raise LookupError
        args.pipeline_actions = [cmdLineParser._ACTION_ADD_TASK("TaskOne"),
                                 cmdLineParser._ACTION_ADD_TASK("TaskTwo"),
                                 cmdLineParser._ACTION_DELETE_TASK("label2")]
        with self.assertRaises(LookupError):
            self.builder.makePipeline(args)
Example #16
def normalize_configuration(cfg: Namespace, train_mode: bool) -> None:
    """Given a configuration namespace, normalize the values it contains.

    Arguments:
        cfg: The namespace object returned by `Configuration.make_namespace`
        train_mode: Boolean flag controlling normalization of parameters only
            used during training.
    """
    if train_mode:
        _normalize_train_cfg(cfg)

    if cfg.tf_manager is None:
        cfg.tf_manager = get_default_tf_manager()

    cfg.evaluation = [(e[0], e[0], e[1]) if len(e) == 2 else e
                      for e in cfg.evaluation]

    if cfg.evaluation:
        cfg.main_metric = "{}/{}".format(cfg.evaluation[-1][0],
                                         cfg.evaluation[-1][-1].name)
    else:
        cfg.main_metric = "{}/{}".format(cfg.runners[-1].decoder_data_id,
                                         cfg.runners[-1].loss_names[0])

        if not cfg.tf_manager.minimize_metric:
            raise ValueError("minimize_metric must be set to True in "
                             "TensorFlowManager when using loss as "
                             "the main metric")
Example #17
    def run_android_test(self, context, tests, suite=None, **kwargs):
        host_ret = verify_host_bin()
        if host_ret != 0:
            return host_ret

        import imp
        path = os.path.join(self.mochitest_dir, 'runtestsremote.py')
        with open(path, 'r') as fh:
            imp.load_module('runtestsremote', fh, path,
                            ('.py', 'r', imp.PY_SOURCE))
        import runtestsremote

        from mozrunner.devices.android_device import get_adb_path
        if not kwargs['adbPath']:
            kwargs['adbPath'] = get_adb_path(self)

        options = Namespace(**kwargs)

        from manifestparser import TestManifest
        if tests and not options.manifestFile:
            manifest = TestManifest()
            manifest.tests.extend(tests)
            options.manifestFile = manifest

        return runtestsremote.run_test_harness(parser, options)
Example #18
def test_multi_position():
    args = Namespace(**args_dict)
    args.locked = ['Eli Manning']
    roster = run(NFL, args)
    multi_pos = [p for p in roster.players if p.name == 'Eli Manning']
    ntools.assert_equal(len(multi_pos), 1)
    ntools.assert_equal(multi_pos[0].pos, 'TE')
Example #19
File: test_parse.py Project: cedadev/cis
    def test_output_file_matches_an_existing_file_with_no_force_overwrite_will_prompt(self):
        from cis.parse import _file_already_exists_and_no_overwrite
        from argparse import Namespace
        from tempfile import NamedTemporaryFile

        with NamedTemporaryFile() as tmpfile:
            existing_file = tmpfile.name
            arguments = Namespace(force_overwrite=False)
            # Test output file is the same as input file
            arguments.output = existing_file

            # Choose yes to overwrite - so return false
            with patch('six.moves.input', return_value='y'):
                assert not _file_already_exists_and_no_overwrite(arguments)

            # Choose no to not overwrite - return True
            with patch('six.moves.input', return_value='n'):
                assert _file_already_exists_and_no_overwrite(arguments)

            # Choose yes, eventually
            with patch('six.moves.input', side_effect=['blah', 'blah', 'y']):
                assert not _file_already_exists_and_no_overwrite(arguments)

            # Choose the default (no)
            with patch('six.moves.input', return_value=''):
                assert _file_already_exists_and_no_overwrite(arguments)

            # Choose the default, eventually
            with patch('six.moves.input', side_effect=['yo', 'nope', '']):
                assert _file_already_exists_and_no_overwrite(arguments)
Example #20
def test_banned_constraint():
    args = Namespace(**args_dict)
    jg = 'Jimmy Garoppolo'
    args.teams = ['NE', 'Dal']
    args.banned = [jg]
    roster = run(NFL, args)
    ntools.assert_not_in(jg, [p.name for p in roster.players])
Example #21
def test_within_avg():
    args = Namespace(**args_dict)
    avg_test_val = 3
    args.v_avg = avg_test_val
    roster = run(NFL, args)
    for player in roster.players:
        ntools.assert_less(abs(player.v_avg), avg_test_val)
Example #22
def run_external_media_test(tests, testtype=None, topsrcdir=None, **kwargs):
    from external_media_harness.runtests import (
        FirefoxMediaHarness,
        MediaTestArguments,
        MediaTestRunner,
        mn_cli,
    )

    from mozlog.structured import commandline

    from argparse import Namespace

    parser = setup_argument_parser()

    if not tests:
        tests = [os.path.join(topsrcdir,
                 'dom/media/test/external/external_media_tests/manifest.ini')]

    args = Namespace(tests=tests)

    for k, v in kwargs.iteritems():
        setattr(args, k, v)

    parser.verify_usage(args)

    args.logger = commandline.setup_logging("Firefox External Media Tests",
                                            args,
                                            {"mach": sys.stdout})
    failed = mn_cli(MediaTestRunner, MediaTestArguments, FirefoxMediaHarness,
                    args=vars(args))

    if failed > 0:
        return 1
    else:
        return 0
Example #23
    def test_missing_migration_no_file(self):
        environment = 'master'
        self.setup_test(environment)
        args = Namespace()
        args.direction = 'down'
        args.config = SimpleNamespace()
        install_path = Path(self.dir, 'install')
        Path(install_path, 'migrations', environment).mkdir(parents=True)
        args.config.submitty = {
            'submitty_install_dir': str(install_path)
        }

        create_migration(self.database, self.dir, environment, '01_test6.py')
        create_migration(self.database, self.dir, environment, '02_test6.py', 1, False)
        create_migration(self.database, self.dir, environment, '03_test6.py')
        create_migration(self.database, self.dir, environment, '04_test6.py', 0)
        migrator.main.migrate_environment(self.database, environment, args)
        self.assertEqual("""Running down migrations for master...
Removing 1 missing migrations:
  02_test6

  03_test6
DONE

""", sys.stdout.getvalue())
        rows = self.database.session.query(self.database.migration_table).all()
        expected_rows = ['01_test6', '03_test6', '04_test6']
        self.assertEqual(len(rows), len(expected_rows))
        for i in range(len(rows)):
            row = rows[i]
            self.assertEqual(expected_rows[i], row.id)
            self.assertEqual(1 if i < 1 else 0, row.status)
            self.assertIsNotNone(row.commit_time)
            down_file = expected_rows[i] + '.py.down.txt'
            self.assertEqual(i == 1, Path(self.dir, down_file).exists())
Example #24
    def test_rollback_course(self):
        environment = 'course'
        self.setup_test(environment)
        args = Namespace()
        args.direction = 'down'
        args.semester = 'f18'
        args.course = 'csci1100'
        args.config = None

        create_migration(self.database, self.dir, environment, '01_test3.py')
        create_migration(self.database, self.dir, environment, '02_test3.py')
        migrator.main.migrate_environment(self.database, environment, args)
        self.assertEqual("""Running down migrations for f18.csci1100...  02_test3
DONE
""", sys.stdout.getvalue())
        rows = self.database.session.query(self.database.migration_table).all()
        expected_rows = ['01_test3', '02_test3']
        self.assertEqual(len(rows), len(expected_rows))
        for i in range(len(rows)):
            row = rows[i]
            self.assertEqual(expected_rows[i], row.id)
            self.assertEqual(1 if i < 1 else 0, row.status)
            self.assertIsNotNone(row.commit_time)
            down_file = expected_rows[i] + '.py.down.txt'
            self.assertEqual(i == 1, Path(self.dir, down_file).exists())
Example #25
    def test_get_game_session_log(self):
        session_id = "mysessionid"
        save_as = os.path.join(self.file_creator.rootdir, "mylog")

        args = ["--game-session-id", session_id, "--save-as", save_as]
        global_args = Namespace()
        global_args.region = "us-west-2"
        global_args.endpoint_url = None
        global_args.verify_ssl = None

        presigned_url = "mypresignedurl"
        self.client.get_game_session_log_url.return_value = {"PreSignedUrl": presigned_url}

        # Call the command
        self.cmd(args, global_args)

        # Ensure the client was created properly
        self.mock_create_client.assert_called_once_with(
            "gamelift", region_name="us-west-2", endpoint_url=None, verify=None
        )

        # Ensure the client was called correctly
        self.client.get_game_session_log_url.assert_called_once_with(GameSessionId=session_id)

        # Ensure the presigned url was used
        self.urlopen_mock.assert_called_once_with(presigned_url)

        # Ensure the contents were saved to the file
        with open(save_as, "rb") as f:
            self.assertEqual(f.read(), self.contents)
Example #26
def set_default_args(args=None):
    """Set the default test arguments
    Create a Namespace (`args`) and add all the default test arguments
    to the Namespace.  If an `args` Namespace is passed the default 
    test arguments are added if they are missing from the passed `args`.
    The passed `args` existing attributes are unchanged.
    """
    assert args is None or isinstance(args, Namespace)

    if args is None:
        args = Namespace(trace=False, vtrace=False)

    ipth = './test_images/color/'
    ipth = os.path.join(ipth, 'small4.png')

    default_args = (
        ('trace', False), ('vtrace', False), ('vtrace_level', 0),
        ('vtrace_module', 'tb_jpegenc'), ('imgfn', ipth), ('build_only', False),
        ('build_skip_v1', False), ('nout', 0), ('no_wait', False),
        ('dump_bitstreams', False), ('ncyc', 200), )
                    
    # For each argument: if args doesn't already have the attribute,
    # add it with the default value above.
    for arg, val in default_args:        
        if not hasattr(args, arg):
            setattr(args, arg, val)

    args.start_time = datetime.datetime.now()

    return args
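Example call (a sketch): attributes already present on the passed Namespace are preserved, while missing ones pick up the defaults listed above.

from argparse import Namespace

args = set_default_args(Namespace(trace=True))
assert args.trace is True   # the caller's value is kept
assert args.ncyc == 200     # the missing default is filled in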
Example #27
    def _prepare_result(self, node, result):
        result_obj = Namespace()
        key, id = node.split(':')
        result_obj.id = id
        result_obj.key = key
        result_obj.result = result
        return result_obj
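A hypothetical use of the helper above, given a node string of the form 'key:id' (runner stands in for the owning object):

r = runner._prepare_result('host:42', {'status': 'ok'})
# r.key == 'host', r.id == '42', r.result == {'status': 'ok'}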
Example #28
    def test_vm_nics(self):

        from argparse import Namespace
        from azure.cli.command_modules.vm._validators import _validate_vm_create_nics

        for i in range(0, 100):
            ns = Namespace()
            ns.resource_group_name = 'rg'
            ns.nics = ['nic1', 'nic2']
            _validate_vm_create_nics(ns)

            nic1_expected = {
                "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic1",
                "properties": {
                    "primary": True
                }
            }

            nic2_expected = {
                "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic2",
                "properties": {
                    "primary": False
                }
            }
            self.assertEqual(ns.nics[0], nic1_expected)
            self.assertEqual(ns.nics[1], nic2_expected)
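The IDs asserted above follow the standard ARM resource-ID format; a small helper that produces them (a sketch, not the azure-cli implementation):

def nic_resource_id(subscription_id, resource_group, nic_name):
    return ("/subscriptions/{}/resourceGroups/{}/providers/"
            "Microsoft.Network/networkInterfaces/{}").format(
                subscription_id, resource_group, nic_name)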
Example #29
    def run(self, workflow_input, project=None, folder="/", name=None, **kwargs):
        '''
        :param workflow_input: Hash of the workflow's input arguments, with keys equal to "N.name" where N is the stage number and name is the name of the input, e.g. "0.reads" if the first stage takes in an input called "reads"
        :type workflow_input: dict
        :param project: Project ID in which to run the jobs (project context)
        :type project: string
        :param folder: Folder in which the workflow's outputs will be placed in *project*
        :type folder: string
        :param name: String to append to the default job name for each job (default is the workflow's name)
        :type name: string
        :returns: list of job IDs in order of the stages

        Run each stage in the associated workflow
        '''

        workflow_name = self.describe()['name']
        workflow_spec = self.get_details()
        if workflow_spec.get('version') not in range(2, 6):
            raise DXError("Unrecognized workflow version {v} in {w}\n".format(v=workflow_spec.get('version', '<none>'), w=self))

        launched_jobs = OrderedDict()
        for stage in workflow_spec['stages']:
            launched_jobs[stage['id']] = None

        for k in range(len(workflow_spec['stages'])):
            workflow_spec['stages'][k].setdefault('key', str(k))
            for i in workflow_spec['stages'][k].get('inputs', {}).keys():
                if workflow_spec['stages'][k]['inputs'][i] == "":
                    del workflow_spec['stages'][k]['inputs'][i]

        for stage in workflow_spec['stages']:
            inputs_from_stage = {k: stage_to_job_refs(v, launched_jobs) for k, v in stage['inputs'].iteritems() if v is not None}

            exec_id = stage['app']['id'] if 'id' in stage['app'] else stage['app']
            if isinstance(exec_id, dict) and '$dnanexus_link' in exec_id:
                exec_id = exec_id['$dnanexus_link']
            if exec_id.startswith('app-'):
                from dxpy.utils.resolver import get_app_from_path
                exec_id = get_app_from_path(exec_id)['id']

            executable = get_handler(exec_id)
            job_name = executable.describe()['title']
            job_name += ' - ' + (name if name is not None else workflow_name)

            exec_inputs = ExecutableInputs(executable, input_name_prefix=str(stage['key'])+".")
            exec_inputs.update(inputs_from_stage, strip_prefix=False)
            fake_args = Namespace()
            fake_args.filename = None
            fake_args.input = None
            fake_args.input_spec = None
            fake_args.input_json = json.dumps(workflow_input)
            exec_inputs.update_from_args(fake_args)
            input_json = exec_inputs.inputs

            launched_jobs[stage['id']] = executable.run(input_json, project=project, folder=folder,
                                                        name=job_name,
                                                        **kwargs)

        return launched_jobs.values()
Example #30
    def setUp(self):
        SolrTestCase.setUp(self)
        SitemapTestCase.setUp(self)
        args = Namespace(tika=None, solr=None,
                         slacktoken=None, slackchannel=None)
        args.config = BASIC_CONFIG
        self.config = deepcopy(get_config(args))
        self.config.url_field = 'url'
Example #31
    def recog(self,
              enc_output,
              recog_args,
              char_list=None,
              rnnlm=None,
              use_jit=False):
        """Recognize input speech of each speaker.

        :param ndnarray enc_output: encoder outputs (B, T, D) or (T, D)
        :param Namespace recog_args: argment Namespace contraining options
        :param list char_list: list of characters
        :param torch.nn.Module rnnlm: language model module
        :return: N-best decoding results
        :rtype: list
        """
        if recog_args.ctc_weight > 0.0:
            lpz = self.ctc.log_softmax(enc_output)
            lpz = lpz.squeeze(0)
        else:
            lpz = None

        h = enc_output.squeeze(0)

        logging.info("input lengths: " + str(h.size(0)))
        # search params
        beam = recog_args.beam_size
        penalty = recog_args.penalty
        ctc_weight = recog_args.ctc_weight

        # prepare sos
        y = self.sos
        vy = h.new_zeros(1).long()

        if recog_args.maxlenratio == 0:
            maxlen = h.shape[0]
        else:
            # maxlen >= 1
            maxlen = max(1, int(recog_args.maxlenratio * h.size(0)))
        minlen = int(recog_args.minlenratio * h.size(0))
        logging.info("max output length: " + str(maxlen))
        logging.info("min output length: " + str(minlen))

        # initialize hypothesis
        if rnnlm:
            hyp = {"score": 0.0, "yseq": [y], "rnnlm_prev": None}
        else:
            hyp = {"score": 0.0, "yseq": [y]}
        if lpz is not None:
            ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0,
                                              self.eos, numpy)
            hyp["ctc_state_prev"] = ctc_prefix_score.initial_state()
            hyp["ctc_score_prev"] = 0.0
            if ctc_weight != 1.0:
                # pre-pruning based on attention scores
                ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
            else:
                ctc_beam = lpz.shape[-1]
        hyps = [hyp]
        ended_hyps = []

        import six

        traced_decoder = None
        for i in six.moves.range(maxlen):
            logging.debug("position " + str(i))

            hyps_best_kept = []
            for hyp in hyps:
                vy[0] = hyp["yseq"][i]

                # get nbest local scores and their ids
                ys_mask = subsequent_mask(i + 1).unsqueeze(0)
                ys = torch.tensor(hyp["yseq"]).unsqueeze(0)
                # FIXME: jit does not match non-jit result
                if use_jit:
                    if traced_decoder is None:
                        traced_decoder = torch.jit.trace(
                            self.decoder.forward_one_step,
                            (ys, ys_mask, enc_output))
                    local_att_scores = traced_decoder(ys, ys_mask,
                                                      enc_output)[0]
                else:
                    local_att_scores = self.decoder.forward_one_step(
                        ys, ys_mask, enc_output)[0]

                if rnnlm:
                    rnnlm_state, local_lm_scores = rnnlm.predict(
                        hyp["rnnlm_prev"], vy)
                    local_scores = (local_att_scores +
                                    recog_args.lm_weight * local_lm_scores)
                else:
                    local_scores = local_att_scores

                if lpz is not None:
                    local_best_scores, local_best_ids = torch.topk(
                        local_att_scores, ctc_beam, dim=1)
                    ctc_scores, ctc_states = ctc_prefix_score(
                        hyp["yseq"], local_best_ids[0], hyp["ctc_state_prev"])
                    local_scores = (
                        1.0 - ctc_weight) * local_att_scores[:, local_best_ids[
                            0]] + ctc_weight * torch.from_numpy(
                                ctc_scores - hyp["ctc_score_prev"])
                    if rnnlm:
                        local_scores += (recog_args.lm_weight *
                                         local_lm_scores[:, local_best_ids[0]])
                    local_best_scores, joint_best_ids = torch.topk(
                        local_scores, beam, dim=1)
                    local_best_ids = local_best_ids[:, joint_best_ids[0]]
                else:
                    local_best_scores, local_best_ids = torch.topk(
                        local_scores, beam, dim=1)

                for j in six.moves.range(beam):
                    new_hyp = {}
                    new_hyp["score"] = hyp["score"] + float(
                        local_best_scores[0, j])
                    new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
                    new_hyp["yseq"][:len(hyp["yseq"])] = hyp["yseq"]
                    new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0,
                                                                           j])
                    if rnnlm:
                        new_hyp["rnnlm_prev"] = rnnlm_state
                    if lpz is not None:
                        new_hyp["ctc_state_prev"] = ctc_states[joint_best_ids[
                            0, j]]
                        new_hyp["ctc_score_prev"] = ctc_scores[joint_best_ids[
                            0, j]]
                    # will be (2 x beam) hyps at most
                    hyps_best_kept.append(new_hyp)

                hyps_best_kept = sorted(hyps_best_kept,
                                        key=lambda x: x["score"],
                                        reverse=True)[:beam]

            # sort and get nbest
            hyps = hyps_best_kept
            logging.debug("number of pruned hypothes: " + str(len(hyps)))
            if char_list is not None:
                logging.debug(
                    "best hypo: " +
                    "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]]))

            # add eos in the final loop so that at least one hypothesis ends
            if i == maxlen - 1:
                logging.info("adding <eos> in the last position in the loop")
                for hyp in hyps:
                    hyp["yseq"].append(self.eos)

            # move ended hypotheses to a final list and remove them from the
            # current hypotheses (otherwise the number of hyps stays < beam)
            remained_hyps = []
            for hyp in hyps:
                if hyp["yseq"][-1] == self.eos:
                    # only store the sequence that has more than minlen outputs
                    # also add penalty
                    if len(hyp["yseq"]) > minlen:
                        hyp["score"] += (i + 1) * penalty
                        if rnnlm:  # Word LM needs to add final <eos> score
                            hyp["score"] += recog_args.lm_weight * rnnlm.final(
                                hyp["rnnlm_prev"])
                        ended_hyps.append(hyp)
                else:
                    remained_hyps.append(hyp)

            # end detection

            if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
                logging.info("end detected at %d", i)
                break

            hyps = remained_hyps
            if len(hyps) > 0:
                logging.debug("remeined hypothes: " + str(len(hyps)))
            else:
                logging.info("no hypothesis. Finish decoding.")
                break

            if char_list is not None:
                for hyp in hyps:
                    logging.debug(
                        "hypo: " +
                        "".join([char_list[int(x)] for x in hyp["yseq"][1:]]))

            logging.debug("number of ended hypothes: " + str(len(ended_hyps)))

        nbest_hyps = sorted(
            ended_hyps, key=lambda x: x["score"],
            reverse=True)[:min(len(ended_hyps), recog_args.nbest)]

        # check the number of hypotheses
        if len(nbest_hyps) == 0:
            logging.warning("there are no N-best results, performing recognition "
                            "again with a smaller minlenratio.")
            # should copy because the Namespace will be overwritten globally
            recog_args = Namespace(**vars(recog_args))
            recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
            return self.recog(enc_output, recog_args, char_list, rnnlm)

        logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
        logging.info("normalized log probability: " +
                     str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"])))
        return nbest_hyps
Example #32
def ReadConfig(args):
    instances = []
    args.cmds = []

    i = 0
    instance = 0
    iargs = args
    debug = args.debug

    accept, reject = _processMutuallyExclusiveOptions(args)

    cfgFile = os.path.expanduser(args.config)
    if not os.path.exists(cfgFile):
        args.instances = instances
        return args
    with open(cfgFile) as f:
        for line in f:
            line = line.lstrip().rstrip()
            if line.startswith('#') or line == '':
                continue
            if line.startswith('['):
                instName = line[1:-1]
                instance += 1
                iargs = Namespace()
                iargs.instName = instName
                iargs.instance = instance
                iargs.accept = False
                iargs.connect = False
                iargs.host = None
                iargs.port = None
                iargs.speed = None
                iargs.experimental = args.experimental
                iargs.daemon = True
                iargs.rtscts = False
                iargs.files = []
                iargs.cmds = []
                iargs.debug = debug
                iargs.offset = args.offset
                iargs.hdbdos = args.hdbdos
                iargs.noreconnect = args.noreconnect
                iargs.printFormat = args.printFormat
                # iargs.printMode = None
                iargs.printDir = args.printDir
                iargs.printPrefix = args.printPrefix
                iargs.printFile = args.printFile
                iargs.printCmd = args.printCmd
                instances.append(iargs)
                continue
            lp = line.split(' ')
            if lp[0].lower() == 'option':
                # args[lp[1]] = lp[2]
                key = lp[1]
                val = lp[2]
                if val in ['True', 'False']:
                    val = eval(val)
                if key == 'debug':
                    val = int(val)
                    if not debug:
                        debug = val

                if instance == 0:
                    # print key, accept, reject
                    if key in reject:
                        print(
                            '%d: rejecting line from config file (R): %s' %
                            (instance, line))
                        continue
                    else:
                        v = eval('iargs.%s' % key)
                        has = defaultConfigValues.has_key(key)
                        if v and has and v != defaultConfigValues[key]:
                            print(
                                '%d: rejecting line from config file (D): %s' %
                                (instance, line))
                        elif v and not has:
                            print(
                                '%d: rejecting line from config file (O): %s' %
                                (instance, line))
                        else:
                            #print(
                            #    '%d: accepting line from config file: %s' %
                            #    (instance, line))
                            exec('iargs.%s = val' % key)
                else:
                    exec('iargs.%s = val' % key)

                # print "%d:option:%s" % (instance,line)
            else:
                if args.debug >= 1 and line.startswith('dw server debug'):
                    continue
                if args.debug == 2 and line.startswith('dw server conn debug'):
                    continue
                iargs.cmds += [line]
                # print "%d:cmd:%s" % (instance,line)
    args.instances = instances
    # args.cmds = cmds
    return args
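The option handling above goes through eval() and exec() to read and set attributes named by config-file strings, and defaultConfigValues.has_key() exists only on Python 2. The same logic can be written safely with getattr()/setattr() and the in operator (a sketch, not part of the original project):

v = getattr(iargs, key, None)      # replaces eval('iargs.%s' % key)
has = key in defaultConfigValues   # replaces defaultConfigValues.has_key(key)
setattr(iargs, key, val)           # replaces exec('iargs.%s = val' % key)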
Example #33
    def test_missing_migration_not_up(self):
        environment = 'master'
        self.setup_test(environment)
        args = Namespace()
        args.direction = 'up'
        args.single = False
        args.initial = False
        args.config = SimpleNamespace()
        install_path = Path(self.dir, 'install')
        Path(install_path, 'migrations', environment).mkdir(parents=True)
        args.config.submitty = {'submitty_install_dir': str(install_path)}

        missing_migration = Path(install_path, 'migrations', environment,
                                 '02_test4.py')
        with missing_migration.open('w') as open_file:
            open_file.write("""
from pathlib import Path
INSTALL_PATH = "{}"

def down(*_):
    with Path(INSTALL_PATH, 'test.txt').open('w') as open_file:
        open_file.write('test')
""".format(install_path))
        create_migration(self.database, self.dir, environment, '01_test4.py',
                         1)
        create_migration(self.database, self.dir, environment, '02_test4.py',
                         0, False)
        create_migration(self.database, self.dir, environment, '03_test4.py',
                         0)
        create_migration(self.database, self.dir, environment, '04_test4.py',
                         0)
        missing_migrations = set()
        migrator.main.migrate_environment(self.database, environment, args,
                                          missing_migrations)
        self.assertEqual(len(missing_migrations), 1)
        self.assertEqual(list(missing_migrations)[0], missing_migration)
        self.assertEqual(
            """Running up migrations for master...
Removing 1 missing migrations:
  02_test4

  03_test4
  04_test4
DONE

""", sys.stdout.getvalue())
        rows = self.database.session.query(self.database.migration_table).all()
        expected_rows = ['01_test4', '03_test4', '04_test4']
        self.assertEqual(len(rows), len(expected_rows))
        for i in range(len(rows)):
            row = rows[i]
            self.assertEqual(expected_rows[i], row.id)
            self.assertEqual(1, row.status)
            self.assertIsNotNone(row.commit_time)
            up_file = expected_rows[i] + '.py.up.txt'
            if i > 0:
                self.assertTrue(Path(self.dir, up_file).exists())
            else:
                self.assertFalse(Path(self.dir, up_file).exists())

        self.assertTrue(missing_migration.exists())
        self.assertFalse(Path(install_path, 'test.txt').exists())
Example #34
import gensim
from gensim.models import Word2Vec
import json
import nltk
nltk.download('punkt')
import numpy as np
import pandas as pd
import re
import urllib.request
import warnings
from argparse import Namespace

warnings.filterwarnings('ignore')

args = Namespace(
    seed=1234,
    data_file="harrypotter.txt",
    embedding_dim=100,
    window=5,
    min_count=3,
    skip_gram=1,  # 0 = CBOW
    negative_sampling=20,
)

# Upload data from GitHub to notebook's local drive
url = "https://raw.githubusercontent.com/GokuMohandas/practicalAI/master/data/harrypotter.txt"
response = urllib.request.urlopen(url)
html = response.read()
with open(args.data_file, 'wb') as fp:
    fp.write(html)

# In[5]:

# Split text into sentences
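A plausible continuation of the truncated cell, wiring the args Namespace into gensim (a sketch; on gensim < 4.0 the vector_size keyword is named size):

from nltk.tokenize import sent_tokenize, word_tokenize

with open(args.data_file, encoding='utf-8') as fp:
    text = fp.read()
sentences = [word_tokenize(s.lower()) for s in sent_tokenize(text)]

model = Word2Vec(sentences,
                 vector_size=args.embedding_dim,
                 window=args.window,
                 min_count=args.min_count,
                 sg=args.skip_gram,
                 negative=args.negative_sampling,
                 seed=args.seed)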
Example #35
                        type=int,
                        default=256,
                        help='the size of the LSTM hidden state')

    opt = parser.parse_args()

    argparse_dict = vars(opt)

    import json
    with open('train_args.json', 'w') as fp:
        json.dump(argparse_dict, fp, sort_keys=True, indent=4)
    opt = {}
    from argparse import Namespace
    with open('train_args.json', 'r') as fp:
        args = json.load(fp)
        opt = Namespace(**args)

    if not opt.exp_name:
        opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-{opt.Prediction}'
        opt.exp_name += f'-Seed{opt.manualSeed}'
        # print(opt.exp_name)

    os.makedirs(f'/content/drive/MyDrive/saved_models/{opt.exp_name}',
                exist_ok=True)
    """ vocab / character number configuration """
    if opt.sensitive:
        # opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        opt.character = string.printable[:-6]  # same as ASTER setting (94 chars)
    """ Seed and GPU setting """
    # print("Random Seed: ", opt.manualSeed)
Example #36
def process_step(args, api_endpoint, access_token, profile_response,
                 pokemonsJSON, pokemonsJSON_icons, translationsJSON, ignore,
                 only):

    print('[+] Searching for Pokemon at location {} {}'.format(
        FLOAT_LAT, FLOAT_LONG))
    origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
    step_lat = FLOAT_LAT
    step_long = FLOAT_LONG
    parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
                                                     FLOAT_LONG)).parent(15)
    h = get_heartbeat(args.auth_service, api_endpoint, access_token,
                      profile_response)
    hs = [h]
    seen = {}

    for child in parent.children():
        latlng = LatLng.from_point(Cell(child).get_center())
        set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
        hs.append(
            get_heartbeat(args.auth_service, api_endpoint, access_token,
                          profile_response))
    set_location_coords(step_lat, step_long, 0)
    visible = []

    for hh in hs:
        try:
            for cell in hh.cells:
                for wild in cell.WildPokemon:
                    hash = wild.SpawnPointId
                    disappear_timestamp = time.time(
                    ) + wild.TimeTillHiddenMs / 1000
                    if hash not in seen.keys() or \
                                    wild.pokemon.PokemonId != seen[hash]['PokemonId'] \
                                    or seen[hash]['disappear_timestamp'] < disappear_timestamp - 300:
                        visible.append(wild)
                        seen[hash] = {
                            'disappear_timestamp': disappear_timestamp,
                            'PokemonId': wild.pokemon.PokemonId
                        }
                if cell.Fort:
                    for Fort in cell.Fort:
                        if Fort.Enabled:
                            if args.china:
                                (Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
                            if Fort.GymPoints and args.display_gym:
                                gyms[Fort.FortId] = [
                                    Fort.Team, Fort.Latitude, Fort.Longitude,
                                    Fort.GymPoints
                                ]

                            elif Fort.FortType:
                                expire_time = 0
                                if Fort.LureInfo.LureExpiresTimestampMs:
                                    if Fort.LureInfo.ActivePokemonId:
                                        hash = Fort.LureInfo.FortId
                                        disappear_timestamp = Fort.LureInfo.LureExpiresTimestampMs / 1000
                                        if hash not in seen.keys() \
                                                or Fort.LureInfo.ActivePokemonId != seen[hash].PokemonId \
                                                or seen[hash].disappear_timestamp < disappear_timestamp - 300:
                                            lured = Namespace()
                                            lured.lured = True
                                            lured.pokemon = Namespace()
                                            lured.pokemon.PokemonId = Fort.LureInfo.ActivePokemonId
                                            lured.Latitude = Fort.Latitude + 0.00007 * np.random.normal(
                                            )
                                            lured.Longitude = Fort.Longitude + 0.00007 * np.random.normal(
                                            )
                                            lured.SpawnPointId = 'Fort_' + Fort.LureInfo.FortId
                                            lured.TimeTillHiddenMs = Fort.LureInfo.LureExpiresTimestampMs - \
                                                time.time() * 1000
                                            visible.append(lured)
                                            seen[hash] = {
                                                'disappear_timestamp':
                                                disappear_timestamp,
                                                'PokemonId':
                                                Fort.LureInfo.ActivePokemonId
                                            }
                                    expire_time = datetime\
                                        .fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
                                        .strftime("%H:%M:%S")
                                if args.display_pokestop and (
                                        expire_time != 0 or not args.onlylure):
                                    pokestops[Fort.FortId] = [
                                        Fort.Latitude, Fort.Longitude,
                                        expire_time
                                    ]
        except AttributeError:
            break

    for poke in visible:
        pokeid = str(poke.pokemon.PokemonId)
        pokename = pokemonsJSON[pokeid]
        pokename_icon = pokemonsJSON_icons[pokeid]
        disappear_timestamp = time.time() + poke.TimeTillHiddenMs / 1000
        if args.ignore:
            if pokename.lower() in ignore or pokeid in ignore:
                continue
        elif args.only:
            if pokename.lower() not in only and pokeid not in only:
                continue
        if poke.SpawnPointId in spotted_pokemon.keys():
            if spotted_pokemon[
                    poke.SpawnPointId]['disappear_datetime'] > datetime.now():
                continue
        if poke.TimeTillHiddenMs < 0:
            continue

        if args.china:
            (poke.Latitude, poke.Longitude) = \
                transform_from_wgs_to_gcj(Location(poke.Latitude,
                    poke.Longitude))

        disappear_datetime = datetime.fromtimestamp(disappear_timestamp)
        distance = lonlat_to_meters(origin_lat, origin_lon, poke.Latitude,
                                    poke.Longitude)

        if distance < max_distance:
            time_till_disappears = disappear_datetime - datetime.now()
            disappear_hours, disappear_remainder = divmod(
                time_till_disappears.seconds, 3600)
            disappear_minutes, disappear_seconds = divmod(
                disappear_remainder, 60)
            disappear_minutes = str(disappear_minutes)
            disappear_seconds = str(disappear_seconds)
            if len(disappear_seconds) == 1:
                disappear_seconds = str(0) + disappear_seconds
            disappear_time = disappear_datetime.strftime(
                translationsJSON['time_format'])

            # calculate direction of Pokemon in bearing degrees
            direction = bearing_degrees(origin_lat, origin_lon, poke.Latitude,
                                        poke.Longitude)
            # transform in compass direction
            direction = bearing_degrees_to_compass_direction(direction)

            alert_text = strtr(
                translationsJSON['spotted_pokemon'], {
                    '#{latitude}': str(poke.Latitude),
                    '#{longitude}': str(poke.Longitude),
                    '#{distance}': "{0:.2f}".format(distance),
                    '#{direction}': translationsJSON['directions'][direction],
                    '#{disappear_time}': disappear_time,
                    '#{disappear_minutes}': disappear_minutes,
                    '#{disappear_seconds}': disappear_seconds
                })

            if pokemon_icons_prefix != ':pokeball:':
                user_icon = pokemon_icons_prefix + pokename_icon.lower() + ':'
            else:
                user_icon = ':pokeball:'

            try:
                if poke.lured:
                    send_to_slack(alert_text, pokename + ' (lured)', user_icon,
                                  slack_webhook_urlpath)
                else:
                    send_to_slack(alert_text, pokename, user_icon,
                                  slack_webhook_urlpath)
            except AttributeError:
                # some API responses lack the `lured` attribute
                send_to_slack(alert_text, pokename, user_icon,
                              slack_webhook_urlpath)

            spotted_pokemon[poke.SpawnPointId] = {
                'disappear_datetime': disappear_datetime,
                'pokename': pokename
            }

        global api_last_response
        api_last_response = datetime.now()

        pokemons[poke.SpawnPointId] = {
            "lat": poke.Latitude,
            "lng": poke.Longitude,
            "disappear_time": disappear_timestamp,
            "id": poke.pokemon.PokemonId,
            "name": pokename
        }
Example #37
    def __init__(self, config_params):
        self.config = Namespace(**config_params)
        self.config.no_cuda = False
        self.model_save_path = self.config.model_save_path

        super(LanguageModelSimilarityTrain, self).__init__(self.config)
Example #38
    args = Namespace(
        alpha=0.2,
        batch_size=90,
        embedding_size=128,
        epoch_size=1000,
        gpu_memory_fraction=1.0,
        image_size=160,
        images_per_person=5,
        keep_probability=1.0,
        learning_rate=0.1,
        learning_rate_decay_epochs=4,
        learning_rate_decay_factor=0.98,
        learning_rate_schedule_file='no',  # e.g. 'data/learning_rate_schedule_classifier_vggface2.txt'
        max_nrof_epochs=1000,
        model_def='src.models.inception_resnet_v2',
        moving_average_decay=0.9999,
        optimizer='ADAM',  #'ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'
        people_per_batch=8,
        pretrained_model=None,
        random_crop=False,
        random_flip=True,
        seed=666,
        weight_decay=2e-4,

        # Parameters for validation on LFW
        lfw_dir='',
        lfw_nrof_folds=10,
        lfw_pairs='data/pairs.txt',
        lfw_projection='data/lfw_projection.txt',

        # data_dir='/export/livia/Database/COX-S2V/Aligned-COX-S2V-Video/video2',
        data_dir='/export/livia/data/lemoineh/dog_dataset/generated/dog_samples',
        models_base_dir='/export/livia/data/lemoineh/facenet/dog_test/models',
        logs_base_dir='/export/livia/data/lemoineh/facenet/dog_test/logs'

        # Reference invocation of train_tripletloss.py:
        #   python src/train_tripletloss.py \
        #       --logs_base_dir ~/logs/facenet/ \
        #       --models_base_dir ~/models/facenet/ \
        #       --data_dir ~/datasets/casia/casia_maxpy_mtcnnalign_182_160 \
        #       --image_size 160 \
        #       --model_def models.inception_resnet_v1 \
        #       --lfw_dir ~/datasets/lfw/lfw_mtcnnalign_160 \
        #       --optimizer RMSPROP \
        #       --learning_rate 0.01 \
        #       --weight_decay 1e-4 \
        #       --max_nrof_epochs 500
    )
Example #39
    def __init__(self, task, wer_args, zero_infinity, sentence_avg, remove_bpe):
        super().__init__(task)
        self.blank_idx = task.target_dictionary.bos()
        self.pad_idx = task.target_dictionary.pad()
        self.eos_idx = task.target_dictionary.eos()
        self.post_process = remove_bpe if remove_bpe else "letter"

        if wer_args is not None:
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder

            wer_compute_kenlm, wer_lexicon, lm_w, ws_w = eval(wer_args)

            dec_args = Namespace()
            dec_args.nbest = 1
            dec_args.criterion = "ctc"
            dec_args.kenlm_model = wer_compute_kenlm
            dec_args.lexicon = wer_lexicon
            dec_args.beam = 50
            dec_args.beam_size_token = min(50, len(task.target_dictionary))
            dec_args.beam_threshold = min(50, len(task.target_dictionary))
            dec_args.lm_weight = lm_w
            dec_args.word_score = ws_w
            dec_args.unk_weight = -math.inf
            dec_args.sil_weight = 0

            self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
        else:
            self.w2l_decoder = None

        self.zero_infinity = zero_infinity
        self.sentence_avg = sentence_avg
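The fragment above is an __init__ from a CTC criterion; since wer_args is passed through eval(), it must arrive as the string form of a 4-tuple. A hypothetical construction (the class name "CtcCriterion" and all paths below are assumptions, not taken from the source):

criterion = CtcCriterion(
    task,
    wer_args='("lm.bin", "lexicon.lex", 2.0, -1.0)',  # kenlm model, lexicon, LM weight, word score
    zero_infinity=True,
    sentence_avg=False,
    remove_bpe=None,
)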
Example #40
 def assertNamespaceEqual(self, expected, actual):
     """
     Check that namespaces are equal.
     """
     namespace = Namespace(**expected)
     self.assertEqual(namespace, actual)
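A one-line usage sketch, inside a unittest.TestCase method:

self.assertNamespaceEqual({'foo': 'bar'}, Namespace(foo='bar'))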
Example #41
 def setUp(self):
     self.cmd = Command()
     self.cmd.signature = 'example:cmd { arg1 } {--opt1}'
     name, params = Parser.parse(self.cmd.signature)
     self.cmd._params = params
     self.cmd._values = Namespace(arg1='test arg1', opt1='test opt1')
Example #42
def nll_and_kl(recon_x, x, log_likelihood, a, b, logsample, z_discrete,
               logit_post, log_prior, mu, logvar, dataset_size,
               args=Namespace(), test=False):
    batch_size = x.size()[0]
    NLL = -1 * log_likelihood(recon_x, x)
    KL_zreal = -0.5 * (1. + logvar - mu**2 - logvar.exp())
    KL_beta = kl_divergence(
        a, b, prior_alpha=args.alpha0,
        log_beta_prior=np.log(1. / args.alpha0),
        args=args).repeat(batch_size, 1) * (1. / dataset_size)

    # in test mode, our samples are essentially coming from a Bernoulli
    if not test:
        KL_discrete = kl_discrete(logit_post, logit(log_prior.exp()), logsample, args.temp, args.temp_prior)
    else:
        pi_prior = torch.exp(log_prior)
        pi_posterior = torch.sigmoid(logit_post)
        kl_1 = z_discrete * (pi_posterior + SMALL).log() + (1 - z_discrete) * (1 - pi_posterior + SMALL).log()
        kl_2 = z_discrete * (pi_prior + SMALL).log() + (1 - z_discrete) * (1 - pi_prior + SMALL).log()
        KL_discrete = kl_1 - kl_2

    return NLL, KL_zreal, KL_beta, KL_discrete
Example #43
def elbo(recon_x, x, log_likelihood, a, b, logsample, z_discrete, logit_post,
         log_prior, mu, logvar, dataset_size, args=Namespace(), test=False):
    NLL, KL_zreal, KL_beta, KL_discrete = nll_and_kl(
        recon_x, x, log_likelihood, a, b, logsample, z_discrete, logit_post,
        log_prior, mu, logvar, dataset_size, args, test=test)
    return NLL.sum() + KL_zreal.sum() + KL_beta.sum() + KL_discrete.sum(), (NLL, KL_zreal, KL_beta, KL_discrete)
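Both functions lean on a SMALL constant and a logit helper that the fragment does not define. A minimal sketch of what they might look like (assumed, not the original definitions):

import torch

SMALL = 1e-16  # numerical floor to keep logs finite

def logit(p):
    # Inverse sigmoid, clamped away from 0 and 1 for stability.
    return torch.log(p + SMALL) - torch.log(1. - p + SMALL)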
Example #44
def load_config(load_path):
    with open(load_path, "r") as f:  # context manager avoids leaking the handle
        data = json.load(f)
    config = Namespace()
    config.__dict__ = data
    return config
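A matching writer (hypothetical, not part of the original) that round-trips the Namespace back to JSON:

def save_config(config, save_path):
    # Persist the Namespace's attributes as a JSON object.
    with open(save_path, "w") as f:
        json.dump(vars(config), f, indent=2)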
Example #45
File: m2r2.py  Project: CrossNox/m2r2
import sys
from argparse import ArgumentParser, Namespace

if sys.version_info < (3, ):
    from codecs import open as _open
    from urlparse import urlparse
else:
    _open = open
    from urllib.parse import urlparse

_is_sphinx = False
prolog = """\
.. role:: raw-html-m2r(raw)
   :format: html

"""

# for command-line use
parser = ArgumentParser()
options = Namespace()
parser.add_argument("input_file",
                    nargs="*",
                    help="files to convert to reST format")
parser.add_argument(
    "--overwrite",
    action="store_true",
    default=False,
    help="overwrite output file without confirmaion",
)
parser.add_argument(
    "--dry-run",
    action="store_true",
    default=False,
    help="print conversion result and not save output file",
)
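The module-level options namespace is presumably filled in later. A sketch of such an entry point (name and signature assumed, not taken from the project):

def parse_options(args=None):
    # Fill the module-level `options` in place so the parsed flags
    # become visible to the rest of the module.
    parser.parse_known_args(args, namespace=options)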
Example #46
config.lstm_type = options.get('algorithm/lstm_type',
                               'Basic')  # Basic | Dilated
config.lstm_num_cores = options.get('algorithm/lstm_num_cores', 8)

config.max_global_step = options.get('algorithm/max_global_step', 5e7)
config.use_linear_schedule = options.get('algorithm/use_linear_schedule',
                                         False)

config.initial_learning_rate = options.get('algorithm/initial_learning_rate',
                                           1e-4)
config.learning_rate_end = options.get('algorithm/learning_rate_end', 0.0)

config.optimizer = options.get('algorithm/optimizer', 'Adam')  # Adam | RMSProp
# RMSProp default parameters
if not hasattr(config, 'RMSProp'):
    config.RMSProp = options.get('algorithm/RMSProp', Namespace())
config.RMSProp.decay = options.get('algorithm/RMSProp/decay', 0.99)
config.RMSProp.epsilon = options.get('algorithm/RMSProp/epsilon', 0.1)

config.policy_clip = options.get('algorithm/policy_clip', False)
config.critic_clip = options.get('algorithm/critic_clip', False)

config.norm_adv = options.get('algorithm/normalize_advantage', False)

config.output.loss_type = options.get('algorithm/output/loss_type',
                                      'Normal')  # Normal | Expanded | Extended
config.gradients_norm_clipping = options.get(
    'algorithm/gradients_norm_clipping', False)

config.entropy_beta = options.get('algorithm/entropy_beta', 0.01)
config.entropy_type = options.get('algorithm/entropy_type',
                                  'Origin')  # default assumed; the snippet is truncated here
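The options.get calls above resolve '/'-separated paths against a nested configuration. A minimal sketch of a helper with that shape (hypothetical, not the project's actual loader):

class Options:
    def __init__(self, mapping):
        self._mapping = mapping

    def get(self, path, default=None):
        # Walk the nested dict by '/'-separated keys, returning the
        # supplied default when any segment is missing.
        node = self._mapping
        for key in path.split('/'):
            if not isinstance(node, dict) or key not in node:
                return default
            node = node[key]
        return node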
Example #47
def test_applicationaction(good_mock_service):
    """Test _ApplicationAction."""
    action = cli._ApplicationAction(option_strings='a', dest='app')
    namespace = Namespace()
    action(None, namespace, 'good_import:app')
    assert isinstance(namespace.app, Application)
Example #48
File: qa_emma.py  Project: zhihuishuwp/emma
    def test_corrtrain_correlation_multi(self):
        """
        Artificial example to test AICorrNet and trace processing with
        multiple leakage values and multiple subkeys.
        """
        from leakagemodels import LeakageModel

        # ------------------------------
        # Generate data
        # ------------------------------
        traces = [  # Contains abs(trace). Shape = [trace, point]
            [1, 1, 1, -15],
            [-4, 2, 2, -12],
            [10, 3, 3, 8],
            [8, 1, 1, -14],
            [9, 0, -3, 8],
        ]

        plaintexts = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 13, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 15, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        keys = [
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        # Convert to numpy
        traces = np.array(traces)
        plaintexts = np.array(plaintexts)
        keys = np.array(keys)

        trace_set = TraceSet(name='test',
                             traces=traces,
                             plaintexts=plaintexts,
                             keys=keys)

        # ------------------------------
        # Preprocess data
        # ------------------------------
        conf = Namespace(
            max_cache=0,
            augment_roll=False,
            augment_noise=False,
            normalize=False,
            traces_per_set=4,
            online=False,
            dataset_id='qa',
            cnn=False,
            leakage_model=LeakageModelType.AES_MULTI,
            input_type=AIInputType.SIGNAL,
            augment_shuffle=True,
            n_hidden_layers=1,
            n_hidden_nodes=256,
            activation='leakyrelu',
            metric_freq=100,
            regularizer=None,
            reglambda=0.001,
            model_suffix=None,
            use_bias=True,
            batch_norm=True,
            hamming=False,
            key_low=1,
            key_high=3,
            loss_type='correlation',
            lr=0.001,
            epochs=5000,
            batch_size=512,
            norank=False,
        )
        it_dummy = AICorrSignalIterator([],
                                        conf,
                                        batch_size=10000,
                                        request_id=None,
                                        stream_server=None)
        x, y = it_dummy._preprocess_trace_set(trace_set)

        # ------------------------------
        # Train and obtain encodings
        # ------------------------------
        model = ai.AICorrNet(conf, input_dim=4, name="test")
        print(model.info())
        rank_cb = rank.CorrRankCallback(conf,
                                        '/tmp/deleteme/',
                                        save_best=False,
                                        save_path=None)
        rank_cb.set_trace_set(trace_set)

        if model.using_regularization:
            print(
                "Warning: can't do correlation loss test because the regularizer will influence the loss function"
            )
            return

        # Find optimal weights
        print("The x (EM samples) and y (leakage model values) are:")
        print(x)
        print(y)
        print(
            "When feeding x through the model without training, the encodings become:"
        )
        print(model.predict(x))
        print("Training now")
        model.train_set(x,
                        y,
                        save=False,
                        epochs=conf.epochs,
                        extra_callbacks=[rank_cb])
        print("Done training")

        # Get the encodings of the input data using the same approach used in ops.py corrtest (iterate over rows)
        result = []
        for i in range(0, x.shape[0]):
            result.append(
                model.predict(np.array([x[i, :]], dtype=float))[0]
            )  # Result contains sum of points such that corr with y[key_index] is maximal for all key indices. Shape = [trace, 16]
        result = np.array(result)
        print(
            "When feeding x through the model after training, the encodings for key bytes %d to %d become:\n %s"
            % (conf.key_low, conf.key_high, str(result)))

        # ------------------------------
        # Check loss function
        # ------------------------------
        # Evaluate the model to get the loss for the encodings
        predicted_loss = model.model.evaluate(x, y, verbose=0)

        # Manually calculate the loss using numpy to verify that we are learning a correct correlation
        calculated_loss = 0
        num_keys = (conf.key_high - conf.key_low)
        num_outputs = LeakageModel.get_num_outputs(conf) // num_keys
        for i in range(0, num_keys):
            subkey_hws = y[:, i * num_outputs:(i + 1) * num_outputs]
            subkey_encodings = result[:, i * num_outputs:(i + 1) * num_outputs]
            print("Subkey %d HWs   : %s" % (i + conf.key_low, str(subkey_hws)))
            print("Subkey %d encodings: %s" %
                  (i + conf.key_low, str(subkey_encodings)))
            y_key = subkey_hws.reshape([-1, 1])
            y_pred = subkey_encodings.reshape([-1, 1])
            print("Flattened subkey %d HWs   : %s" %
                  (i + conf.key_low, str(y_key)))
            print("Flattened subkey %d encodings: %s" %
                  (i + conf.key_low, str(y_pred)))

            # Calculate correlation (numpy approach)
            corr_key_i = np.corrcoef(y_pred[:, 0], y_key[:, 0],
                                     rowvar=False)[1, 0]
            print("corr_num: %s" % corr_key_i)

            calculated_loss += 1.0 - corr_key_i

        print("These values should be close:")
        print("Predicted loss: %s" % str(predicted_loss))
        print("Calculated loss: %s" % str(calculated_loss))
        self.assertAlmostEqual(predicted_loss, calculated_loss, places=2)
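The manual check above amounts to a per-column "1 - Pearson correlation" loss. A compact numpy reference of that idea (a sketch for clarity, not the Keras loss the model actually uses):

import numpy as np

def correlation_loss(y_true, y_pred):
    # Sum of (1 - Pearson r) over output columns.
    loss = 0.0
    for j in range(y_true.shape[1]):
        loss += 1.0 - np.corrcoef(y_pred[:, j], y_true[:, j])[1, 0]
    return loss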
Example #49
def ldsc_h2_part(args, **kwargs):
    """
    Runs LD score to estimate h2 for the named UKBB phenotype
    
    Args is a list with elements:
    - args[0] = phenotype name
    - args[1] = phenotype description
    - args[2] = phenotype source (phesant, etc)
    - args[3] = N
    - args[4] = N_cases
    - args[5] = N_controls
    
    Keyword args are for global settings:
    - wd            (working directory)
    - ld_ref_panel  (local path, supplied to --ref-ld-chr)
    - ld_w_panel    (local path, supplied to --w-ld-chr)
    - ld_frq_panel  (local path, supplied to --frqfile-chr)
    - ss_bucket     (cloud bucket containing sumstats files)
    
    Using this structure for the sake of multiprocessing.pool.map()
    """

    # handle args
    phname = str(args[0])
    phdesc = str(args[1])
    phsource = str(args[2])
    n = float(args[3])
    ncas = float(args[4])
    ncon = float(args[5])

    # define names

    ss_name = str(phname) + '.ldsc.imputed_v3.' + sex_group + '.tsv.bgz'
    sspath_local = wd + '/' + ss_name
    sspath_cloud = ss_bucket + '/' + ss_name
    h2_out = 'h2part.ukbb.' + str(sex_group) + '.' + str(phname)

    # download sumstats file
    subprocess.call(['gsutil', 'cp', sspath_cloud, sspath_local])

    # run ldsc
    args_h2 = Namespace(out=h2_out,
                        bfile=None,
                        l2=None,
                        extract=None,
                        keep=None,
                        ld_wind_snps=None,
                        ld_wind_kb=None,
                        ld_wind_cm=None,
                        print_snps=None,
                        annot=None,
                        thin_annot=False,
                        cts_bin=None,
                        cts_break=None,
                        cts_names=None,
                        per_allele=False,
                        pq_exp=None,
                        no_print_annot=False,
                        maf=0.05,
                        h2=sspath_local,
                        rg=None,
                        ref_ld=None,
                        ref_ld_chr=ld_ref_panel,
                        w_ld=None,
                        w_ld_chr=ld_w_panel,
                        overlap_annot=True,
                        no_intercept=False,
                        intercept_h2=None,
                        intercept_gencov=None,
                        M=None,
                        two_step=None,
                        chisq_max=99999,
                        print_cov=False,
                        print_delete_vals=False,
                        chunk_size=50,
                        pickle=False,
                        invert_anyway=False,
                        yes_really=False,
                        n_blocks=200,
                        not_M_5_50=False,
                        return_silly_things=False,
                        no_check_alleles=False,
                        print_coefficients=True,
                        samp_prev=None,
                        pop_prev=None,
                        frqfile=None,
                        h2_cts=None,
                        frqfile_chr=ld_frq_panel,
                        print_all_cts=False,
                        sumstats_frames=None,
                        rg_mat=False)

    print "Launching ldsc for " + str(phname)
    h2_results = ldsc.sumstats.estimate_h2(args_h2, Logger_to_Logging())
    print "Completed ldsc for " + str(phname)

    # cleanup sumstats file
    subprocess.call(['rm', sspath_local])

    return process_h2_part(h2_results, h2_out + '.results', phname, phdesc,
                           phsource, float(n), float(ncas), float(ncon))
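A sketch of how this function might be driven through multiprocessing.pool.map, as the docstring intends (the phenotype rows below are made-up placeholders):

from multiprocessing import Pool

phens = [
    # (name, description, source, N, N_cases, N_controls) -- dummy values
    ('pheno_a', 'Example phenotype A', 'phesant', 360000, 0, 0),
    ('pheno_b', 'Example phenotype B', 'phesant', 360000, 0, 0),
]

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        results = pool.map(ldsc_h2_part, phens)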
Example #50
 def POST(self):
     then = datetime.now()  #tracking for the time delta
     myform = dataform()
     row = None
     if not myform.validates():
         return render.forms(myform)
     else:
         try:
             reader = csv.reader(open('form_values.csv'))
             row = next(reader)
         except StopIteration:
             with open("form_values.csv",
                       "w+b") as csvf:  #if the file doesn't exist
                 csvf.write(''.join(str(x) for x in [","] * 30))
                 reader = csv.reader(open("form_values.csv"))
                 row = next(reader)
         except IOError:
             with open("form_values.csv",
                       "w+") as csvf:  # if the file doesn't exist yet
                 csvf.write("," * 30)  # seed 30 empty comma-separated fields
             reader = csv.reader(open("form_values.csv"))
             row = next(reader)
         if "thebluealliance" in myform.d.mcode:
             myform.mcode.set_value(myform.d.mcode.split("_")[-1])
         options = Namespace()
         formdata = web.input()
         options.then = then
         options.gui = True
         options.where = row[0] = myform.d.where
         options.prodteam = row[1] = myform.d.prodteam
         options.twit = row[2] = myform.d.twit
         options.fb = row[3] = myform.d.fb
         options.weblink = row[4] = myform.d.weblink
         options.ename = row[5] = myform.d.ename
         options.ecode = row[6] = myform.d.ecode
         options.pID = row[7] = myform.d.pID
         options.tbaID = row[8] = myform.d.tbaID
         options.tbaSecret = row[9] = myform.d.tbaSecret
         options.description = row[10] = myform.d.description
         options.mcode = row[11] = myform.d.mcode
         options.mnum = row[12] = int(myform.d.mnum)
         options.mtype = row[13] = myform.d.mtype
         options.tiebreak = 0 if myform.d.tiebreak == "no" else 1
         row[14] = myform.d.tiebreak
         options.tba = 0 if myform.d.tba == "no" else 1
         row[15] = myform.d.tba
         options.ceremonies = row[16] = myform.d.ceremonies
         options.eday = row[17] = myform.d.eday
         options.end = row[18] = myform.d.end
         thr = threading.Thread(target=yup.init, args=(options, ))
         thr.daemon = True
         thr.start()
         if int(myform.d.ceremonies) == 0:
             if myform.d.end == "Only for batch uploads":
                 myform.mnum.set_value(str(int(myform.d.mnum) + 1))
             else:
                 myform.mnum.set_value(str(int(myform.d.end) + 1))
                 myform.end.set_value("Only for batch uploads")
         elif int(myform.d.ceremonies) == 2:
             myform.mnum.set_value("1")
             myform.mcode.set_value("qf")
         if myform.d.mcode == "qm" and myform.d.tiebreak == "yes":
             myform.tiebreak.set_value("no")
         row[12] = int(myform.d.mnum)
         row[18] = myform.d.end
         with open('form_values.csv', 'w') as csvfile:
             csv.writer(csvfile).writerow(row)
         return render.forms(myform)
Example #51
File: main.py  Project: uncbiag/pregis
    t_v = T.reshape(image_size, 1) # total variation term
    prefix = current_folder + '/' + 'Iter' + str(current_iter)
 
    lowRankIm = prefix + '_LowRank.nii.gz'
    totalIm = prefix + '_TV.nii.gz'
    masktvIm = prefix + '_TVmask.nii.gz'

    save_image_from_data_matrix(l_v,atlas_im_name,lowRankIm)
    save_image_from_data_matrix(t_v,atlas_im_name,totalIm)

    createTVMask(totalIm, masktvIm)
       

    return


def main(args):
    configure = performInitialization(args)
    atlas_arr = sitk.GetArrayFromImage(sitk.ReadImage(configure['atlas_im_name']))
    z,x,y = atlas_arr.shape
    image_size = x*y*z

    D_Basis, D_BasisT, D_mean = ReadPCABasis(image_size, configure)
    performIteration(configure, D_Basis, D_BasisT, D_mean, image_size)

if __name__ == '__main__':
    # note: sys.argv[2] is not consumed in this fragment
    args = Namespace(input_image=sys.argv[1],
                     gamma=float(sys.argv[3]),
                     num_of_correction=int(sys.argv[4]),
                     platform=sys.argv[5],
                     debug=1,
                     verbose=True)
    main(args)
 

Example #52
    model_state_dict = model.state_dict()
    optim_state_dict = optim.optimizer.state_dict()
    checkpoints = {
        "model": model_state_dict,
        "config": config,
        "updates": updates,
        "optim": optim_state_dict,
    }
    torch.save(checkpoints, path)


if __name__ == "__main__":
    # Combine command-line arguments and yaml file arguments
    opt = opts.model_opts()
    with open(opt.config, "r") as f:
        config = yaml.safe_load(f)  # safe_load: the config is plain YAML
    config = Namespace(**config, **vars(opt))

    writer = misc_utils.set_tensorboard(config)
    device, devices_id = misc_utils.set_cuda(config)
    misc_utils.set_seed(config.seed)

    if config.label_dict_file:
        with open(config.label_dict_file, "r") as f:
            label_dict = json.load(f)

    if config.restore:
        print("loading checkpoint...\n")
        checkpoints = torch.load(config.restore,
                                 map_location=lambda storage, loc: storage)
    else:
        checkpoints = None
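A hypothetical continuation of the restore branch, once model and optim have been built (neither is shown in this fragment):

if checkpoints is not None:
    model.load_state_dict(checkpoints["model"])
    optim.optimizer.load_state_dict(checkpoints["optim"])
    updates = checkpoints["updates"]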
Example #53
def _set_not_required_args(args: argparse.Namespace) -> None:
    args.fixed_model_name = None
    args.store_uncompressed = False
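Typical use, assuming the parser did not define these two attributes (the 'domain' attribute below is made up for illustration):

import argparse

args = argparse.Namespace(domain='domain.yml')
_set_not_required_args(args)
assert args.fixed_model_name is None
assert args.store_uncompressed is False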
Example #54
from argparse import Namespace
from .param_cond import NAME_SOMA
import logging

param_sim = Namespace()

# Options for setting up stim paradigm
#param_sim.stim_loc = NAME_SOMA
param_sim.stim_paradigm = 'inject'
param_sim.injection_current = [0e-12,
                               -200e-12]  #[-200e-12,-100e-12, 50e-12,150e-12]
param_sim.injection_delay = 0.1
param_sim.injection_width = 0.3
param_sim.simtime = 4.0

param_sim.neuron_type = None

param_sim.logging_level = logging.INFO

param_sim.save = False  # set True to save to hdf5
param_sim.save_txt = False  # set True to save text files

#smaller than previous to improve stability
param_sim.simdt = 1e-06
param_sim.hsolve = True

param_sim.plotcomps = [NAME_SOMA]

param_sim.fname = None  # `None` uses default specified by stim paradigm

param_sim.plot_vm = True
Example #55
import torch
import spacy
from argparse import Namespace

from transformer.flow import make_model, batch_size_fn, run_epoch
from transformer.batch import rebatch
from transformer.greedy import greedy_decode
from transformer.noam_opt import NoamOpt
from transformer.iterator import Iterator
from transformer.label_smoothing import LabelSmoothing
from transformer.multi_gpu_loss_compute import MultiGPULossCompute

args = Namespace(
    BOS_TOKEN="<s>",
    EOS_TOKEN="</s>",
    BLANK_TOKEN="<blank>",
    MAX_LEN=100,
    MIN_FREQ=2,
    cuda=True,
    devices=[0],  # [0, 1],
    batch_size=1200,
    num_epochs=1,
    factor=1,
    warm_up=2000,
    betas=(0.9, 0.98),
    eps=1e-9,
    load_model=True,
    model_path='models/params.pkl')

if not torch.cuda.is_available():
    args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA:", args.cuda)

print("Loading data...")
spacy_de = spacy.load("de")
Example #56
def test_forward_and_inference_are_equal(model_dict):
    # make args
    model_args = make_transformer_args(dprenet_dropout_rate=0.0, **model_dict)

    # setup batch
    idim = 5
    odim = 10
    ilens = [10]
    olens = [20]
    batch = prepare_inputs(idim, odim, ilens, olens)
    xs = batch["xs"]
    ilens = batch["ilens"]
    ys = batch["ys"]
    olens = batch["olens"]

    # define model
    model = Transformer(idim, odim, Namespace(**model_args))
    model.eval()

    # TODO(kan-bayashi): update following ugly part
    with torch.no_grad():
        # --------- forward calculation ---------
        x_masks = model._source_mask(ilens)
        hs_fp, _ = model.encoder(xs, x_masks)
        if model.reduction_factor > 1:
            ys_in = ys[:, model.reduction_factor - 1::model.reduction_factor]
            olens_in = olens.new(
                [olen // model.reduction_factor for olen in olens])
        else:
            ys_in, olens_in = ys, olens
        ys_in = model._add_first_frame_and_remove_last_frame(ys_in)
        y_masks = model._target_mask(olens_in)
        xy_masks = model._source_to_target_mask(ilens, olens_in)
        zs, _ = model.decoder(ys_in, y_masks, hs_fp, xy_masks)
        before_outs = model.feat_out(zs).view(zs.size(0), -1, model.odim)
        logits = model.prob_out(zs).view(zs.size(0), -1)
        after_outs = before_outs + model.postnet(before_outs.transpose(
            1, 2)).transpose(1, 2)
        # --------- forward calculation ---------

        # --------- inference calculation ---------
        hs_ir, _ = model.encoder(xs, None)
        maxlen = ys_in.shape[1]
        minlen = ys_in.shape[1]
        idx = 0
        # this is the inference calculation, but we use ground truth to check the behavior
        ys_in_ = ys_in[0, idx].view(1, 1, model.odim)
        np.testing.assert_array_equal(
            ys_in_.new_zeros(1, 1, model.odim).detach().cpu().numpy(),
            ys_in_.detach().cpu().numpy(),
        )
        outs, probs = [], []
        while True:
            idx += 1
            y_masks = subsequent_mask(idx).unsqueeze(0)
            z = model.decoder.recognize(ys_in_, y_masks,
                                        hs_ir)  # (B, idx, adim)
            outs += [model.feat_out(z).view(1, -1,
                                            model.odim)]  # [(1, r, odim), ...]
            probs += [torch.sigmoid(model.prob_out(z))[0]]  # [(r), ...]
            if idx >= maxlen:
                if idx < minlen:
                    continue
                outs = torch.cat(outs, dim=1).transpose(
                    1, 2)  # (1, L, odim) -> (1, odim, L)
                if model.postnet is not None:
                    outs = outs + model.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                break
            ys_in_ = torch.cat((ys_in_, ys_in[0, idx].view(1, 1, model.odim)),
                               dim=1)  # (1, idx + 1, odim)
        # --------- inference calculation ---------

        # check both are equal
        np.testing.assert_array_almost_equal(
            hs_fp.detach().cpu().numpy(),
            hs_ir.detach().cpu().numpy(),
        )
        np.testing.assert_array_almost_equal(
            after_outs.squeeze(0).detach().cpu().numpy(),
            outs.detach().cpu().numpy(),
        )
        np.testing.assert_array_almost_equal(
            torch.sigmoid(logits.squeeze(0)).detach().cpu().numpy(),
            probs.detach().cpu().numpy(),
        )
Example #57
File: qa_emma.py  Project: zhihuishuwp/emma
    def test_autoenctrain(self):
        """
        Artificial example to test AutoEncoder
        """

        # ------------------------------
        # Generate data
        # ------------------------------
        traces = [  # Contains abs(trace). Shape = [trace, point]
            [1, 1, 1, -15],
            [-4, 1, 2, -12],
            [10, 1, 3, 8],
            [8, 1, 1, -14],
            [9, 1, -3, 8],
        ]

        plaintexts = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        keys = [
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        # Convert to numpy
        traces = np.array(traces)
        plaintexts = np.array(plaintexts)
        keys = np.array(keys)

        trace_set = TraceSet(name='test',
                             traces=traces,
                             plaintexts=plaintexts,
                             keys=keys)

        # ------------------------------
        # Preprocess data
        # ------------------------------
        conf = Namespace(
            max_cache=0,
            augment_roll=False,
            augment_noise=False,
            normalize=False,
            traces_per_set=4,
            online=False,
            dataset_id='qa',
            cnn=False,
            leakage_model=LeakageModelType.HAMMING_WEIGHT_SBOX,
            input_type=AIInputType.SIGNAL,
            augment_shuffle=True,
            n_hidden_layers=1,
            n_hidden_nodes=256,
            activation='leakyrelu',
            metric_freq=100,
            regularizer=None,
            reglambda=0.001,
            model_suffix=None,
            use_bias=True,
            batch_norm=True,
            hamming=False,
            key_low=2,
            key_high=3,
            loss_type='correlation',
            lr=0.0001,
            epochs=2000,
            batch_size=512,
            norank=False,
        )
        it_dummy = AutoEncoderSignalIterator([],
                                             conf,
                                             batch_size=10000,
                                             request_id=None,
                                             stream_server=None)
        x, y = it_dummy._preprocess_trace_set(trace_set)

        # ------------------------------
        # Train and obtain encodings
        # ------------------------------
        model = ai.AutoEncoder(conf, input_dim=4, name="test")
        print(model.info())

        # Find optimal weights
        print("X, Y")
        print(x)
        print(y)
        print(
            "When feeding x through the model without training, the encodings become:"
        )
        print(model.predict(x))
        print("Training now")
        model.train_set(x, y, epochs=conf.epochs)
        print("Done training")

        # Get the encodings of the input data using the same approach used in ops.py corrtest (iterate over rows)
        result = []
        for i in range(0, x.shape[0]):
            result.append(
                model.predict(np.array([x[i, :]], dtype=float))[0]
            )  # each row is the autoencoder's reconstruction of one input trace
        result = np.array(result)

        for i in range(result.shape[0]):
            rounded_result = np.round(result[i])
            print("Original x    : %s" % x[i])
            print("Rounded result: %s" % rounded_result)
            self.assertListEqual(list(rounded_result), list(x[i]))
Example #58
File: randp.py  Project: xodyd/internalblue
    # and now read and save the random
    random = internalblue.readMem(MEM_RNG, MEM_ROUNDS * 5)
    data.extend(random)

    i = i + 1

internalblue.logger.info("Finished acquiring random data!")

# every 5th byte is 0x42
check = data[4::5]
for c in check:
    if c != 0x42:
        internalblue.logger.error("Data was corrupted by another process!")

# drop every 5th byte (the 0x42 marker) via numpy
data = np.delete(data, np.arange(4, len(data), 5))

with open("6p_randomdata_pseudo-%irounds-reg0x%x.bin" % (rounds, PRAND),
          "wb") as f:
    f.write(data)

internalblue.logger.info("--------------------")
internalblue.logger.info("Entering InternalBlue CLI to interpret RNG.")

# enter CLI
cli = InternalBlueCLI(
    Namespace(data_directory=None, verbose=False, trace=None, save=None),
    internalblue)
sys.exit(cli.cmdloop())
Example #59
def test_make_atom_poscars_wo_options():
    parsed_args = parse_args(["map"])
    expected = Namespace(dirname=Path.cwd(),
                         elements=None,
                         func=parsed_args.func)
    assert parsed_args == expected
Example #60
args = {
    'device' : 'cuda' if torch.cuda.is_available() else 'cpu',
    'd_lr' : 1e-4,
    'g_lr' : 1e-4,
    'num_epochs' : 15,
    'num_resblocks' : 16,
    'overwrite_cache' : False,
    'cache_dir' : 'data_cache/',
    'batch_size' : 64,
    'print_every' : 4,
    'save_every' : 23,
    'visible_gpus' : [1,0,4,5],
    'model_dir' : 'model_cache/'
}

args = Namespace(**args)

# if torch.cuda.device_count() > 1:
#     args.n_gpus = torch.cuda.device_count()
# else:
#     args.n_gpus = 0
# g = SRGAN_Generator().to(args.device)
# d = SRGAN_Discriminator(256).to(args.device)
# if args.n_gpus > 1:
#     g = nn.DataParallel(g)
#     d = nn.DataParallel(d)


train_loader, test_loader, val_loader = get_loaders(args)