Ejemplo n.º 1
0
  def testWithEncryptedVolumes(self, get_state_mock, gui_oauth_mock, _):
    """Runs main with oauth2 login and checks the encrypted-volume prompt flow."""
    server = 'https://cvest.appspot.com'
    argv = ['main_test', '--login_type', 'oauth2',
            '--server_url', server,
            '--username', 'user']

    app.run(main.main, argv=argv)

    # State is queried exactly once during the run.
    get_state_mock.assert_called_once()

    # The OAuth GUI is constructed for the given server/user and prompts once.
    gui_oauth_mock.assert_called_once_with(server, 'user')
    gui_oauth_mock.return_value.EncryptedVolumePrompt.assert_called_once()
Ejemplo n.º 2
0
def run_main():
  """Initializes flags and calls main()."""
  program.setup_environment()
  tb = program.TensorBoard(default.get_plugins(),
                           default.get_assets_zip_provider())
  try:
    from absl import app
    # Probe that this absl version's app.run() accepts a flags_parser argument.
    from absl.flags import argparse_flags
    app.run(tb.main, flags_parser=tb.configure)
    raise AssertionError("absl.app.run() shouldn't return")
  except ImportError:
    # absl unavailable or too old: fall back to manual flag parsing below.
    pass
  tb.configure(sys.argv)
  sys.exit(tb.main())
Ejemplo n.º 3
0
def MaybeRunScriptInstead():
  """If SCRIPT_NAME is set in the environment, runs that module's main()."""
  if "SCRIPT_NAME" not in os.environ:
    return
  # Make sibling modules importable, then hand control to the named script.
  sys.path.append(os.path.dirname(__file__))
  target = importlib.import_module(os.environ["SCRIPT_NAME"])
  sys.exit(app.run(target.main))
			) as env:

			for i in range(100000):
				agent.setup(env.observation_spec(), env.action_spec())
				timesteps = env.reset()
				agent.reset()

				while True:
					action, pi, last_logp_pi = agent.step(timesteps[0])
					step_actions = [action]
					old_timesteps = timesteps
					timesteps = env.step(step_actions)
					if(isTraining):
						agent.trainAgent(timesteps[0], old_timesteps[0], pi, timesteps[0].reward, last_logp_pi)
					if timesteps[0].last():
						break

	except KeyboardInterrupt:
		pass


if __name__ == '__main__':
	# Known args configure this script; anything unknown is forwarded to absl.
	# NOTE(review): argparse's type=bool treats any non-empty string as True
	# (e.g. "--training False" is truthy) — confirm this is intended.
	parser = argparse.ArgumentParser()
	parser.add_argument('--model', type=str, help='Name of the model')
	parser.add_argument('--replay', type=bool, help="Save a replay of the experiment")
	parser.add_argument('--training', type=bool, help="if is training")
	args, unknown_flags = parser.parse_known_args()
	absl_argv = sys.argv[:1] + unknown_flags
	flags.FLAGS(absl_argv)

	app.run(main, argv=absl_argv)
Ejemplo n.º 5
0
def GRRFuse():
  """Entry point: runs `fuse_mount.main` under absl's app.run."""
  from grr_response_server.bin import fuse_mount as _fuse_mount
  app.run(_fuse_mount.main)
Ejemplo n.º 6
0
def GrrFrontend():
  """Entry point: runs `frontend.main` under absl's app.run."""
  from grr_response_server.bin import frontend as _frontend
  app.run(_frontend.main)
Ejemplo n.º 7
0
def ApiShellRawAccess():
  """Entry point: runs `api_shell_raw_access.main` under absl's app.run."""
  from grr_response_server.bin import api_shell_raw_access as _shell
  app.run(_shell.main)
def test_main():
    """The benchmark's main is expected to terminate via sys.exit()."""
    argv = (
        'main',
        'from dev_tools.profiling.benchmark_xmon_simulator import simulate'
    )
    with pytest.raises(SystemExit):
        app.run(benchmark_xmon_simulator.main, argv=argv)
Ejemplo n.º 9
0
  flags.register_validator(
      'generated_x_dir',
      lambda x: False if (FLAGS.image_set_y_glob and not x) else True,
      'Must provide `generated_x_dir`.')
  flags.register_validator(
      'generated_y_dir',
      lambda x: False if (FLAGS.image_set_x_glob and not x) else True,
      'Must provide `generated_y_dir`.')


def main(_):
  """Builds both inference graphs, restores the checkpoint, and exports images."""
  _validate_flags()
  x_placeholder, fake_y = make_inference_graph('ModelX2Y', FLAGS.patch_dim)
  y_placeholder, fake_x = make_inference_graph('ModelY2X', FLAGS.patch_dim)

  saver = tf.train.Saver()
  with tf.Session() as sess:
    # Restore all the variables that were saved in the checkpoint.
    saver.restore(sess, FLAGS.checkpoint_path)

    # Export in both directions: X->Y and Y->X.
    export(sess, x_placeholder, fake_y, FLAGS.image_set_x_glob,
           FLAGS.generated_y_dir)
    export(sess, y_placeholder, fake_x, FLAGS.image_set_y_glob,
           FLAGS.generated_x_dir)


if __name__ == '__main__':
  # NOTE(review): app.run() is called with no main callable — this matches the
  # legacy tf.app.run() signature, not absl's; confirm which `app` is imported.
  app.run()
Ejemplo n.º 10
0
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to create a single py_binary that can call multiple py_binaries.

This simulates executing a python script by importing a module name by the
environment 'SCRIPT_NAME' and executing its main via `app.run`.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import importlib
import os
import sys

from absl import app


if __name__ == '__main__':
  # Make sibling modules importable, then dispatch to `SCRIPT_NAME`'s main.
  sys.path.append(os.path.dirname(__file__))
  target_module = importlib.import_module(os.environ['SCRIPT_NAME'])
  app.run(target_module.main)
Ejemplo n.º 11
0
FLAGS = flags.FLAGS

# --name selects a single model; when omitted, main() iterates over all names.
flags.DEFINE_string('name', None, 'model name')


def _main(name):
    """Cleans the archive for one named starter and prints the outcome."""
    starter = get_starter(name)
    print(name)
    if starter.url is None:
        print('No url')
    elif starter.clean_archive():
        print('Cleaned')
    else:
        print('No archive present')
    print('---------------------------------------')


def main(_):
    """Cleans one named model, or every known model when --name is unset."""
    if FLAGS.name is None:
        for model_name in get_names():
            _main(model_name)
    else:
        _main(FLAGS.name)


if __name__ == '__main__':
    # absl parses the flags (--name) before invoking main().
    app.run(main)
Ejemplo n.º 12
0
                          resnet_size='32',
                          train_epochs=250,
                          epochs_between_evals=10,
                          batch_size=128)


def run_cifar(flags_obj):
  """Run ResNet CIFAR-10 training and eval loop.

  Args:
    flags_obj: An object containing parsed flag values.
  """
  # A conditional expression replaces the fragile `cond and a or b` idiom,
  # which would silently fall through to `input_fn` if get_synth_input_fn()
  # ever returned a falsy value.
  input_function = (get_synth_input_fn() if flags_obj.use_synthetic_data
                    else input_fn)
  # The image shape constants (_HEIGHT, _WIDTH, _NUM_CHANNELS) are defined at
  # the top of the file.
  resnet_run_loop.resnet_main(
      flags_obj, cifar10_model_fn, input_function, DATASET_NAME,
      shape=[_HEIGHT, _WIDTH, _NUM_CHANNELS])


def main(_):
  """absl entry point: runs CIFAR-10 training inside a benchmark log context."""
  flags_obj = flags.FLAGS
  with logger.benchmark_context(flags_obj):
    run_cifar(flags_obj)


if __name__ == '__main__':
  # Training/eval progress is emitted at logging.INFO, so enable it first.
  tf.logging.set_verbosity(tf.logging.INFO)
  # Register the CIFAR flags before absl parses argv and calls main().
  define_cifar_flags()
  absl_app.run(main)
Ejemplo n.º 13
0
def multi_process_run(main: Callable[[Any], None]) -> None:
    """Runs `main` via absl's app.run (multiprocess-aware variant).

    NOTE(review): currently a plain delegation — the "special multiprocess
    flags" are presumably registered at module scope; confirm.
    """
    app.run(main)
Ejemplo n.º 14
0
Archivo: worker.py Proyecto: google/grr
    "version",
    default=False,
    allow_override=True,
    help="Print the GRR worker version number and exit immediately.")


def main(argv):
  """Starts a GRR worker (or just prints the version and exits)."""
  del argv  # Unused.

  # --version short-circuits: report and return without touching server state.
  if flags.FLAGS.version:
    print("GRR worker {}".format(config_server.VERSION["packageversion"]))
    return

  config.CONFIG.AddContext(contexts.WORKER_CONTEXT,
                           "Context applied when running a worker.")

  # Initialise flows and config_lib, then the Fleetspeak connection.
  server_startup.Init()
  fleetspeak_connector.Init()

  token = access_control.ACLToken(username="******").SetUID()
  worker_lib.GRRWorker(token=token).Run()


if __name__ == "__main__":
  app.run(main)
Ejemplo n.º 15
0
    return (self.train_loss_metric.result().numpy(),
            self.test_loss_metric.result().numpy())


def run_main(argv):
  """absl entry point: forwards flag values to main() as keyword arguments."""
  del argv  # Unused; arguments come from utils.flags_dict() instead.
  main(**utils.flags_dict())


def main(epochs, enable_function, buffer_size, batch_size, download_path,
         num_examples=70000, embedding_dim=256, enc_units=1024, dec_units=1024):
  """Downloads the dataset, builds the NMT model, and runs the training loop."""
  dataset_path = utils.download(download_path)
  train_ds, test_ds, inp_lang, targ_lang = utils.create_dataset(
      dataset_path, num_examples, buffer_size, batch_size)
  # +1 presumably reserves the padding index (Keras Tokenizer convention).
  input_vocab_size = len(inp_lang.word_index) + 1
  target_vocab_size = len(targ_lang.word_index) + 1

  encoder = nmt.Encoder(input_vocab_size, embedding_dim, enc_units, batch_size)
  decoder = nmt.Decoder(target_vocab_size, embedding_dim, dec_units)

  # NOTE(review): batch_size is passed twice to Train — confirm the last
  # positional argument isn't meant to be a different value.
  train_obj = Train(epochs, enable_function, encoder, decoder,
                    inp_lang, targ_lang, batch_size, batch_size)
  print('Training ...')
  return train_obj.training_loop(train_ds, test_ds)

if __name__ == '__main__':
  # Register the NMT flags before absl parses argv and calls run_main().
  utils.nmt_flags()
  app.run(run_main)
Ejemplo n.º 16
0
            [0, 1],
            GetProcessIteratorPids(
                cmdline_regex_string=r"svchost.exe -k (abc|def)"))

        self.assertCountEqual(
            [0, 1, 2],
            GetProcessIteratorPids(cmdline_regex_string=r"svchost.exe.*"))

        self.assertCountEqual(
            [2], GetProcessIteratorPids(cmdline_regex_string=r"^svchost.exe$"))

    def testCmdlineRegex(self):
        """A scan restricted by cmdline_regex reports the matching process."""
        scan_request = rdf_memory.YaraProcessScanRequest(
            signature_shard=rdf_memory.YaraSignatureShard(index=0,
                                                          payload=b"123"),
            num_signature_shards=1,
            cmdline_regex="svchost.exe -k def")

        fake_matches = [rdf_memory.YaraMatch()]
        with mock.patch.object(memory.YaraProcessScan,
                               "_GetMatches",
                               return_value=fake_matches):
            results = self.ExecuteAction(memory.YaraProcessScan,
                                         arg=scan_request)

        # Two results come back (presumably one reply plus a status); the
        # single match belongs to the process with pid 1.
        self.assertLen(results, 2)
        self.assertLen(results[0].matches, 1)
        self.assertEqual(results[0].matches[0].process.pid, 1)


if __name__ == "__main__":
    app.run(test_lib.main)
Ejemplo n.º 17
0
  decompress_cmd.set_defaults(
      f=decompress,
      a=["input_file", "output_file", "url_prefix", "metagraph_cache"])

  # Arguments for both 'compress' and 'decompress'.
  for cmd, ext in ((compress_cmd, ".tfci"), (decompress_cmd, ".png")):
    cmd.add_argument(
        "input_file",
        help="Input filename.")
    cmd.add_argument(
        "output_file", nargs="?",
        help="Output filename (optional). If not provided, appends '{}' to "
             "the input filename.".format(ext))

  # 'models' subcommand.
  models_cmd = subparsers.add_parser(
      "models",
      description="Lists available trained models. Requires an internet "
                  "connection.")
  models_cmd.set_defaults(f=list_models, a=["url_prefix"])

  # Parse arguments.
  return parser.parse_args(argv[1:])


if __name__ == "__main__":
  # Parse arguments and run function determined by subcommand.
  app.run(
      lambda args: args.f(**{k: getattr(args, k) for k in args.a}),
      flags_parser=parse_args)
Ejemplo n.º 18
0
def Run():
  """Runs main() with argv parsed by the module-level argparse parser."""
  def _parse(argv):
    # Drop argv[0] (program name) before handing off to argparse.
    return parser.parse_args(argv[1:])
  app.run(main, flags_parser=_parse)
Ejemplo n.º 19
0
def EndToEndTests():
  """Entry point: runs `run_end_to_end_tests.main` under absl's app.run."""
  from grr_response_test import run_end_to_end_tests as _e2e
  app.run(_e2e.main)
Ejemplo n.º 20
0
  summary_writer = tf.summary.FileWriter(LOG)
  for i in range(PARALLEL):
    agents[i].setup(sess, summary_writer)

  agent.initialize()
  if not FLAGS.training or FLAGS.continuation:
    global COUNTER
    COUNTER = agent.load_model(SNAPSHOT)

  # Run threads
  threads = []
  for i in range(PARALLEL - 1):
    t = threading.Thread(target=run_thread, args=(agents[i], FLAGS.map, False))
    threads.append(t)
    t.daemon = True
    t.start()
    time.sleep(5)

  run_thread(agents[-1], FLAGS.map, FLAGS.render)

  for t in threads:
    t.join()

  if FLAGS.profile:
    print(stopwatch.sw)


if __name__ == "__main__":
  app.run(_main)
Ejemplo n.º 21
0
def ApiRegressionTestsGenerate():
  """Entry point: runs `api_regression_test_generate.main` via absl."""
  from grr_response_test import api_regression_test_generate as _gen
  app.run(_gen.main)
Ejemplo n.º 22
0
def Console():
  """Entry point: runs `console.main` under absl's app.run."""
  from grr_response_server.bin import console as _console
  app.run(_console.main)
Ejemplo n.º 23
0
def DumpMySQLSchema():
  """Entry point: runs `dump_mysql_schema.main` under absl's app.run."""
  from grr_response_test import dump_mysql_schema as _dump
  app.run(_dump.main)
Ejemplo n.º 24
0
def GrrServer():
  """Entry point: runs `grr_server.main` under absl's app.run."""
  from grr_response_server.bin import grr_server as _server
  app.run(_server.main)
Ejemplo n.º 25
0
def DistEntry():
  """The main entry point for packages."""
  # Defer to absl so flags are parsed before the module-level main() runs.
  app.run(main)
Ejemplo n.º 26
0
def Worker():
  """Entry point: runs `worker.main` under absl's app.run."""
  from grr_response_server.bin import worker as _worker
  app.run(_worker.main)
Ejemplo n.º 27
0
def Client():
  """Entry point: runs `client.main` under absl's app.run."""
  from grr_response_client import client as _client
  app.run(_client.main)
Ejemplo n.º 28
0
def AdminUI():
  """Entry point: runs `admin_ui.main` under absl's app.run."""
  from grr_response_server.gui import admin_ui as _admin_ui
  app.run(_admin_ui.main)
Ejemplo n.º 29
0
def FleetspeakClient():
  """Entry point: runs `grr_fs_client.main` under absl's app.run."""
  from grr_response_client import grr_fs_client as _fs_client
  app.run(_fs_client.main)
Ejemplo n.º 30
0
  flags.DEFINE_integer(
      name="train_start", default=0,
      help=help_wrap("Start index of train examples within the data."))
  flags.DEFINE_integer(
      name="train_count", default=1000000,
      help=help_wrap("Number of train examples within the data."))
  flags.DEFINE_integer(
      name="eval_start", default=10000000,
      help=help_wrap("Start index of eval examples within the data."))
  flags.DEFINE_integer(
      name="eval_count", default=1000000,
      help=help_wrap("Number of eval examples within the data."))

  flags.DEFINE_integer(
      "n_trees", default=100, help=help_wrap("Number of trees to build."))
  flags.DEFINE_integer(
      "max_depth", default=6, help=help_wrap("Maximum depths of each tree."))
  flags.DEFINE_float(
      "learning_rate", default=0.1,
      help=help_wrap("The learning rate."))

  flags_core.set_defaults(data_dir="/tmp/higgs_data",
                          model_dir="/tmp/higgs_model")


if __name__ == "__main__":
  # Training progress and eval results are shown as logging.INFO; so enables it.
  tf.logging.set_verbosity(tf.logging.INFO)
  define_train_higgs_flags()
  absl_app.run(main)
Ejemplo n.º 31
0
def PoolClient():
  """Entry point: runs `poolclient.main` under absl's app.run."""
  from grr_response_client import poolclient as _poolclient
  app.run(_poolclient.main)
Ejemplo n.º 32
0
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from absl import app
from absl.testing import absltest

from grr_response_server.databases import db_client_reports_test
from grr_response_server.databases import mem_test_base
from grr.test_lib import test_lib


class MemoryDBClientReportsTest(
    db_client_reports_test.DatabaseTestClientReportsMixin,
    mem_test_base.MemoryDBTestBase, absltest.TestCase):
  """Runs the shared client-reports test mixin against the in-memory DB."""
  pass


if __name__ == "__main__":
  app.run(test_lib.main)
Ejemplo n.º 33
0
            cv2.imshow('HawkEye', img)
            if FLAGS.output:
                out.write(img)
                frame_index = frame_index + 1
                list_file.write(str(frame_index) + ' ')
                if len(converted_boxes) != 0:
                    for i in range(0, len(converted_boxes)):
                        list_file.write(
                            str(converted_boxes[i][0]) + ' ' +
                            str(converted_boxes[i][1]) + ' ' +
                            str(converted_boxes[i][2]) + ' ' +
                            str(converted_boxes[i][3]) + ' ')
                list_file.write('\n')

            # press q to quit
            if cv2.waitKey(1) == ord('q'):
                break
        vid.release()
        if FLAGS.output:
            out.release()
            list_file.close()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    # app.run normally terminates via sys.exit; the SystemExit is deliberately
    # swallowed so the script ends quietly. NOTE(review): this also discards
    # nonzero exit codes — confirm that is intended.
    try:
        hawkEye = HawkEye()
        app.run(hawkEye.main)
    except SystemExit:
        pass