def create_shard_dataset(rows, shards, output_path):
    # Use the ExitStack as a context manager so every shard writer is closed
    # even if an exception is raised mid-iteration.
    with contextlib2.ExitStack() as tf_record_close_stack:
        output_tf_records = open_sharded_output_tfrecords(
            tf_record_close_stack, output_path, shards)

        for index, row in enumerate(tqdm.tqdm(rows)):
            tf_record_sample = create_tf_record_sample(*row)
            output_tf_record = output_tf_records[index % shards]
            output_tf_record.write(tf_record_sample.SerializeToString())
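For reference, a minimal sketch of what the `open_sharded_output_tfrecords` helper used above typically looks like, assuming the `<path>-00000-of-000NN` shard naming seen in the tests later in this listing and the TF1-style `tf.python_io.TFRecordWriter` API:

def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
    """Opens num_shards TFRecord writers on exit_stack and returns them."""
    shard_filenames = [
        '{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
        for idx in range(num_shards)
    ]
    # Each writer is entered on the ExitStack so it is closed when the
    # caller's `with contextlib2.ExitStack()` block exits.
    return [
        exit_stack.enter_context(tf.python_io.TFRecordWriter(filename))
        for filename in shard_filenames
    ]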
Example #2
def main(args=None):
    """Runs lighthouse checks and deletes reports."""
    parsed_args = _PARSER.parse_args(args=args)

    if parsed_args.mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
        lighthouse_mode = LIGHTHOUSE_MODE_ACCESSIBILITY
        server_mode = SERVER_MODE_DEV
    elif parsed_args.mode == LIGHTHOUSE_MODE_PERFORMANCE:
        lighthouse_mode = LIGHTHOUSE_MODE_PERFORMANCE
        server_mode = SERVER_MODE_PROD
    else:
        raise Exception(
            'Invalid parameter passed in: \'%s\', please choose '
            'from \'accessibility\' or \'performance\'' % parsed_args.mode)

    enable_webpages()
    atexit.register(cleanup)

    if lighthouse_mode == LIGHTHOUSE_MODE_PERFORMANCE:
        python_utils.PRINT('Building files in production mode.')
        # We are using --source_maps here, so that we have at least one CI check
        # that builds using source maps in prod env. This is to ensure that
        # there are no issues while deploying oppia.
        build.main(args=['--prod_env', '--source_maps'])
    elif lighthouse_mode == LIGHTHOUSE_MODE_ACCESSIBILITY:
        build.main(args=[])
        run_webpack_compilation()
    else:
        raise Exception(
            'Invalid lighthouse mode: \'%s\', please choose '
            'from \'accessibility\' or \'performance\'' % lighthouse_mode)

    common.start_redis_server()

    # TODO(#11549): Move this to top of the file.
    import contextlib2
    managed_dev_appserver = common.managed_dev_appserver(
        APP_YAML_FILENAMES[server_mode], port=GOOGLE_APP_ENGINE_PORT,
        clear_datastore=True, log_level='critical', skip_sdk_update_check=True)

    with contextlib2.ExitStack() as stack:
        stack.enter_context(common.managed_elasticsearch_dev_server())
        if constants.EMULATOR_MODE:
            stack.enter_context(common.managed_firebase_auth_emulator())
        stack.enter_context(managed_dev_appserver)

        # Wait for the servers to come up.
        common.wait_for_port_to_be_open(feconf.ES_LOCALHOST_PORT)
        common.wait_for_port_to_be_open(GOOGLE_APP_ENGINE_PORT)

        run_lighthouse_puppeteer_script()
        run_lighthouse_checks(lighthouse_mode)
def create_tf_record(output_filename,
                     num_shards,
                     label_map_dict,
                     annotations_dir,
                     image_dir,
                     examples,
                     faces_only=True,
                     mask_type='png'):
  """Creates a TFRecord file from examples.

  Args:
    output_filename: Path to where output file is saved.
    num_shards: Number of shards for output file.
    label_map_dict: The label map dictionary.
    annotations_dir: Directory where annotation files are stored.
    image_dir: Directory where image files are stored.
    examples: Examples to parse and save to tf record.
    faces_only: If True, generates bounding boxes for pet faces.  Otherwise
      generates bounding boxes (as well as segmentations for full pet bodies).
    mask_type: 'numerical' or 'png'. 'png' is recommended because it leads to
      smaller file sizes.
  """
  with contextlib2.ExitStack() as tf_record_close_stack:
    output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
        tf_record_close_stack, output_filename, num_shards)
    for idx, example in enumerate(examples):
      if idx % 100 == 0:
        logging.info('On image %d of %d', idx, len(examples))
      xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')
      mask_path = os.path.join(annotations_dir, 'trimaps', example + '.png')

      if not os.path.exists(xml_path):
        logging.warning('Could not find %s, ignoring example.', xml_path)
        continue
      with tf.gfile.GFile(xml_path, 'r') as fid:
        xml_str = fid.read()
      xml = etree.fromstring(xml_str)
      data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']

      try:
        tf_example = dict_to_tf_example(
            data,
            mask_path,
            label_map_dict,
            image_dir,
            faces_only=faces_only,
            mask_type=mask_type)
        if tf_example:
          shard_idx = idx % num_shards
          output_tfrecords[shard_idx].write(tf_example.SerializeToString())
      except ValueError:
        logging.warning('Invalid example: %s, ignoring.', xml_path)
Example #4
def create_tf_record_for_visualwakewords_dataset(annotations_file, image_dir,
                                                 output_path, num_shards):
  """Loads Visual WakeWords annotations/images and converts to tf.Record format.
  Args:
    annotations_file: JSON file containing bounding box annotations.
    image_dir: Directory containing the image files.
    output_path: Path to output tf.Record file.
    num_shards: number of output file shards.
  """
  with contextlib2.ExitStack() as tf_record_close_stack, \
      tf.gfile.GFile(annotations_file, 'r') as fid:
    output_tfrecords = dataset_utils.open_sharded_output_tfrecords(
        tf_record_close_stack, output_path, num_shards)
    groundtruth_data = json.load(fid)
    images = groundtruth_data['images']

    category_index = {}
    for category in groundtruth_data['categories'].values():
      # if not background class
      if category['id'] != 0:
        category_index[category['id']] = category

    annotations_index = {}
    if 'annotations' in groundtruth_data:
      tf.logging.info(
          'Found groundtruth annotations. Building annotations index.')
      for annotation in groundtruth_data['annotations'].values():
        image_id = annotation[0]['image_id']
        if image_id not in annotations_index:
          annotations_index[image_id] = []
        annotations_index[image_id].append(annotation[0])
    missing_annotation_count = 0
    for image in images:
      image_id = image['id']
      if image_id not in annotations_index:
        missing_annotation_count += 1
        annotations_index[image_id] = []
    tf.logging.info('%d images are missing annotations.',
                    missing_annotation_count)

    total_num_annotations_skipped = 0
    for idx, image in enumerate(images):
      if idx % 100 == 0:
        tf.logging.info('On image %d of %d', idx, len(images))
      annotations_list = annotations_index[image['id']]
      _, tf_example, num_annotations_skipped = _create_tf_example(
          image, annotations_list[0], image_dir)
      total_num_annotations_skipped += num_annotations_skipped
      shard_idx = idx % num_shards
      output_tfrecords[shard_idx].write(tf_example.SerializeToString())
    tf.logging.info('Finished writing, skipped %d annotations.',
                    total_num_annotations_skipped)
Example #5
def _create_tf_record_from_coco_annotations(annotations_file, image_dir,
                                            output_path, include_masks,
                                            num_shards):
    """Loads COCO annotation json files and converts to tf.Record format.

    Args:
      annotations_file: JSON file containing bounding box annotations.
      image_dir: Directory containing the image files.
      output_path: Path to output tf.Record file.
      include_masks: Whether to include instance segmentations masks
        (PNG encoded) in the result. default: False.
      num_shards: number of output file shards.
    """
    with contextlib2.ExitStack() as tf_record_close_stack, tf.gfile.GFile(
            annotations_file, 'r') as fid:
        output_tfrecords = open_sharded_output_tfrecords(
            tf_record_close_stack, output_path, num_shards)
        groundtruth_data = json.load(fid)
        images = groundtruth_data['images']
        category_index = create_category_index(groundtruth_data['categories'])

        annotations_index = {}
        if 'annotations' in groundtruth_data:
            tf.logging.info(
                'Found groundtruth annotations. Building annotations index.')
            for annotation in groundtruth_data['annotations']:
                image_id = annotation['image_id']
                if image_id not in annotations_index:
                    annotations_index[image_id] = []
                annotations_index[image_id].append(annotation)
        missing_annotation_count = 0
        for image in images:
            image_id = image['id']
            if image_id not in annotations_index:
                missing_annotation_count += 1
                annotations_index[image_id] = []
        tf.logging.info('%d images are missing annotations.',
                        missing_annotation_count)

        total_num_annotations_skipped = 0
        idx = 0
        for image in tqdm(images):
            annotations_list = annotations_index[image['id']]
            _, tf_example, num_annotations_skipped = create_tf_example(
                image, annotations_list, image_dir, category_index,
                include_masks)
            total_num_annotations_skipped += num_annotations_skipped
            shard_idx = idx % num_shards
            output_tfrecords[shard_idx].write(tf_example.SerializeToString())
            idx += 1
        tf.logging.info('Finished writing, skipped %d annotations.',
                        total_num_annotations_skipped)
Example #6
    def test_inspect_cpus(self):
        with contextlib.ExitStack() as stack:
            stack.enter_context(
                mock.patch.object(self.inspector.connection,
                                  'lookupByUUIDString',
                                  return_value=self.domain))
            stack.enter_context(
                mock.patch.object(self.domain,
                                  'info',
                                  return_value=(0, 0, 0, 2, 999999)))
            cpu_info = self.inspector.inspect_cpus(self.instance)
            self.assertEqual(2, cpu_info.number)
            self.assertEqual(999999, cpu_info.time)
    def test_sharded_tfrecord_writes(self):
        with contextlib2.ExitStack() as tf_record_close_stack:
            output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
                tf_record_close_stack,
                os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10)
            for idx in range(10):
                output_tfrecords[idx].write('test_{}'.format(idx))

        for idx in range(10):
            tf_record_path = '{}-{:05d}-of-00010'.format(
                os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx)
            records = list(tf.python_io.tf_record_iterator(tf_record_path))
            self.assertAllEqual(records, ['test_{}'.format(idx)])
Example #8
    def test_filters_empty_strings_from_command_args_when_shell_is_false(self):
        with contextlib2.ExitStack() as stack:
            logs = stack.enter_context(self.capture_logging())
            popen_calls = stack.enter_context(self._swap_popen())

            stack.enter_context(
                common.managed_process(['', 'a', '', 1],
                                       shell=False,
                                       timeout_secs=10))

        self.assertEqual(logs, [])
        self.assertEqual(popen_calls,
                         [self.POPEN_CALL(['a', '1'], {'shell': False})])
Example #9
def create_tf_record(output_filename, file_pars):
    print(output_filename)
    with contextlib2.ExitStack() as tf_record_close_stack:
        # Register the writer on the ExitStack so it is closed on exit.
        writer = tf_record_close_stack.enter_context(
            tf.python_io.TFRecordWriter(output_filename))
        for data, label in file_pars:
            example = dict_to_tf_example(data, label)
            if example is None:
                print(data + " is null!")
            else:
                writer.write(example.SerializeToString())
Example #10
    def test_kills_child_processes(self):
        with contextlib2.ExitStack() as stack:
            logs = stack.enter_context(self.capture_logging())
            stack.enter_context(self._swap_popen(
                make_processes_unresponsive=True, num_children=3))

            proc = stack.enter_context(
                common.managed_process(['a'], timeout_secs=10))
            pids = [c.pid for c in proc.children()] + [proc.pid]

        self.assertEqual(len(pids), 4)
        self.assertItemsEqual(
            logs, ['Process killed (pid=%d)' % p for p in pids])
    def install(self, test):
        """Installs a new instance of the stub on the given test class.

        The stub will emulate an initially-empty Firebase authentication server.

        Args:
            test: test_utils.TestBase. The test to install the stub on.
        """
        self._test = test

        with contextlib2.ExitStack() as swap_stack:
            swap_stack.enter_context(
                test.swap_to_always_return(firebase_admin,
                                           'initialize_app',
                                           value=object()))
            swap_stack.enter_context(
                test.swap_to_always_return(firebase_admin, 'delete_app'))
            swap_stack.enter_context(
                test.swap(firebase_admin.auth, 'create_session_cookie',
                          self._create_session_cookie))
            swap_stack.enter_context(
                test.swap(firebase_admin.auth, 'verify_session_cookie',
                          self._verify_session_cookie))
            swap_stack.enter_context(
                test.swap(firebase_admin.auth, 'import_users',
                          self._import_users))
            swap_stack.enter_context(
                test.swap(firebase_admin.auth, 'verify_id_token',
                          self._verify_id_token))
            swap_stack.enter_context(
                test.swap(firebase_admin.auth, 'get_user', self._get_user))
            swap_stack.enter_context(
                test.swap(firebase_admin.auth, 'delete_user',
                          self._delete_user))
            swap_stack.enter_context(
                test.swap(firebase_admin.auth, 'update_user',
                          self._update_user))

            for function_name in self._UNIMPLEMENTED_SDK_FUNCTION_NAMES:
                swap_stack.enter_context(
                    test.swap_to_always_raise(firebase_admin.auth,
                                              function_name,
                                              NotImplementedError))

            # Standard usage of ExitStack: enter a bunch of context managers
            # from the safety of an ExitStack's context. Once they've all been
            # opened, pop_all() of them off of the original context so they can
            # *stay* open. Calling the function returned will exit all of them
            # in reverse order.
            # https://docs.python.org/3/library/contextlib.html#cleaning-up-in-an-enter-implementation
            self._swap_stack = swap_stack.pop_all()
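For contrast with the Firebase-specific swaps above, here is a minimal, self-contained sketch of the same pop_all() idiom: enter contexts inside a `with` block, transfer ownership out with pop_all(), and close them later. The class and method names are illustrative, not from the original code.

class SwapStub:
    """Keeps a group of context managers open beyond the installing method."""

    def install(self, contexts):
        with contextlib2.ExitStack() as stack:
            for ctx in contexts:
                stack.enter_context(ctx)
            # pop_all() transfers the callbacks to a fresh stack, so the
            # contexts stay open after this `with` block exits.
            self._swap_stack = stack.pop_all()

    def uninstall(self):
        # Exits everything entered in install(), in reverse order.
        self._swap_stack.close()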
def _create_tf_record(annotation_file: str,
                      label_names_file: str,
                      label_source: LabelSource,
                      image_dir: str,
                      output_path: str,
                      include_masks: bool = False,
                      include_keypoint: bool = False,
                      num_shards: int = 1):
    """ Load labelme annotation json files and converts to tf.Record format.

    :param annotations_file: text file, each line is name of file label
    :param image_dir: Directory containing the image files.
    :param output_path: Path to output tf.Record fil .
    :param num_shards: number of output file shards.
    :return:
    """
    with contextlib2.ExitStack() as tf_record_close_stack, \
            tf.gfile.GFile(annotation_file, 'r') as fid:
        output_tfrecords = open_sharded_output_tfrecords(
            tf_record_close_stack, output_path, num_shards)

        cat2idx, idx2cat = get_category_mapping(label_names_file)

        for i, line in tqdm.tqdm(enumerate(fid)):
            if line.startswith("#"):
                continue
            label_path = line.strip()
            if label_source == LabelSource.LABEL_ME:
                annotation = ObjectsImage.get_from_labelme(
                    label_path, image_dir, cat2idx, idx2cat)
            elif label_source == LabelSource.LABEL_VOC:
                annotation = ObjectsImage.get_from_voc(label_path, image_dir,
                                                       cat2idx, idx2cat)
            elif label_source == LabelSource.LABEL_YOLO:
                annotation = ObjectsImage.get_from_yolo(
                    label_path, image_dir, cat2idx, idx2cat)
            else:
                raise Exception(f"Can't process source {label_source}")
            try:
                tf_example = create_tf_example(
                    annotation,
                    include_keypoint=include_keypoint,
                    include_masks=include_masks)
                shard_idx = i % num_shards
                if tf_example:
                    output_tfrecords[shard_idx].write(
                        tf_example.SerializeToString())
            except Exception as e:
                print(e)
                print("Error at:", label_path)
                traceback.print_exc()
Example #13
    def test_respects_processes_that_are_killed_early(self):
        with contextlib2.ExitStack() as stack:
            logs = stack.enter_context(self.capture_logging())
            stack.enter_context(self._swap_popen())

            proc = stack.enter_context(common.managed_process(
                ['a'], timeout_secs=10))
            time.sleep(1)
            proc.kill()
            proc.wait()

        self.assert_proc_was_managed_as_expected(
            logs, proc.pid,
            manager_should_have_sent_terminate_signal=False)
Example #14
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)

  required_flags = [
      'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
      'output_tf_record_path_prefix'
  ]
  for flag_name in required_flags:
    if not getattr(FLAGS, flag_name):
      raise ValueError('Flag --{} is required'.format(flag_name))

  label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
  all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
  if FLAGS.input_image_label_annotations_csv:
    all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
    all_label_annotations.rename(
        columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
  else:
    all_label_annotations = None
  all_images = tf.gfile.Glob(
      os.path.join(FLAGS.input_images_directory, '*.jpg'))
  all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
  all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
  all_annotations = pd.concat(
      [all_box_annotations, all_image_ids, all_label_annotations])

  tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))

  with contextlib2.ExitStack() as tf_record_close_stack:
    output_tfrecords = open_sharded_output_tfrecords(
        tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
        FLAGS.num_shards)

    for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
      tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
                             counter)

      image_id, image_annotations = image_data
      # In OID image file names are formed by appending ".jpg" to the image ID.
      image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
      if not os.path.exists(image_path):
        continue
      with tf.gfile.Open(image_path, 'rb') as image_file:
        encoded_image = image_file.read()

      tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
          image_annotations, label_map, encoded_image)
      if tf_example:
        shard_idx = int(image_id, 16) % FLAGS.num_shards
        output_tfrecords[shard_idx].write(tf_example.SerializeToString())
Example #15
def gen_tfrecord(panda_df, output_path, num_shards = 10):
	"""Creates a TFRecord of the current dataframe into the output file"""
	with contextlib2.ExitStack() as tf_record_close_stack:
		writer = tf_record_creation_util.open_sharded_output_tfrecords(
			tf_record_close_stack, output_path, num_shards)
		grouped = split(panda_df, 'filename')
		for idx, group in enumerate(grouped):
			if idx % 100 == 0:
				print("On image " + str(idx) + " of " + str(len(grouped)))
			tf_example = create_tf_example(group, "./images/raw")
			shard_idx = idx % num_shards
			writer[shard_idx].write(tf_example.SerializeToString())
		
		print("Successfully creates the TFRecords: {}".format(output_path))
Example #16
    def test_inspect_cpu_l3_cache(self):
        fake_stats = [({}, {'perf.cmt': 90112})]
        connection = self.inspector.connection
        with contextlib.ExitStack() as stack:
            stack.enter_context(
                mock.patch.object(connection,
                                  'lookupByUUIDString',
                                  return_value=self.domain))
            stack.enter_context(
                mock.patch.object(connection,
                                  'domainListGetStats',
                                  return_value=fake_stats))
            cpu_info = self.inspector.inspect_cpu_l3_cache(self.instance)
            self.assertEqual(90112, cpu_info.l3_cache_usage)
Example #17
def create_tf_record(output_filename,
                     num_shards,
                     label_map_dict,
                     annotations_dir,
                     image_dir,
                     examples,
                     use_alt_names):
    """Creates a TFRecord file from examples.

    Args:
        output_filename: Path to where output file is saved.
        num_shards: Number of shards for output file.
        label_map_dict: The label map dictionary.
        annotations_dir: Directory where annotation files are stored.
        image_dir: Directory where image files are stored.
        examples: Examples to parse and save to tf record.
        use_alt_names: use alternative class name mapping.
    """

    with contextlib2.ExitStack() as tf_record_close_stack:
        output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
            tf_record_close_stack, output_filename, num_shards)

        for idx, example in enumerate(examples):
            if idx % 10 == 0:
                logger.info('On image %d of %d', idx, len(examples))

            xml_path = os.path.join(annotations_dir, 'xmls', example + '.xml')

            if not os.path.exists(xml_path):
                logger.warning('Could not find %s, ignoring example.', xml_path)
                continue

            with tf.io.gfile.GFile(xml_path, 'r') as fid:
                xml_str = fid.read()
                xml = etree.fromstring(xml_str)
                data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
                try:
                    tf_example = dict_to_tf_example(
                        data=data,
                        label_map_dict=label_map_dict,
                        image_subdirectory=image_dir,
                        use_alt_names=use_alt_names)

                    if tf_example:
                        shard_idx = idx % num_shards
                        output_tfrecords[shard_idx].write(
                            tf_example.SerializeToString())
                except ValueError:
                    logger.warning('Invalid example: %s, ignoring.', xml_path)
Example #18
    def test_inspect_disks_with_domain_shutoff(self):
        connection = self.inspector.connection
        with contextlib.ExitStack() as stack:
            stack.enter_context(
                mock.patch.object(connection,
                                  'lookupByUUIDString',
                                  return_value=self.domain))
            stack.enter_context(
                mock.patch.object(self.domain,
                                  'info',
                                  return_value=(5, 0, 0, 2, 999999)))
            inspect = self.inspector.inspect_disks
            self.assertRaises(virt_inspector.InstanceShutOffException, list,
                              inspect(self.instance))
def main(image_directory: str, annotation_directory: str, output_path_training_split: str,
         output_path_validation_split: str, output_path_test_split: str, label_map_path: str, number_of_shards: int,
         included_classes: List[str]):
    os.makedirs(os.path.dirname(output_path_training_split), exist_ok=True)
    label_map_dict = label_map_util.get_label_map_dict(label_map_path)
    all_jpg_image_paths = glob(f"{image_directory}/**/*.jpg", recursive=True)
    all_png_image_paths = glob(f"{image_directory}/**/*.png", recursive=True)
    all_image_paths = all_jpg_image_paths + all_png_image_paths
    all_annotation_paths = glob(f"{annotation_directory}/**/*.json", recursive=True)

    # Filter out the dataset.json files, which are complete dataset annotations
    all_annotation_paths = [a for a in all_annotation_paths if "dataset.json" not in a]

    training_sample_indices, validation_sample_indices, test_sample_indices = get_training_validation_test_indices(
        all_image_paths)

    all_annotation_paths = sorted(all_annotation_paths)
    all_image_paths = sorted(all_image_paths)

    if len(all_image_paths) != len(all_annotation_paths):
        print("Not every image has annotations")

    for annotation_path, image_path in zip(all_annotation_paths, all_image_paths):
        if os.path.splitext(os.path.basename(image_path))[0] not in annotation_path:
            print("Invalid annotations detected: {0}, {1}".format(image_path, annotation_path))

    print(f"Exporting\n"
          f"- {len(training_sample_indices)} training samples\n"
          f"- {len(validation_sample_indices)} validation samples\n"
          f"- {len(test_sample_indices)} test samples")

    with contextlib2.ExitStack() as tf_record_close_stack:
        training_tf_records = tf_record_creation_util.open_sharded_output_tfrecords(
            tf_record_close_stack, output_path_training_split, number_of_shards)
        validation_tf_records = tf_record_creation_util.open_sharded_output_tfrecords(
            tf_record_close_stack, output_path_validation_split, number_of_shards)
        test_tf_records = tf_record_creation_util.open_sharded_output_tfrecords(
            tf_record_close_stack, output_path_test_split, number_of_shards)
        index = 0
        for tf_example in annotations_to_tf_example_list(all_image_paths, all_annotation_paths, label_map_dict, included_classes):
            shard_index = index % number_of_shards
            index += 1

            if index in training_sample_indices:
                training_tf_records[shard_index].write(tf_example.SerializeToString())
            elif index in validation_sample_indices:
                validation_tf_records[shard_index].write(tf_example.SerializeToString())
            elif index in test_sample_indices:
                test_tf_records[shard_index].write(tf_example.SerializeToString())
Example #20
    def _ThreadProc(self):
        """Handles one whole test from start to finish."""
        self._exit_stack = None
        self._test_state = None
        self._output_thread = None

        with contextlib.ExitStack() as exit_stack, \
            LogSleepSuppress() as suppressor:
            # Top level steps required to run a single iteration of the Test.
            _LOG.info('Starting test %s', self._test_data.code_info.name)

            # Any access to self._exit_stack must be done while holding this lock.
            with self._lock:
                # Initial setup of exit stack and final cleanup of attributes.
                self._exit_stack = exit_stack

            # Wait here until the test start trigger returns a DUT ID.  Don't hold
            # self._lock while we do this, or else calls to Stop() will deadlock.
            # Create plugs while we're here because that may also take a while and
            # we don't want to hold self._lock while we wait.
            dut_id = self._WaitForTestStart(suppressor)
            self._status = self.FrameworkStatus.INITIALIZING
            self._InitializePlugs(suppressor)

            with self._lock:
                if not self._exit_stack:
                    # We shouldn't get here, but just in case something went weird with a
                    # call to Stop() and we ended up resuming execution here but the
                    # exit stack was already cleared, bail.  Try to tear down plugs on a
                    # best-effort basis.
                    self._plug_manager.TearDownPlugs()
                    raise TestStopError('Test Stopped.')

                # Tear down plugs first, then output test record.
                exit_stack.callback(self._plug_manager.TearDownPlugs)

                # Perform initialization of some top-level stuff we need.
                self._test_state = self._MakeTestState(dut_id, suppressor)
                executor = self._MakePhaseExecutor(exit_stack, suppressor)

            # Everything is set, set status and begin test execution.  Note we don't
            # protect this with a try: block because the PhaseExecutor handles any
            # exceptions from test code.  Any exceptions here are caused by the
            # framework, and we probably want them to interrupt framework state
            # changes (like the transition to FINISHING).
            self._status = self.FrameworkStatus.EXECUTING
            suppressor.failure_reason = 'Failed to execute test.'
            self._ExecuteTestPhases(executor)
            self._status = self.FrameworkStatus.FINISHING
Example #21
    def __init__(self, path, num_shards=10):
        self.stack = contextlib2.ExitStack()
        base_path = os.path.dirname(path)
        if not os.path.isdir(base_path):
            os.makedirs(base_path)
        self.filenames = [
            '{}-{:05d}-of-{:05d}'.format(path, idx, num_shards)
            for idx in range(num_shards)
        ]
        self.shards = [
            self.stack.enter_context(tf.io.TFRecordWriter(filename))
            for filename in self.filenames
        ]
        self.index = 0
        self.num_shards = num_shards
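The snippet above stops after __init__. A write method that round-robins records across the shards with self.index, plus a close that unwinds the ExitStack, might plausibly look like this; these methods are hypothetical and not part of the original class:

    def write(self, serialized_example):
        """Hypothetical: writes one serialized record to the next shard in round-robin order."""
        self.shards[self.index % self.num_shards].write(serialized_example)
        self.index += 1

    def close(self):
        """Hypothetical: closes every shard writer that was opened on the ExitStack."""
        self.stack.close()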
Example #22
    def setUp(self):
        super(PopulateFirebaseAccountsOneOffJobTests, self).setUp()
        self._auth_id_generator = itertools.count()
        self.exit_stack = contextlib2.ExitStack()
        self.sdk_stub = firebase_auth_services_test.FirebaseAdminSdkStub()

        self.sdk_stub.install(self)
        self.exit_stack.callback(self.sdk_stub.uninstall)

        # Forces all users to produce the same hash value during unit tests to
        # prevent them from being sharded and complicating the testing logic.
        self.exit_stack.enter_context(
            self.swap_to_always_return(auth_jobs,
                                       'ID_HASHING_FUNCTION',
                                       value=1))
Example #23
def _set_arg_scope_defaults(defaults):
    """Sets arg scope defaults for all items present in defaults.

  Args:
    defaults: dictionary mapping function to default_dict

  Yields:
    context manager
  """
    with contextlib2.ExitStack() as stack:
        _ = [
            stack.enter_context(slim.arg_scope(func, **default_arg))
            for func, default_arg in defaults.items()
        ]
        yield
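Assuming _set_arg_scope_defaults is decorated with contextlib2.contextmanager (the decorator is not visible in this snippet), a usage sketch might look like the following; slim, tf, and inputs are taken to be defined by the surrounding module:

# Hypothetical usage sketch: apply shared defaults to several slim ops at once.
conv_defaults = {
    slim.conv2d: {'padding': 'SAME', 'activation_fn': tf.nn.relu6},
    slim.separable_conv2d: {'padding': 'SAME'},
}
with _set_arg_scope_defaults(conv_defaults):
    net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')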
Example #24
def vlan_set(primary_iface):
    "constructor to create a variable set of macvlan interfaces"

    with contextlib2.ExitStack() as stack:

        def inner(count):
            try:
                return [
                    stack.enter_context(network.MacVLan(primary_iface))
                    for _ in range(count)
                ]
            except network.NetlinkError as err:
                vlan_handle_error(err)

        yield inner
Example #25
def test_sync_across_cluster(default_data_center, default_cluster, host_0_up,
                             host_1_up):

    cluster_hosts_up = (host_0_up, host_1_up)
    with clusterlib.new_assigned_network('sync-net', default_data_center,
                                         default_cluster) as sync_net:
        with contextlib2.ExitStack() as stack:
            for i, host in enumerate(cluster_hosts_up):
                att_datum = create_attachment(sync_net, i)
                stack.enter_context(hostlib.setup_networks(
                    host, (att_datum, )))
                stack.enter_context(unsynced_host_network(host))
            default_cluster.sync_all_networks()
            for host in cluster_hosts_up:
                host.wait_for_networks_in_sync()
Example #26
    def remote_tunnel(self):
        with contextlib.ExitStack() as stack:
            if self.ssh_tunnel:
                output = stack.enter_context(
                    contextlib.closing(open(os.devnull, 'w')))  # noqa
                # forward sys.stdout to os.devnull to prevent
                # printing debug messages by fab.remote_tunnel
                stack.enter_context(utils.patch(sys, 'stdout', output))
                stack.enter_context(
                    fab.remote_tunnel(
                        remote_bind_address=self.ssh_tunnel.bind_address,
                        remote_port=self.ssh_tunnel.port,
                        local_host=self.ssh_tunnel.host,
                        local_port=self.ssh_tunnel.host_port,
                    ))
            yield
Example #27
def write_sharded_tf_records(examples, output_filebase, num_shards):
    """Writes encoded TFRecords to `num_shards` files.

  Args:
    examples: encoded TFRecord examples.
    output_filebase: Path to the destination directory.
    num_shards: number of output shards.
  """
    with contextlib2.ExitStack() as tf_record_close_stack:
        output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
            tf_record_close_stack, output_filebase, num_shards)
        for index, example in enumerate(examples):
            tf_example = create_tf_example(example)
            output_shard_index = index % num_shards
            output_tfrecords[output_shard_index].write(
                tf_example.SerializeToString())
Example #28
    def test_kills_child_processes(self):
        with contextlib2.ExitStack() as stack:
            logs = stack.enter_context(self.capture_logging())
            stack.enter_context(self._swap_popen(
                num_children=3, make_procs_unresponsive=True))

            proc = stack.enter_context(
                common.managed_process(['a'], timeout_secs=10))
            pids = [c.pid for c in proc.children()] + [proc.pid]

        self.assertEqual(len(set(pids)), 4)
        for pid in pids:
            self.assert_proc_was_managed_as_expected(
                logs, pid,
                manager_should_have_sent_terminate_signal=True,
                manager_should_have_sent_kill_signal=True)
Example #29
def main(_):
    LOG_FILE = FLAGS.log_path
    if LOG_FILE is not None:
        with open(LOG_FILE, 'w') as log:
            log.write("IMG_PATH\n")
    label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
    DATASET_PATH = os.path.normpath(FLAGS.data_dir)
    OUTPUT_PATH = os.path.normpath(FLAGS.output_path)
    if '%TYPE%' in DATASET_PATH:
        PATHS = [(DATASET_PATH.replace('%TYPE%', datasetType),
                  OUTPUT_PATH.replace('%TYPE%', datasetType))
                 for datasetType in ['train', 'val']]
    else:
        PATHS = [(DATASET_PATH, FLAGS.output_path)]

    for datasetPath, outputPath in PATHS:
        logging.info(f'Using {datasetPath}')
        start = time()
        imageDirList = os.listdir(datasetPath)
        nbImage = len(imageDirList)
        record_dir = os.path.dirname(outputPath)
        if not os.path.exists(record_dir):
            os.makedirs(record_dir, exist_ok=True)
        num_shards = max(
            1, nbImage // IMG_PER_SHARD +
            (0 if nbImage % IMG_PER_SHARD < IMG_PER_SHARD * 0.2 else 1))
        if FLAGS.no_shard:
            num_shards = 1
        with contextlib2.ExitStack() as tf_record_close_stack:
            output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
                tf_record_close_stack, outputPath, num_shards)
            for idx, imageDir in enumerate(imageDirList):
                if LOG_FILE is not None:
                    with open(LOG_FILE, 'a') as log:
                        log.write(f"{imageDir}\n")
                if idx % 50 == 0:
                    logging.info(f'On image {idx} of {len(imageDirList)}')

                IMAGE_DIR_PATH = os.path.join(datasetPath, imageDir)
                data = getImageData(str(IMAGE_DIR_PATH), label_map_dict)
                tf_example = data2TFExample(data)
                output_tfrecords[idx % num_shards].write(
                    tf_example.SerializeToString())
        total_time = time() - start
        m = int(total_time) // 60
        s = int(total_time) % 60
        print(f"{m:02d}:{s:02d}", flush=True)
Example #30
    def _capture_variables(self):
        """Adds variables used by this module to self._all_variables.

    Upon entering this context manager the module adds itself onto the top
    of the module call stack. Any variables created with `tf.get_variable()`
    inside `_build()` or `_enter_variable_scope()` while this module is on top
    of the call stack will be added to `self._all_variables`.

    Before exiting the context the module removes itself from the top of the
    call stack, and adds all of the variables in `self._all_variables` to its
    parent module (the new top) of the call stack.

    Yields:
      Nothing, the yield just transfers focus back to the inner context.
    """
        module_stack = get_module_stack()
        module_stack.append(self)
        try:
            with contextlib2.ExitStack() as stack:
                # Ideally move re-entering store into Template.variable_scope.
                template_store = getattr(self._template, "_template_store",
                                         None)
                if template_store is not None:
                    # In eager mode, the template store keeps references to created
                    # variables such that they survive even if there are no references to
                    # them in Python code. Variables added to an eager template store are
                    # also added to TensorFlow global collections (unlike regular
                    # variables created in eager mode).
                    stack.enter_context(template_store.as_default())

                stack.enter_context(
                    util.notify_about_new_variables(self._all_variables.add))

                yield

                if self._original_name:
                    self._all_variables.update(self._template.variables)

        finally:
            # Remove `self` from `module_stack`, this happens as part of cleanup
            # even if an error is raised.
            module_stack.pop()

        if module_stack:
            # Peek into the stack to add created variables to the parent
            parent_module = module_stack[-1]
            parent_module._all_variables.update(self._all_variables)  # pylint: disable=protected-access