Code example #1
def parse_tfrecord_glue_files(record):
    # The tensors you pull into the model MUST have the same name
    # as what was encoded in the TFRecord

    # FixedLenFeature means that you know the number of tensors associated
    # with each label and example.

    # For example, there is exactly one serialized input_ids tensor per
    # example, so it is a FixedLenFeature.

    features_spec = {
        'input_ids': tf.io.FixedLenFeature([], tf.string, default_value=''),
        'attention_mask': tf.io.FixedLenFeature([], tf.string, default_value=''),
        'token_type_ids': tf.io.FixedLenFeature([], tf.string, default_value=''),
        'label': tf.io.FixedLenFeature([], tf.int64, default_value=0)
    }

    example = tf.io.parse_single_example(record, features_spec)

    # The serialized tensors come back as strings; parse them and pin the
    # static sequence length. Using (None,) here would keep the length
    # dynamic; pinning (128,) lets downstream layers see a defined shape.
    f0 = tf.ensure_shape(tf.io.parse_tensor(example['input_ids'], out_type=tf.int32), (128,))
    f1 = tf.ensure_shape(tf.io.parse_tensor(example['attention_mask'], out_type=tf.int32), (128,))
    f2 = tf.ensure_shape(tf.io.parse_tensor(example['token_type_ids'], out_type=tf.int32), (128,))
    
    return {'input_ids': f0, 'attention_mask': f1, 'token_type_ids': f2}, example['label']
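
A minimal wiring sketch for the parser above (the file pattern and batch size are illustrative, not from the original project). Because each parsed tensor carries a static [128] shape, the batched dataset can feed a model without further shape hints:

def make_glue_dataset():
    files = tf.io.gfile.glob('glue_train-*.tfrecord')  # hypothetical pattern
    ds = tf.data.TFRecordDataset(files)
    ds = ds.map(parse_tfrecord_glue_files, num_parallel_calls=tf.data.AUTOTUNE)
    return ds.batch(32).prefetch(tf.data.AUTOTUNE)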
Code example #2
def shapley_inputs_one_sample(sparse_positive_example: tf.sparse.SparseTensor, dense_shape: Tuple[int, int]) -> \
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
    ones_like_values = tf.ones_like(sparse_positive_example.values)
    tf.debugging.assert_equal(sparse_positive_example.values, ones_like_values,
                              message='assuming non-zero elements equal 1.')

    indices = tf.random.shuffle(sparse_positive_example.indices)
    num_indices = tf.shape(indices)[0]

    random_len = tf.random.uniform(shape=(), minval=1, maxval=num_indices + 1, dtype=tf.dtypes.int32)

    ones = tf.ones(shape=(random_len,), dtype=tf.dtypes.bool)
    zeros = tf.zeros(shape=(num_indices - random_len + 1,), dtype=tf.dtypes.bool)
    retain_inclusive = tf.concat((ones, zeros[:-1]), axis=0)
    retain_exclusive = tf.concat((ones[:-1], zeros), axis=0)
    toggled_index = indices[random_len - 1]

    sparse_positive_example_shuffled = tf.sparse.SparseTensor(indices=indices, values=ones_like_values,
                                                              dense_shape=sparse_positive_example.dense_shape)

    example_inclusive = tf.sparse.retain(sp_input=sparse_positive_example_shuffled, to_retain=retain_inclusive)
    example_exclusive = tf.sparse.retain(sp_input=sparse_positive_example_shuffled, to_retain=retain_exclusive)

    example_inclusive = tf.sparse.reorder(example_inclusive)
    example_exclusive = tf.sparse.reorder(example_exclusive)

    example_inclusive = tf.sparse.to_dense(example_inclusive)
    example_exclusive = tf.sparse.to_dense(example_exclusive)

    example_inclusive = tf.ensure_shape(example_inclusive, shape=dense_shape)
    example_exclusive = tf.ensure_shape(example_exclusive, shape=dense_shape)

    return example_exclusive, example_inclusive, toggled_index
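
A toy invocation of the helper above (the 4x5 shape and index pattern are illustrative). Each call draws one Monte-Carlo Shapley sample: a random coalition of active cells with, and without, one toggled cell:

sp = tf.sparse.SparseTensor(indices=[[0, 1], [2, 3], [3, 0]],
                            values=tf.ones([3], tf.float32),
                            dense_shape=[4, 5])
excl, incl, toggled = shapley_inputs_one_sample(sp, dense_shape=(4, 5))
# The model-output difference between `incl` and `excl` attributes credit
# to the feature at index `toggled`.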
Code example #3
    def __init__(
        self,
        kernel: Optional[gpflow.kernels.Kernel],
        feature_functions: tf.keras.layers.Layer,
        feature_coefficients: TensorType,
    ):
        r"""
        :param kernel: The kernel corresponding to the feature decomposition.
            If ``None``, there is no analytical expression associated with the infinite
            sum and we approximate the kernel based on the feature decomposition.

            .. note::

                In certain cases, the analytical expression for the kernel is
                not available. In this case, passing `None` is allowed, and
                :meth:`K` and :meth:`K_diag` will be computed using the
                approximation provided by the feature decomposition.

        :param feature_functions: A Keras layer for which the call evaluates the
            ``L`` features of the kernel :math:`\phi_i(\cdot)`. For ``X`` with the shape ``[N, D]``,
            ``feature_functions(X)`` returns a tensor with the shape ``[N, L]``.
        :param feature_coefficients: A tensor with the shape ``[L, 1]`` with coefficients
            associated with the features, :math:`\lambda_i`.
        """
        super().__init__()

        if kernel is None:
            self._kernel = _ApproximateKernel(feature_functions,
                                              feature_coefficients)
        else:
            self._kernel = kernel

        self._feature_functions = feature_functions
        self._feature_coefficients = feature_coefficients  # [L, 1]
        tf.ensure_shape(self._feature_coefficients, tf.TensorShape([None, 1]))
Code example #4
def create_input(image, label, height=768, width=768):
    image, label = resize(image, label, height, width)
    image = tf.cast(image, tf.float32) / 127.5 - 1
    label = tf.cast(label, tf.float32)
    image = tf.ensure_shape(image, [height, width, 3])
    label = tf.ensure_shape(label, [height, width, 1])
    return image, label
Code example #5
def load(image_file):
  """
  TF function that calls NP helper function to load all the images, colors, masks, and face ids.
  """
  masks, colors, image, faceid, cmask, mask2, cmask2 = tf.numpy_function(
      load_mask, [image_file], [tf.float32] * 7)

  return (tf.ensure_shape(masks, [256, 256, 7]),
          tf.reverse(tf.ensure_shape(image, [256, 256, 3]), [-1]),
          tf.reverse(tf.ensure_shape(colors, [1, 1, 7 * 3]), [-1]),
          tf.ensure_shape(faceid, [1, 1, 128]),
          tf.ensure_shape(cmask, [256, 256, 3]),
          tf.ensure_shape(mask2, [256, 256, 7]),
          tf.ensure_shape(cmask2, [256, 256, 3]))
Code example #6
    def parse_fn(self, file):
        config = self.config
        image_size = config.IMAGE_SIZE
        dmap_size = config.MAP_SIZE
        label_size = 1

        def _parse_function(_file):
            _file = _file.decode('UTF-8')
            image_bytes = image_size * image_size * 3
            dmap_bytes = dmap_size * dmap_size
            raw = np.fromfile(_file, dtype='uint8')  # `raw` avoids shadowing builtin bin()
            image = np.transpose(
                raw[0:image_bytes].reshape((3, image_size, image_size)) / 255,
                (1, 2, 0))
            dmap = np.transpose(
                raw[image_bytes:image_bytes + dmap_bytes].reshape(
                    (1, dmap_size, dmap_size)) / 255, (1, 2, 0))
            label = raw[image_bytes + dmap_bytes:image_bytes + dmap_bytes +
                        label_size].astype(np.float32)
            dmap1 = dmap * (1 - label)
            dmap2 = np.ones_like(dmap) * label
            dmap = np.concatenate([dmap1, dmap2], axis=2)

            return image.astype(np.float32), dmap.astype(
                np.float32), label.astype(np.float32)

        image_ts, dmap_ts, label_ts = tf.numpy_function(
            _parse_function, [file], [tf.float32, tf.float32, tf.float32])
        image_ts = tf.ensure_shape(image_ts,
                                   [config.IMAGE_SIZE, config.IMAGE_SIZE, 3])
        dmap_ts = tf.ensure_shape(dmap_ts,
                                  [config.MAP_SIZE, config.MAP_SIZE, 2])
        label_ts = tf.ensure_shape(label_ts, [1])
        return image_ts, dmap_ts, label_ts
Code example #7
File: dataset.py Project: mike8411251995/ECCV20-STDN
    def parse_fn(self, file1, file2):
        config = self.config
        imsize = config.IMAGE_SIZE
        lm_reverse_list = np.array([17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,
                           27,26,25,24,23,22,21,20,19,18,
                           28,29,30,31,36,35,34,33,32,
                           46,45,44,43,48,47,40,39,38,37,42,41,
                           55,54,53,52,51,50,49,60,59,58,57,56,65,64,63,62,61,68,67,66],np.int32) -1

        def _parse_function(_file1, _file2):
            # live
            _file1 = _file1.decode('UTF-8')
            meta = glob.glob(_file1+'/*.npy')
            try:
                fr = meta[random.randint(0, len(meta) - 1)]
            except ValueError:  # no .npy files found; fr would be undefined below
                print(_file1, len(meta))
                raise
            lm_name = fr
            im_name = (fr[:-3] + 'png').replace('landmarks', 'sample_processed').replace('/live', '')
            image = Image.open(im_name)
            width, height = image.size
            image_li = image.resize((imsize,imsize))
            image_li = np.array(image_li,np.float32)
            lm_li = np.load(lm_name) / width
            if np.random.rand() > 0.5:
                image_li = cv2.flip(image_li, 1)
                lm_li[:,0] = 1 - lm_li[:,0]
                lm_li = lm_li[lm_reverse_list,:]

            # spoof
            _file2 = _file2.decode('UTF-8')
            meta = glob.glob(_file2+'/*.npy')
            try:
                fr = meta[random.randint(0, len(meta) - 1)]
            except ValueError:  # no .npy files found; fr would be undefined below
                print(_file2, len(meta))
                raise
            lm_name = fr
            im_name = (fr[:-3] + 'png').replace('landmarks', 'sample_processed').replace('/spoof', '')
            image = Image.open(im_name)
            width, height = image.size
            image_sp = image.resize((imsize,imsize))
            image_sp = np.array(image_sp,np.float32)
            lm_sp = np.load(lm_name) / width
            if np.random.rand() > 0.5:
                image_sp = cv2.flip(image_sp, 1)
                lm_sp[:,0] = 1 - lm_sp[:,0]
                lm_sp = lm_sp[lm_reverse_list,:]

            # offset map
            reg_map_sp = generate_offset_map(lm_sp, lm_li)

            return np.array(image_li,np.float32)/255, np.array(image_sp,np.float32)/255, reg_map_sp.astype(np.float32)

        image_li, image_sp, reg_map_sp = tf.py_func(_parse_function, [file1, file2], [tf.float32, tf.float32, tf.float32])
        image_li   = tf.ensure_shape(image_li,   [imsize, imsize, 3])
        image_sp   = tf.ensure_shape(image_sp,   [imsize, imsize, 3])
        reg_map_sp = tf.ensure_shape(reg_map_sp, [imsize, imsize, 3])
        # data augmentation
        image      = tf.stack([tf.image.random_brightness(image_li, 0.25), tf.image.random_brightness(image_sp, 0.25)], axis=0)
        return image, reg_map_sp
Code example #8
File: edge_maps.py Project: rlinus/sleap
        def generate_pafs(example):
            """Local processing function for dataset mapping."""
            instances = example["instances"]
            in_img = (instances > 0) & (instances < tf.reshape(
                tf.stack([xv[-1], yv[-1]], axis=0), [1, 1, 2]))
            in_img = tf.reduce_any(tf.reduce_all(in_img, axis=-1), axis=1)
            in_img = tf.ensure_shape(in_img, [None])
            instances = tf.boolean_mask(instances, in_img)

            edge_sources, edge_destinations = get_edge_points(
                instances, edge_inds)
            edge_sources = tf.ensure_shape(edge_sources, (None, n_edges, 2))
            edge_destinations = tf.ensure_shape(edge_destinations,
                                                (None, n_edges, 2))

            pafs = make_multi_pafs(
                xv=xv,
                yv=yv,
                edge_sources=edge_sources,
                edge_destinations=edge_destinations,
                sigma=self.sigma,
            )
            pafs = tf.ensure_shape(pafs, (grid_height, grid_width, n_edges, 2))

            if self.flatten_channels:
                pafs = tf.reshape(pafs, [grid_height, grid_width, n_edges * 2])
                pafs = tf.ensure_shape(pafs,
                                       (grid_height, grid_width, n_edges * 2))

            example["part_affinity_fields"] = pafs

            return example
Code example #9
def wrapped_user_batch_dataset(user_batch):
    def func(user_batch):
        return user_batch_dataset(int(user_batch[0]), int(user_batch[1]))

    py_func = tf.py_function(func, [user_batch], [tf.float32, tf.float32])

    return (tf.ensure_shape(py_func[0],
                            (None, 5, AugmentedBeatmapColumns.N_COLUMNS)),
            tf.ensure_shape(py_func[1], (None, )))
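
Why the two ensure_shape calls are needed at all: tensors returned by tf.py_function carry no static shape, so everything downstream would otherwise see unknown ranks. A minimal sketch of the effect (shapes are illustrative):

@tf.function
def demo():
    out = tf.py_function(lambda: tf.zeros([3, 5]), [], tf.float32)
    # At trace time out.shape is <unknown>; ensure_shape restores a partial
    # static shape and verifies it against the actual value at run time.
    return tf.ensure_shape(out, (None, 5))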
Code example #10
 def _define_shapes(self, features: TensorDict, labels: TensorDict):
     """Define the tensor shapes for TPU compiling."""
     if not self._is_shape_defined:
         return
     features['images'] = tf.ensure_shape(
         features['images'],
         (self._output_dimension, self._output_dimension, 3))
     labels['segmentation_output']['gt_word_score'] = tf.ensure_shape(
         labels['segmentation_output']['gt_word_score'],
         (self._mask_dimension, self._mask_dimension))
     labels['instance_labels']['num_instance'] = tf.ensure_shape(
         labels['instance_labels']['num_instance'], [])
     if self._max_num_instance >= 0:
         labels['instance_labels']['masks_sizes'] = tf.ensure_shape(
             labels['instance_labels']['masks_sizes'],
             (self._max_num_instance, ))
         labels['instance_labels']['masks'] = tf.ensure_shape(
             labels['instance_labels']['masks'],
             (self._mask_dimension, self._mask_dimension))
         labels['instance_labels']['classes'] = tf.ensure_shape(
             labels['instance_labels']['classes'],
             (self._max_num_instance, ))
         labels['instance_labels']['gt_weights'] = tf.ensure_shape(
             labels['instance_labels']['gt_weights'],
             (self._max_num_instance, ))
         labels['paragraph_labels']['paragraph_ids'] = tf.ensure_shape(
             labels['paragraph_labels']['paragraph_ids'],
             (self._max_num_instance, ))
         labels['paragraph_labels']['has_para_ids'] = tf.ensure_shape(
             labels['paragraph_labels']['has_para_ids'], [])
Code example #11
 def call(self, inputs):  # images, encoded
     images, encoded = inputs
     # TODO: avoid the hard-coded batch size and spatial dims
     images = tf.ensure_shape(images, (32, 28, 28, 1))
     encoded = tf.ensure_shape(encoded, (32, 100))
     encoded = tf.expand_dims(tf.expand_dims(encoded, axis=-2), axis=-2)
     encoded = tf.tile(encoded, (1, ) + images.shape[1:])
     output = tf.concat([images, encoded], axis=-1)
     return output
Code example #12
def convert_to_tensors(train_example, image_feature_description,
                       bottleneck_shape, total_classes):
    bottleneck_label_pair = tf.io.parse_single_example(
        train_example, image_feature_description)
    bottleneck_vector = tf.ensure_shape(
        tf.io.parse_tensor(bottleneck_label_pair['bottleneck_vector'],
                           out_type=tf.float32), bottleneck_shape)
    class_label = tf.ensure_shape(
        tf.io.parse_tensor(bottleneck_label_pair['label'], out_type=tf.uint8),
        [total_classes])
    return bottleneck_vector, class_label
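
For this parser to work, the records must have been written with tf.io.serialize_tensor. A writer-side sketch (assumed; only the two feature names come from the snippet above):

def serialize_pair(bottleneck_vector, label):
    def bytes_feature(t):
        return tf.train.Feature(bytes_list=tf.train.BytesList(
            value=[tf.io.serialize_tensor(t).numpy()]))
    example = tf.train.Example(features=tf.train.Features(feature={
        'bottleneck_vector': bytes_feature(bottleneck_vector),
        'label': bytes_feature(label),
    }))
    return example.SerializeToString()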
Code example #13
 def __init__(self, dim: int, n_gh: int):
     """
     :param dim: dimension of the multivariate normal
     :param n_gh: number of Gauss-Hermite points per dimension
     """
     self.dim = dim
     self.n_gh = n_gh
     self.n_gh_total = n_gh ** dim
     Z, dZ = ndgh_points_and_weights(self.dim, self.n_gh)
     self.Z = tf.ensure_shape(Z, (self.n_gh_total, self.dim))
     self.dZ = tf.ensure_shape(dZ, (self.n_gh_total, 1))
Code example #14
 def read_tfrecord(self, example_proto):
     example_description = {
         'feature': tf.io.FixedLenFeature([], tf.string),
         'label': tf.io.FixedLenFeature([], tf.string),
     }
     example = tf.io.parse_single_example(example_proto, example_description)
     feature = tf.ensure_shape(
         tf.io.parse_tensor(example['feature'], out_type='float32'),
         [self.encoder.shape()[0], self.encoder.shape()[1], self.encoder.shape()[2]])
     label = tf.ensure_shape(
         tf.io.parse_tensor(example['label'], out_type='int64'),
         [int(self.size * self.size)])
     return feature, label
Code example #15
def random_padding(x, y):
    hpad_b = tf.random.uniform((), minval=0, maxval=8, dtype=tf.int32)
    hpad_a = 7 - hpad_b

    wpad_b = tf.random.uniform((), minval=0, maxval=8, dtype=tf.int32)
    wpad_a = 7 - wpad_b
    x = tf.pad(x, [[hpad_b, hpad_a], [wpad_b, wpad_a]])
    y = tf.pad(y, [[0, 7], [0, 7]])
    x = tf.ensure_shape(x, [16, 16])
    y = tf.ensure_shape(y, [16, 16])

    return x, y
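
Why ensure_shape matters here: hpad_b and wpad_b are tensors, so inside a dataset graph tf.pad cannot infer a static output shape and x would flow on as (None, None). Since the total padding is always 7, a 9x9 input is guaranteed to come out 16x16; a quick eager check with a toy input:

x, y = random_padding(tf.zeros([9, 9]), tf.zeros([9, 9]))
assert x.shape == (16, 16) and y.shape == (16, 16)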
Code example #16
File: Dataset.py Project: kaarejoergensen/DTL-LivDet
    def _process_path(self, file_path):
        def get_label(file_path):
            # The path contains live if it is a live sample
            fake_bool = tf.math.logical_not(
                tf.strings.regex_full_match(file_path, ".*(?i)live/.*"))
            fake_float = tf.dtypes.cast(fake_bool, tf.float32)
            return tf.reshape(fake_float, [1])

        def decode_img(parts, img):
            # convert the compressed string to a 3D uint8 tensor
            png_bool = tf.strings.regex_full_match(parts[-1], ".*(?i)png.*")
            if png_bool:
                img = tf.image.decode_png(img, channels=3)
            else:
                img = tf.image.decode_bmp(img)
                img = tf.cond(
                    tf.shape(img)[2] == 1,
                    lambda: tf.image.grayscale_to_rgb(img), lambda: img)
            # Use `convert_image_dtype` to convert to floats in the [0,1] range.
            img = tf.image.convert_image_dtype(img, tf.float32)
            # resize the image to the desired size.
            img_size = self.config.IMG_SIZE
            return tf.image.resize(img, [img_size, img_size])

        def get_spoof_type(label, file_path, parts):
            return tf.cond(tf.equal(label, 1.),
                           lambda: get_spoof_type_spoof(file_path, parts),
                           lambda: tf.constant('live'))

        def get_spoof_type_spoof(file_path, parts):
            spoof_index = tf.cond(
                tf.strings.regex_full_match(file_path, ".*LivDet2009.*"),
                lambda: tf.constant(-3), lambda: tf.constant(-2))
            return tf.strings.regex_replace(
                tf.strings.lower(parts[spoof_index]), r'\s+|\d+|_|-', '')

        def get_sensor_type(parts):
            return tf.strings.regex_replace(parts[6], r'(?i)(test|train|_)',
                                            '')

        parts = tf.strings.split(file_path, os.path.sep)

        label = get_label(file_path)
        spoof_type = get_spoof_type(label, file_path, parts)
        sensor_type = get_sensor_type(parts)
        dataset_name = parts[4]
        # load the raw data from the file as a string
        img = tf.io.read_file(file_path)
        img = decode_img(parts, img)
        # Assign the results: a bare tf.ensure_shape call sets the shape only
        # on its (discarded) output, so it would not help the dataset graph.
        label = tf.ensure_shape(label, [1])
        img = tf.ensure_shape(img, [self.config.IMG_SIZE, self.config.IMG_SIZE, 3])
        return img, label, spoof_type, sensor_type, dataset_name
Code example #17
    def train_step(self, first_epoch):
        epoch_global_norm = tf.TensorArray(
            tf.float32,
            size=self.params['dataloader']["number_of_elements"],
            dynamic_size=False,
            clear_after_read=False,
        )
        epoch_loss_avg = tf.TensorArray(
            tf.float32,
            size=self.params['dataloader']["number_of_elements"],
            dynamic_size=False,
            clear_after_read=False,
        )

        for element in self.train_dataset.enumerate():
            index = tf.dtypes.cast(element[0], tf.int32)
            batch = element[1]  # renamed from `set`, which shadows the builtin
            shape = [
                self.params['dataloader']['batch_size'], self.pixel_num,
                self.params['dataloader']['tomographic_bin_number']
            ]
            kappa_data = tf.boolean_mask(tf.transpose(batch[0], perm=[0, 2, 1]),
                                         self.bool_mask,
                                         axis=1)
            kappa_data = tf.ensure_shape(kappa_data, shape)
            labels = batch[1]
            # Add noise
            noise = tf.ensure_shape(self._make_noise(), shape)
            kappa_data = tf.math.add(kappa_data, noise)

            # Optimize the model
            with tf.GradientTape() as tape:
                loss_object = tf.keras.losses.MeanAbsoluteError()
                y_ = self.model(kappa_data, training=True)
                loss_value = loss_object(y_true=labels, y_pred=y_)
            if self.params['training']['distributed']:
                tape = hvd.DistributedGradientTape(tape)
            grads = tape.gradient(loss_value, self.model.trainable_variables)
            self.optimizer.apply_gradients(
                zip(grads, self.model.trainable_variables))

            if self.params['training'][
                    'distributed'] and index == 0 and first_epoch:
                hvd.broadcast_variables(self.model.variables, root_rank=0)
                hvd.broadcast_variables(self.optimizer.variables(),
                                        root_rank=0)

            epoch_loss_avg = epoch_loss_avg.write(index, loss_value)
            epoch_global_norm = epoch_global_norm.write(
                index, tf.linalg.global_norm(grads))

        return epoch_loss_avg.stack(), epoch_global_norm.stack()
Code example #18
File: note.py Project: drwangxian/amt-by-dconv
    def _nn_model_fn(self, task):
        assert task in ('frame', 'onset')
        inputs = self.batch['spectrogram']
        inputs = tf.ensure_shape(inputs, [1, None, 336])

        _nn_fn = dict(onset=MiscFns.onset_detector_fn,
                      frame=MiscFns.frame_label_detector_fn)
        _nn_fn = _nn_fn[task]
        outputs = _nn_fn(inputs=inputs, is_training=self.is_training)
        outputs = tf.stop_gradient(outputs)
        outputs = tf.ensure_shape(outputs, [1, None, 88])

        return outputs
Code example #19
def convert_to_estimator_input(d):
    # just the mixture, standard mode
    inputs = tf.ensure_shape(d["mix"], config.INPUT_SHAPE)
    if config.MODE == 'conditioned':
        if config.CONTROL_TYPE == 'dense':
            c_shape = (1, config.Z_DIM)
        elif config.CONTROL_TYPE == 'cnn':
            c_shape = (config.Z_DIM, 1)
        cond = tf.ensure_shape(tf.reshape(d['conditions'], c_shape), c_shape)
        # mixture + condition vector z
        inputs = (inputs, cond)
        # target -> isolate instrument
    outputs = tf.ensure_shape(d["target"], config.INPUT_SHAPE)
    return (inputs, outputs)
Code example #20
File: open_loop_policy.py Project: adak32/bellman
    def __init__(self, time_step_spec, action_spec, actions: tf.Tensor):
        super().__init__(time_step_spec, action_spec)

        tf.ensure_shape(actions,
                        [None] + action_spec.shape)  # [time_step, features...]
        self._actions = actions

        self._action_index = tf.constant(0, shape=())
        self._next_action = common.create_variable(
            "next_action",
            initial_value=self._actions[self._action_index],
            shape=action_spec.shape,
            dtype=action_spec.dtype,
        )
Code example #21
File: providers.py Project: rlinus/sleap
        def fetch_lf(ind):
            """Local function that fetches a sample given the index."""
            ind = tf.cast(ind, tf.int64)
            (
                image,
                raw_image_size,
                instances,
                video_ind,
                frame_ind,
                skeleton_inds,
                track_inds,
                n_tracks,
            ) = tf.py_function(
                py_fetch_lf,
                [ind],
                [
                    image_dtype,
                    tf.int32,
                    tf.float32,
                    tf.int32,
                    tf.int64,
                    tf.int32,
                    tf.int32,
                    tf.int32,
                ],
            )

            # Ensure shape with constant or variable height/width, based on whether or
            # not the videos have mixed sizes.
            if self.is_from_multi_size_videos:
                image = tf.ensure_shape(image, (None, None, image_num_channels))
            else:
                image = tf.ensure_shape(image, first_image.shape)

            instances = tf.ensure_shape(instances, tf.TensorShape([None, None, 2]))
            skeleton_inds = tf.ensure_shape(skeleton_inds, tf.TensorShape([None]))
            track_inds = tf.ensure_shape(track_inds, tf.TensorShape([None]))

            return {
                "image": image,
                "raw_image_size": raw_image_size,
                "example_ind": ind,
                "video_ind": video_ind,
                "frame_ind": frame_ind,
                "scale": tf.ones([2], dtype=tf.float32),
                "instances": instances,
                "skeleton_inds": skeleton_inds,
                "track_inds": track_inds,
                "n_tracks": n_tracks,
            }
Code example #22
    def call(self, inputs: TensorType) -> tf.Tensor:
        """
        Evaluate the basis functions at ``inputs``.

        :param inputs: The evaluation points, a tensor with the shape ``[N, D]``.

        :return: A tensor with the shape ``[N, M]``.
        """
        c = tf.sqrt(2 * self.kernel.variance / self.output_dim)
        inputs = tf.divide(inputs, self.kernel.lengthscales)  # [N, D]
        basis_functions = tf.cos(
            tf.matmul(inputs, self.W, transpose_b=True) + self.b)  # [N, M]
        output = c * basis_functions  # [N, M]
        output = tf.ensure_shape(output, self.compute_output_shape(inputs.shape))
        return output
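
This is the standard random Fourier feature construction (a general identity, not specific to this project): assuming W is sampled from the kernel's spectral density and b uniformly from [0, 2*pi), the features approximate the kernel as

    k(x, x') ≈ phi(x)^T phi(x'),    phi(x) = sqrt(2 * variance / M) * cos(W x + b)

with M = output_dim, which is why the amplitude c above is sqrt(2 * kernel.variance / output_dim).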
Code example #23
def keypose_loss_proj(uvdw_pos, labels, mparams, num_order):
    """Compute the reprojection error on the target frames.

  Args:
    uvdw_pos: predicted uvd, always positive.
    labels: sample labels.
    mparams: model parameters.
    num_order: number of order permutations.

  Returns:
    Scalar loss.
  """
    num_kp = mparams.num_kp
    to_world = labels['to_world_L']  # [batch, 4, 4]
    to_world_order = tf.stack([to_world] * num_order,
                              axis=1)  # [batch, order, 4, 4]
    to_world_order = tf.ensure_shape(to_world_order, [None, num_order, 4, 4],
                                     name='to_world_order')
    world_coords = project(to_world_order, uvdw_pos,
                           True)  # [batch, order, 4, num_kp]
    world_coords = tf.ensure_shape(world_coords, [None, num_order, 4, num_kp],
                                   name='world_coords')
    print('world_coords shape [batch, order, 4, num_kp]:', world_coords.shape)

    # Target transform and keypoints.
    # [batch, num_targs, 4, 4] for transforms
    # [batch, num_targs, 4, num_kp] for keypoints (after transpose)
    targets_to_uvd = labels['targets_to_uvd_L']
    # `num_targs` is not defined elsewhere in this snippet; recover it from
    # the static shape of the target transforms.
    num_targs = targets_to_uvd.shape[1]
    targets_keys_uvd = tf.transpose(labels['targets_keys_uvd_L'], [0, 1, 3, 2])
    targets_keys_uvd_order = tf.stack([targets_keys_uvd] * num_order, axis=1)
    print('Model fn targets_to_uvd shape [batch, num_targs, 4, 4]:',
          targets_to_uvd.shape)
    print(
        'Model fn targets_keys_uvd_order shape [batch, order, num_targs, 4, '
        'num_kp]:', targets_keys_uvd_order.shape)

    # [batch, order, num_targs, 4, num_kp]
    proj_uvds = project(tf.stack([targets_to_uvd] * num_order, axis=1),
                        tf.stack([world_coords] * num_targs, axis=2))
    proj_uvds = tf.ensure_shape(proj_uvds, [None, num_order, 5, 4, num_kp],
                                name='proj_uvds')
    print('proj_uvds shape [batch, order, num_targs, 4, num_kp]:',
          proj_uvds.shape)
    loss_proj = keypoint_loss_targets(proj_uvds, targets_keys_uvd_order,
                                      mparams)
    loss_proj = tf.ensure_shape(loss_proj, [None, num_order], name='loss_proj')
    print('loss_proj shape [batch, order]:', loss_proj.shape)
    return loss_proj
Code example #24
File: utils.py Project: WeilerWebServices/TensorFlow
    def flatten(self, tensor):
        """Flattens and caches the tensor's batch_dims."""
        with tf.name_scope('batch_flatten'):
            if self._batch_dims == 1:
                return tensor

            self._original_tensor_shape = composite.shape(tensor)

            if tensor.shape[self._batch_dims:].is_fully_defined():
                return composite.reshape(
                    tensor, [-1] + tensor.shape[self._batch_dims:].as_list())

            reshaped = composite.reshape(
                tensor,
                tf.concat(
                    [[-1], composite.shape(tensor)[self._batch_dims:]],
                    axis=0),
            )
            # If the batch dimensions are all defined but the rest are undefined,
            # `reshaped` will have None as the first squashed dim since we are calling
            # tf.shape above. Since we know how many batch_dims we have, we can check
            # if all the elements we want to squash are defined, allowing us to
            # call ensure_shape to set the shape of the squashed dim. Note that this
            # is only implemented for tf.Tensor and not SparseTensors.
            if (isinstance(tensor, tf.Tensor)
                    and tensor.shape[:self._batch_dims].is_fully_defined()):
                return tf.ensure_shape(
                    reshaped,
                    [np.prod(tensor.shape[:self._batch_dims], dtype=np.int64)
                     ] + tensor.shape[self._batch_dims:])
            return reshaped
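
A standalone illustration of the comment's point, with batch_dims fixed at 2 and plain tf ops standing in for the project's composite helpers (toy shapes):

@tf.function(input_signature=[tf.TensorSpec([4, 3, None], tf.float32)])
def squash_batch_dims(t):
    reshaped = tf.reshape(t, tf.concat([[-1], tf.shape(t)[2:]], axis=0))
    # reshaped.shape is (None, None) even though 4 * 3 is statically known,
    # because the reshape target came from tf.shape at run time.
    return tf.ensure_shape(reshaped, [12, None])  # pin the squashed dim to 4 * 3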
Code example #25
File: tfrecords.py Project: fmi-basel/dl-utils
    def parse(self, example):
        '''parse a tfrecord example.

        Returns
        -------
        sample : dict of tensors
            sample containing image and label. Note that the image is
            always converted to tf.float32 and the label is one-hot encoded.

        '''
        features = {
            # Extract features using the keys set during creation
            self.shape_key:
            tf.io.FixedLenSequenceFeature([], tf.int64, True),
            self.label_key:
            tf.io.FixedLenFeature([], tf.int64),
            self.image_key:
            tf.io.FixedLenFeature([], tf.string),
        }
        sample = tf.io.parse_single_example(example, features)

        # Fixed shape appears to be necessary for training with keras.
        if self.fixed_ndim is not None:
            shape = tf.ensure_shape(sample[self.shape_key],
                                    (self.fixed_ndim, ))
        else:
            shape = sample[self.shape_key]

        image = tf.io.decode_raw(sample[self.image_key], self.image_dtype)
        image = tf.reshape(image, shape)
        image = tf.cast(image, tf.float32)
        return {
            self.label_key: tf.one_hot(sample[self.label_key], self.n_classes),
            self.image_key: image
        }
Code example #26
File: video_ssl_input.py Project: zss1980/models
    def _parse_eval_data(
        self, decoded_tensors: Dict[str, tf.Tensor]
    ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
        """Parses data for evaluation."""
        image = decoded_tensors[self._image_key]
        image = _process_image(image=image,
                               is_training=False,
                               num_frames=self._num_frames,
                               stride=self._stride,
                               num_test_clips=self._num_test_clips,
                               min_resize=self._min_resize,
                               crop_size=self._crop_size)
        image = tf.cast(image, dtype=self._dtype)
        features = {'image': image}

        label = decoded_tensors[self._label_key]
        label = _process_label(label, self._one_hot_label, self._num_classes)

        if self._output_audio:
            audio = decoded_tensors[self._audio_feature]
            audio = tf.cast(audio, dtype=self._dtype)
            audio = preprocess_ops_3d.sample_sequence(audio,
                                                      20,
                                                      random=False,
                                                      stride=1)
            audio = tf.ensure_shape(audio, [20, 2048])
            features['audio'] = audio

        return features, label
Code example #27
File: scripts.py Project: stmharry/body-composition
        def _f(inputs):
            input_list = []
            for (name, dtype, default) in zip(f.input_names, f.input_types,
                                              f.input_defaults):
                if name in inputs:
                    input_ = inputs[name]
                elif is_numpy:
                    input_ = np.asarray(default)
                elif is_tf:
                    input_ = tf.constant(default, dtype=dtype)

                input_list.append(input_)

            if is_numpy:
                output_list = f_partial(*input_list)
            elif is_tf:
                output_list = tf.numpy_function(f_partial, input_list,
                                                f.output_types)

            if include_inputs:
                outputs = inputs
            else:
                outputs = {}

            for (output, name, shape) in zip(output_list, f.output_names,
                                             f.output_shapes):
                if is_numpy:
                    outputs[name] = output
                elif is_tf:
                    outputs[name] = tf.ensure_shape(output, shape)

            return outputs
Code example #28
File: decoder.py Project: isi-vista/VistaMT
 def call(self, inputs, step_mode=False, pos=0, training=False, **kwargs):
     y_in, ctx, ctx_plus_emb, x_mask, prev_state = inputs
     if step_mode:
         y_in = tf.expand_dims(y_in, axis=1)
     seq_len = tf.shape(y_in)[1]
     next_state = [] if step_mode else None
     y_emb = self.y_emb(y_in)
     y_emb += self.pos_encoding[:, pos:pos+seq_len, :]
     if step_mode:
         y_emb = tf.ensure_shape(y_emb, (None, 1, self.emb_dim))
     y_emb = self.dropout_in(y_emb, training=training)
     h_dec = self.prj_in(y_emb)
     for idx in range(len(self.convolutions)):
         prj = self.conv_in_projections[idx]
         if prj:
             h_dec = prj(h_dec)
         stack_prev_state = prev_state[idx] if step_mode else None
         conv = self.convolutions[idx]
         h_dec, stack_next_state = conv([h_dec, ctx, ctx_plus_emb, x_mask, y_emb,
                                         stack_prev_state],
                                        step_mode=step_mode, training=training)
         if step_mode:
             next_state.append(stack_next_state)
     next_state = tuple(next_state) if next_state is not None else None
     dec_out = self.prj_out_1(h_dec)
     dec_out = self.dropout_out(dec_out, training=training)
     logits = self.prj_out_2(dec_out)
     return logits, next_state
Code example #29
File: facenet.py Project: JuneXia/flaskface
def fixed_contract(image, control, raw_size=(182, 182)):
    if raw_size[0] != raw_size[1]:
        raise Exception('At present the image width and height must be equal. \n\
        If they are not equal, part of this function needs to be modified. \n\
        If you are aware of this and accept it, you may remove this exception.')

    contract_size = 132

    '''
    central_crop = tf.image.central_crop(image, contract_size / raw_image_size)
    print(
        '6 type(central_crop)={}, central_crop.shape={}, central_crop={}'.format(type(central_crop), central_crop.shape,
                                                                                 central_crop))

    # Randomly enlarge a little
    h = w = tf.random_uniform([], contract_size, raw_image_size + 1, dtype=tf.int32)
    resize_image = tf.image.resize_image_with_pad(central_crop, h, w)
    # resize_image = tf.cast(resize_image, dtype=tf.uint8)

    # Pad to the target size
    image = tf.image.resize_image_with_crop_or_pad(resize_image, raw_image_size, raw_image_size)
    '''

    # contract_size = tf.random_uniform([], 124, 144, dtype=tf.float64)
    image = tf.cond(get_control_flag(control, FIXED_CONTRACT),
                    # lambda: tf.image.central_crop(image, tf.divide(contract_size, raw_image_size)),  # random center crop
                    lambda: tf.image.central_crop(tf.ensure_shape(image, (None, None, 3)), contract_size / raw_size[0]),  # fixed center crop
                    lambda: tf.identity(image))

    image = tf.cond(get_control_flag(control, FIXED_CONTRACT),
                    lambda: tf.cast(tf.image.resize_image_with_pad(image, raw_size[0], raw_size[1]), tf.uint8),
                    lambda: tf.identity(image))

    return image
Code example #30
    def _parse_train_data(
        self, decoded_tensors: Dict[str, tf.Tensor]
    ) -> Tuple[Dict[str, tf.Tensor], tf.Tensor]:
        """Parses data for training."""
        # Process image and label.
        image = decoded_tensors[self._image_key]
        image = _process_image(image=image,
                               is_training=True,
                               num_frames=self._num_frames,
                               stride=self._stride,
                               num_test_clips=self._num_test_clips,
                               min_resize=self._min_resize,
                               crop_size=self._crop_size)
        image = tf.cast(image, dtype=self._dtype)
        features = {'image': image}

        label = decoded_tensors[self._label_key]
        label = _process_label(label, self._one_hot_label, self._num_classes)

        if self._output_audio:
            audio = decoded_tensors[self._audio_feature]
            audio = tf.cast(audio, dtype=self._dtype)
            # TODO(yeqing): synchronize audio/video sampling. Especially randomness.
            audio = preprocess_ops_3d.sample_sequence(audio,
                                                      self._audio_shape[0],
                                                      random=False,
                                                      stride=1)
            audio = tf.ensure_shape(audio, self._audio_shape)
            features['audio'] = audio

        return features, label