Example #1
def allreduce(tensor, average=True, device_dense='', device_sparse=''):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average: If True, computes the average over all ranks.
                 Otherwise, computes the sum over all ranks.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_ALLGATHER.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.
    """
    if isinstance(tensor, tf.IndexedSlices):
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(size(), tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide all gathered values by
            # the Horovod size.
            new_values = tf.div(values, horovod_size) if average else values
        return tf.IndexedSlices(new_values, indices,
                                dense_shape=tensor.dense_shape)
    else:
        with tf.device(device_dense):
            horovod_size = tf.cast(size(), tensor.dtype)
            summed_tensor = _allreduce(tensor)
            new_tensor = (tf.div(summed_tensor, horovod_size)
                          if average else summed_tensor)
        return new_tensor
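
A minimal usage sketch for the helper above (not part of the original example): it assumes Horovod's public API of the same era (hvd.init, hvd.rank, and hvd.allreduce with the average flag), TF1-style graph execution, and a launch via horovodrun, e.g. horovodrun -np 2 python demo.py.

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
# Each rank contributes a different value; average=True returns their mean
# on every rank.
local_value = tf.constant([1.0, 2.0, 3.0]) * float(hvd.rank() + 1)
averaged = hvd.allreduce(local_value, average=True)

with tf.Session() as sess:
    print('rank', hvd.rank(), sess.run(averaged))
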
Example #2
def grouped_allreduce(tensors,
                      average=None,
                      device_dense='',
                      device_sparse='',
                      compression=Compression.none,
                      op=None,
                      prescale_factor=1.0,
                      postscale_factor=1.0,
                      process_set=global_process_set):
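    """Perform grouped allreduces on a list of tf.Tensor or tf.IndexedSlices.

    The arguments mirror those of allreduce (see Example #3 below). tensors is
    a list of tensors that are reduced together as one group, prescale_factor
    and postscale_factor scale the values before and after the reduction, and
    process_set restricts the operation to a subset of Horovod processes.
    """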
    if not tensors:
        return tensors

    op = handle_average_backwards_compatibility(op, average)

    average_in_framework = False
    if rocm_built():
        # For ROCm, perform averaging at framework level
        average_in_framework = op == Average or op == Adasum
        op = Sum if op == Average else op

    if any(isinstance(t, tf.IndexedSlices) for t in tensors):
        # TODO: Need to fix this to actually call Adasum
        if op == Adasum:
            raise NotImplementedError(
                'The Adasum reduction does not currently support sparse tensors. As a '
                'workaround please pass sparse_as_dense=True to DistributedOptimizer'
            )
        with tf.device(device_sparse):
            new_values = []
            for tensor in tensors:
                # For IndexedSlices, do two allgathers instead of an allreduce.
                horovod_size = tf.cast(
                    size_op(process_set_id=process_set.process_set_id) if int(
                        os.environ.get("HOROVOD_ELASTIC",
                                       0)) else process_set.size(),
                    dtype=tensor.values.dtype)
                values = allgather(tensor.values, process_set=process_set)
                indices = allgather(tensor.indices, process_set=process_set)

                # To make this operation into an average, divide allgathered values by
                # the Horovod size.
                new_values.append(
                    (values / horovod_size) if op == Average else values)
        return [
            tf.IndexedSlices(x, indices, dense_shape=t.dense_shape)
            for x, t in zip(new_values, tensors)
        ]
    else:
        with tf.device(device_dense):
            tensors_compressed, ctxs = zip(
                *[compression.compress(tensor) for tensor in tensors])
            summed_tensors_compressed = _grouped_allreduce(
                tensors_compressed,
                op=op,
                prescale_factor=prescale_factor,
                postscale_factor=postscale_factor,
                process_set=process_set)
            summed_tensors = [
                compression.decompress(t, ctx)
                for t, ctx in zip(summed_tensors_compressed, ctxs)
            ]
            if op == Adasum:
                if process_set != global_process_set:
                    raise NotImplementedError(
                        "Adasum does not support non-global process sets yet.")
                if 'CPU' not in tensors[0].device and gpu_available('tensorflow'):
                    if nccl_built():
                        if not is_homogeneous:
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster is not supported yet.'
                            )
                        elif not check_num_rank_power_of_2(
                                int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes is not supported yet.'
                            )
                        if rocm_built():
                            new_tensors = []
                            for tensor in summed_tensors:
                                horovod_local_size = tf.cast(
                                    local_size_op() if int(
                                        os.environ.get("HOROVOD_ELASTIC",
                                                       0)) else local_size(),
                                    dtype=tensor.dtype)
                                new_tensors.append(tensor / horovod_local_size)
                        else:
                            new_tensors = summed_tensors
                    else:
                        warnings.warn(
                            'Adasum reduction does not currently support GPU reduction using MPI. Tensors '
                            'are copied to CPU memory instead. To use Adasum for GPU reduction, please '
                            'compile Horovod with HOROVOD_GPU_OPERATIONS=NCCL.'
                        )
                        new_tensors = summed_tensors
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError(
                            'Running Adasum with non-power of 2 ranks is not supported yet.'
                        )
                    new_tensors = summed_tensors
            else:
                if rocm_built():
                    new_tensors = []
                    for tensor in summed_tensors:
                        horovod_size = tf.cast(
                            size_op(process_set_id=process_set.process_set_id)
                            if int(os.environ.get("HOROVOD_ELASTIC",
                                                  0)) else process_set.size(),
                            dtype=tensor.dtype)
                        new_tensors.append(
                            (tensor / horovod_size)
                            if average_in_framework else tensor)
                else:
                    new_tensors = summed_tensors
        return new_tensors
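
A minimal usage sketch (not from the original source), assuming a Horovod build recent enough to export hvd.grouped_allreduce and hvd.Average, running under TF2 eager execution:

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
# Reduce several tensors in a single grouped call instead of one allreduce
# per tensor.
grads = [tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0, 5.0])]
avg_grads = hvd.grouped_allreduce(grads, op=hvd.Average)
print([g.numpy() for g in avg_grads])
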
Example #3
def allreduce(tensor,
              average=None,
              device_dense='',
              device_sparse='',
              compression=Compression.none,
              op=None):
    """Perform an allreduce on a tf.Tensor or tf.IndexedSlices.

    This function performs a bandwidth-optimal ring allreduce on the input
    tensor. If the input is a tf.IndexedSlices, the function instead does an
    allgather on the values and the indices, effectively doing an allreduce on
    the represented tensor.

    Arguments:
        tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
                The shape of the input must be identical across all ranks.
        average:
            .. warning:: .. deprecated:: 0.19.0

                Use `op` instead. Will be removed in v0.21.0.

        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was built with HOROVOD_GPU_OPERATIONS.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was built with HOROVOD_GPU_OPERATIONS.
        compression: Compression algorithm used to reduce the amount of data
                     sent and received by each worker node.  Defaults to not
                     using compression.
        op: The reduction operation to combine tensors across different ranks.
            Defaults to Average if None is given.

    Returns:
        A tensor of the same shape and type as `tensor`, reduced across all
        processes according to `op` (averaged by default).
    """
    op = handle_average_backwards_compatibility(op, average)
    # Averaging happens in framework code, so translate that to Sum for the actual call
    true_op = Sum if op == Average else op

    if isinstance(tensor, tf.IndexedSlices):
        # TODO: Need to fix this to actually call Adasum
        if op == Adasum:
            raise NotImplementedError(
                'The Adasum reduction does not currently support sparse tensors. As a '
                'workaround please pass sparse_as_dense=True to DistributedOptimizer'
            )
        with tf.device(device_sparse):
            # For IndexedSlices, do two allgathers instead of an allreduce.
            horovod_size = tf.cast(size(), dtype=tensor.values.dtype)
            values = allgather(tensor.values)
            indices = allgather(tensor.indices)

            # To make this operation into an average, divide allgathered values by
            # the Horovod size.
            new_values = (values / horovod_size) if op == Average else values
        return tf.IndexedSlices(new_values,
                                indices,
                                dense_shape=tensor.dense_shape)
    else:
        with tf.device(device_dense):
            horovod_size = tf.cast(size(), dtype=tensor.dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            summed_tensor_compressed = _allreduce(tensor_compressed,
                                                  op=true_op)
            summed_tensor = compression.decompress(summed_tensor_compressed,
                                                   ctx)
            if op == Adasum:
                if 'CPU' not in tensor.device and gpu_available('tensorflow'):
                    if nccl_built():
                        if not is_homogeneous:
                            raise NotImplementedError(
                                'Running GPU Adasum on heterogeneous cluster is not supported yet.'
                            )
                        elif not check_num_rank_power_of_2(
                                int(size() / local_size())):
                            raise NotImplementedError(
                                'Running GPU Adasum with non-power of 2 nodes is not supported yet.'
                            )
                        horovod_local_size = tf.cast(local_size(),
                                                     dtype=tensor.dtype)
                        new_tensor = summed_tensor / horovod_local_size
                    else:
                        warnings.warn(
                            'Adasum reduction does not currently support GPU reduction using MPI. Tensors '
                            'are copied to CPU memory instead. To use Adasum for GPU reduction, please '
                            'compile Horovod with HOROVOD_GPU_OPERATIONS=NCCL.'
                        )
                        new_tensor = summed_tensor
                else:
                    if not check_num_rank_power_of_2(size()):
                        raise NotImplementedError(
                            'Running Adasum with non-power of 2 ranks is not supported yet.'
                        )
                    new_tensor = summed_tensor
            else:
                new_tensor = (summed_tensor /
                              horovod_size) if op == Average else summed_tensor
        return new_tensor
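
As a usage note (not part of the original example), the op and compression arguments of this newer signature can be combined, for instance averaging with fp16 wire compression. A sketch assuming hvd.Average and hvd.Compression.fp16 as exposed by horovod.tensorflow, under TF2 eager execution:

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
tensor = tf.random.uniform([1024])
# Average across ranks while sending fp16-compressed payloads on the wire.
reduced = hvd.allreduce(tensor, op=hvd.Average,
                        compression=hvd.Compression.fp16)
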
Example #4
  def __init__(self,
               config,
               x,
               y,
               x_b,
               y_b,
               x_b_v,
               y_b_v,
               num_classes_a,
               num_classes_b,
               is_training=True,
               ext_wts=None,
               y_sel=None,
               w_class_a=None,
               b_class_a=None):
    self._config = config
    self._is_training = is_training
    self._num_classes_a = num_classes_a
    self._num_classes_b = num_classes_b

    if config.backbone_class == 'resnet_backbone':
      bb_config = config.resnet_config
    else:
      assert False, 'Not supported'
    opt_config = config.optimizer_config
    proto_config = config.protonet_config
    transfer_config = config.transfer_config

    self._backbone = get_model(config.backbone_class, bb_config)
    self._inputs = x
    self._labels = y
    if opt_config.num_gpu > 1:
      self._labels_all = allgather(self._labels)
    else:
      self._labels_all = self._labels
    self._inputs_b = x_b
    self._labels_b = y_b
    self._inputs_b_v = x_b_v
    self._labels_b_v = y_b_v
    if opt_config.num_gpu > 1:
      self._labels_b_v_all = allgather(self._labels_b_v)
    else:
      self._labels_b_v_all = self._labels_b_v
    self._y_sel = y_sel
    self._mask = tf.placeholder(tf.bool, [], name='mask')

    # global_step = tf.get_variable(
    #     'global_step', shape=[], dtype=tf.int64, trainable=False)
    global_step = tf.contrib.framework.get_or_create_global_step()
    self._global_step = global_step
    log.info('LR decay steps {}'.format(opt_config.lr_decay_steps))
    log.info('LR list {}'.format(opt_config.lr_list))
    learn_rate = tf.train.piecewise_constant(
        global_step, list(
            np.array(opt_config.lr_decay_steps).astype(np.int64)),
        list(opt_config.lr_list))
    self._learn_rate = learn_rate

    opt = self.get_optimizer(opt_config.optimizer, learn_rate)
    if opt_config.num_gpu > 1:
      opt = hvd.DistributedOptimizer(opt)

    with tf.name_scope('TaskA'):
      h_a = self.backbone(x, is_training=is_training, ext_wts=ext_wts)
      self._h_a = h_a

    # Apply BN ops.
    bn_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

    with tf.name_scope('TaskB'):
      x_b_all = tf.concat([x_b, x_b_v], axis=0)
      if ext_wts is not None:
        h_b_all = self.backbone(
            x_b_all, is_training=is_training, reuse=True, ext_wts=ext_wts)
      else:
        h_b_all = self.backbone(x_b_all, is_training=is_training, reuse=True)

    with tf.name_scope('TaskA'):
      # Calculates hidden activation size.
      h_shape = h_a.get_shape()
      h_size = 1
      for ss in h_shape[1:]:
        h_size *= int(ss)

      if w_class_a is None:
        if ext_wts is not None:
          w_class_a = weight_variable(
              [h_size, num_classes_a],
              init_method='numpy',
              dtype=tf.float32,
              init_param={'val': np.transpose(ext_wts['w_class_a'])},
              wd=config.wd,
              name='w_class_a')
          b_class_a = weight_variable([],
                                      init_method='numpy',
                                      dtype=tf.float32,
                                      init_param={'val': ext_wts['b_class_a']},
                                      wd=0e0,
                                      name='b_class_a')
        else:
          w_class_a = weight_variable([h_size, num_classes_a],
                                      init_method='truncated_normal',
                                      dtype=tf.float32,
                                      init_param={'stddev': 0.01},
                                      wd=bb_config.wd,
                                      name='w_class_a')
          b_class_a = weight_variable([num_classes_a],
                                      init_method='constant',
                                      init_param={'val': 0.0},
                                      name='b_class_a')
        self._w_class_a_orig = w_class_a
        self._b_class_a_orig = b_class_a
      else:
        assert b_class_a is not None
        w_class_a_orig = weight_variable([h_size, num_classes_a],
                                         init_method='truncated_normal',
                                         dtype=tf.float32,
                                         init_param={'stddev': 0.01},
                                         wd=bb_config.wd,
                                         name='w_class_a')
        b_class_a_orig = weight_variable([num_classes_a],
                                         init_method='constant',
                                         init_param={'val': 0.0},
                                         name='b_class_a')
        self._w_class_a_orig = w_class_a_orig
        self._b_class_a_orig = b_class_a_orig

      self._w_class_a = w_class_a
      self._b_class_a = b_class_a
      num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64)
      num_classes_a_dyn32 = tf.shape(b_class_a)[0]

      if ext_wts is None:
        init_val = 10.0
      else:
        init_val = ext_wts['tau'][0]
      tau = weight_variable([],
                            init_method='constant',
                            init_param={'val': init_val},
                            name='tau')
      w_class_a_norm = self._normalize(w_class_a, 0)
      h_a_norm = self._normalize(h_a, 1)
      dot = tf.matmul(h_a_norm, w_class_a_norm)
      if ext_wts is not None:
        dot += b_class_a
      logits_a = tau * dot
      self._prediction_a = logits_a
      if opt_config.num_gpu > 1:
        self._prediction_a_all = allgather(self._prediction_a)
      else:
        self._prediction_a_all = self._prediction_a

      xent_a = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits_a, labels=y)
      cost_a = tf.reduce_mean(xent_a, name='xent')
      self._cost_a = cost_a
      cost_a += self._decay()
      correct_a = tf.equal(tf.argmax(logits_a, axis=1), y)
      self._correct_a = correct_a
      self._acc_a = tf.reduce_mean(tf.cast(correct_a, cost_a.dtype))

    with tf.name_scope('TaskB'):
      h_b = h_b_all[:tf.shape(x_b)[0]]
      h_b_v = h_b_all[tf.shape(x_b)[0]:]

      # Add new axes for the `batch` dimension.
      h_b_ = tf.expand_dims(h_b, 0)
      h_b_v_ = tf.expand_dims(h_b_v, 0)
      y_b_ = tf.expand_dims(y_b, 0)
      y_b_v_ = tf.expand_dims(y_b_v, 0)
      protos_b = self._compute_protos(num_classes_b, h_b_,
                                      y_b_ - num_classes_a)
      w_class_a_ = tf.expand_dims(tf.transpose(w_class_a_norm), 0)  # [1, K, D]
      w_class_b = self._normalize(protos_b, 2)  # [1, K, D]
      self._w_class_b = w_class_b
      w_class_all = tf.concat([w_class_a_, w_class_b], axis=1)
      logits_b_v = tau * compute_logits_cosine(w_class_all, h_b_v_)
      self._logits_b_v = logits_b_v
      self._prediction_b = logits_b_v[0]
      if opt_config.num_gpu > 1:
        self._prediction_b_all = allgather(self._prediction_b)
      else:
        self._prediction_b_all = self._prediction_b

      # Mask out the old classes.
      def mask_fn():
        bin_mask = tf.expand_dims(
            tf.reduce_sum(
                tf.one_hot(y_sel, num_classes_a + num_classes_b),
                0,
                keep_dims=True), 0)
        logits_b_v_m = logits_b_v * (1.0 - bin_mask)
        logits_b_v_m -= bin_mask * 100.0
        return logits_b_v_m

      if transfer_config.old_and_new:
        logits_b_v = tf.cond(self._mask, mask_fn, lambda: logits_b_v)
      xent_b_v = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits_b_v, labels=y_b_v_)
      cost_b = tf.reduce_mean(xent_b_v, name='xent')
      self._cost_b = cost_b

    if transfer_config.old_and_new:
      total_cost = cost_b
    else:
      total_cost = (transfer_config.cost_a_ratio * cost_a +
                    transfer_config.cost_b_ratio * cost_b)
    self._total_cost = total_cost

    if not transfer_config.meta_only:
      # assert False, 'let us go for pretrained model first'
      var_list = tf.trainable_variables()
      var_list = list(filter(lambda x: 'phi' in x.name, var_list))
      for v in var_list:
        log.info('Slow weights {}'.format(v.name))
    else:
      var_list = []

    if is_training:
      grads_and_vars = opt.compute_gradients(total_cost, var_list)
      with tf.control_dependencies(bn_ops):
        for bn_op in bn_ops:
          log.info('BN op {}'.format(bn_op.name))
        train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)

      grads_and_vars_b = opt.compute_gradients(cost_b, var_list)
      with tf.control_dependencies(bn_ops):
        train_op_b = opt.apply_gradients(
            grads_and_vars_b, global_step=global_step)

      with tf.control_dependencies(bn_ops):
        train_op_a = opt.minimize(cost_a, global_step=global_step)
      self._train_op = train_op
      self._train_op_a = train_op_a
      self._train_op_b = train_op_b
    self._initializer = tf.global_variables_initializer()
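
For context, a minimal TF1-style driver sketch (entirely illustrative, not from this project) showing how the Horovod pieces used in this constructor, namely hvd.init(), GPU pinning, hvd.DistributedOptimizer, and a broadcast of the initial weights, typically fit together; the toy variable and loss stand in for the model above.

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()

# Pin each process to one GPU, as is standard for TF1-style Horovod scripts.
sess_config = tf.ConfigProto()
sess_config.gpu_options.visible_device_list = str(hvd.local_rank())

# Toy stand-in for the model: one variable trained with a Horovod-wrapped
# optimizer, mirroring hvd.DistributedOptimizer(opt) in the constructor.
w = tf.get_variable('w', shape=[], initializer=tf.zeros_initializer())
loss = tf.square(w - 1.0)
opt = hvd.DistributedOptimizer(tf.train.GradientDescentOptimizer(0.1))
train_op = opt.minimize(loss, global_step=tf.train.get_or_create_global_step())

with tf.Session(config=sess_config) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(hvd.broadcast_global_variables(0))  # same initial weights everywhere
    for _ in range(10):
        sess.run(train_op)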