Example #1
    def initialize_model_with_bias(sess: tf.compat.v1.Session, input_op_names: List[str], output_op_names: List[str]) \
            -> tf.compat.v1.Session:
        """
        Initializes given model with bias.
        Adds zero bias to conv/linear layers without bias param, in given model.
        :param sess: model to be updated as tf.compat.v1.Session
        :return: updated session as tf.compat.v1.Session
        """

        assert sess is not None
        with sess.graph.as_default():
            ops = get_valid_ops(sess.graph, input_op_names, output_op_names)

            for op in ops:
                # skip gradient ops
                if not op.name.startswith('gradients/') and \
                        op.type in ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']:
                    # add bias if not present
                    if BiasUtils.is_bias_none(op):
                        # add bias param
                        bias_shape = BiasUtils._get_bias_shape_from_weights(op)
                        zero_bias = tf.Variable(
                            initial_value=np.zeros(bias_shape),
                            dtype=tf.float32)
                        BiasUtils._create_bias_add_op_and_insert(
                            sess, op, zero_bias)

        new_sess = save_and_load_graph('./temp', sess)
        sess.close()

        return new_sess
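
A minimal usage sketch (not part of the original source); the graph variable and the input/output op names below are hypothetical placeholders:

    # hypothetical usage; 'conv2d_input' and 'dense/Softmax' are placeholder op names
    sess = tf.compat.v1.Session(graph=model_graph)
    sess = initialize_model_with_bias(sess,
                                      input_op_names=['conv2d_input'],
                                      output_op_names=['dense/Softmax'])
    # sess now refers to the reloaded graph in which every conv/linear op has a bias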
Example #2
    def insert_bias_add_op(sess: tf.compat.v1.Session,
                           conv_op_out_tensor: tf.Tensor,
                           new_bias_tensor: tf.Variable,
                           bias_name="bias_value") -> None:
        """
        Insert bias-add op to given conv op.
        :param sess: model as tf.compat.v1.Session
        :param conv_op_out_tensor: output of conv op that should feed into the new bias op as tf.Tensor
        :param new_bias_tensor:  bias tensor to be added as tf.Variable
        :param bias_name: name string for the bias op
        :return: None
        Note: a higher-level API needs to perform a save and load to get the updated session after using this API
        """

        assert conv_op_out_tensor is not None, 'Error, insert_bias_add_op() : conv op output tensor must be provided'
        with sess.graph.as_default():
            if conv_op_out_tensor.consumers():

                consumer_list = list(conv_op_out_tensor.consumers())

                # create new Bias add op
                bias_add_op = tf.nn.bias_add(value=conv_op_out_tensor,
                                             bias=new_bias_tensor,
                                             name=bias_name)

                # use reroute to insert bias-add and swap current outputs of conv with bias-add op
                ge.reroute_ts(bias_add_op,
                              conv_op_out_tensor,
                              can_modify=consumer_list)

                # initialize tensor once it's added
                sess.run(tf.compat.v1.variables_initializer([new_bias_tensor]))
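
A hedged sketch of the call pattern implied by the docstring note: after inserting the bias-add op, the caller saves and reloads the graph so the edit takes effect (save_and_load_graph and _get_bias_shape_from_weights are the helpers used in Example #1; the conv op name is a hypothetical placeholder):

    conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')  # hypothetical op name
    with sess.graph.as_default():
        zero_bias = tf.Variable(
            initial_value=np.zeros(BiasUtils._get_bias_shape_from_weights(conv_op)),
            dtype=tf.float32)
    BiasUtils.insert_bias_add_op(sess, conv_op.outputs[0], zero_bias)
    sess = save_and_load_graph('./temp', sess)  # pick up the modified graph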
Example #3
def evaluate(model: tf.compat.v1.Session, iterations: int, use_cuda: bool):
    """
    eval function for MNIST LeNet model
    :param model: tf.compat.v1.Session
    :param iterations: iterations
    :param use_cuda: use_cuda
    :return:
    """

    total_test_images = 10000
    batch_size = 64

    # iterate over entire test data set, when iterations is None
    # TODO : figure out way to end iterator when the data set is exhausted
    if iterations is None:
        iterations = total_test_images // batch_size

    parser = MnistParser(data_inputs=['reshape_input'],
                         validation_inputs=['labels'],
                         batch_size=batch_size)

    # Allocate the generator you wish to use to provide the network with data
    generator = tfrecord_generator.TfRecordGenerator(
        tfrecords=[os.path.join('data', 'mnist', 'validation.tfrecords')],
        parser=parser,
        num_gpus=1)

    # Create the tensor map for input and ground truth ops
    input_tensor_map = {}
    inputs = ['reshape_input', 'labels']

    for name in inputs:
        input_tensor_map[name] = model.graph.get_tensor_by_name(name + ':0')

    # get the evaluation tensor
    eval_tensor = model.graph.get_tensor_by_name('accuracy:0')

    avg_accuracy = 0
    current_iterations = 0

    for batch in generator:

        current_iterations += 1
        # Setup the feed dictionary
        feed_dict = {}

        for name, data in batch.items():
            feed_dict[input_tensor_map[name]] = data

        with model.as_default():
            accuracy = model.run(eval_tensor, feed_dict=feed_dict)

        avg_accuracy += accuracy

        if current_iterations >= iterations:
            break

    return avg_accuracy / current_iterations
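
A minimal sketch of driving this evaluator, assuming a LeNet graph restored from a checkpoint (the checkpoint path is hypothetical):

    with tf.compat.v1.Session() as sess:
        saver = tf.compat.v1.train.import_meta_graph('models/mnist_lenet.meta')  # hypothetical path
        saver.restore(sess, 'models/mnist_lenet')
        accuracy = evaluate(sess, iterations=None, use_cuda=False)
        print('test accuracy: {:.4f}'.format(accuracy))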
Example #4
def restore_from_pkl(sess: tf.compat.v1.Session, varlist: list, pklfile: str):
    """Restore the variables in varlist from a pickled {variable_name: value} dict."""
    with open(pklfile, 'rb') as f:
        tensordict = pickle.load(f)
    num_saved = len(tensordict)
    assign_list = []
    for var in varlist:
        for name in tensordict:
            if var.name == name:
                assign_list.append(tf.compat.v1.assign(var, tensordict[name]))
    # every saved tensor must have found a matching variable
    assert num_saved == len(assign_list)
    sess.run(assign_list)
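
restore_from_pkl expects the pickle to hold a {variable_name: value} dict. A hedged sketch of the matching save side (save_to_pkl is not in the original source):

def save_to_pkl(sess: tf.compat.v1.Session, varlist: list, pklfile: str):
    # evaluate every variable and store it under its exact name,
    # so restore_from_pkl can match entries one-to-one
    tensordict = {var.name: sess.run(var) for var in varlist}
    with open(pklfile, 'wb') as f:
        pickle.dump(tensordict, f)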
Example #5
    def _run_epoch(self, sess: tf.compat.v1.Session, dropout: float,
                   print_progress_every: int, epoch: int, log: bool
                   ) -> None:
        """
        Run one epoch.

        Parameters
        ----------
        sess : tf.compat.v1.Session
            Initialized tf session.
        dropout : float
            Dropout rate (1 - keep probability).
        print_progress_every : int
            Print statistics every print_progress_every iterations.
        epoch : int
            Index of the current epoch (for printing statistics).
        log : bool
            Whether to log or not.
        """
        self.minibatch.shuffle()
        for batch_edges, current_edge_type, current_edge_type_idx in self.minibatch:
            # Construct feed dictionary
            self.feed_dict = self.minibatch.batch_feed_dict(
                batch_edges=batch_edges,
                batch_edge_type=current_edge_type_idx,
                dropout=dropout,
                placeholders=self.placeholders)

            t = time.time()

            # Training step: run single weight update
            outs = sess.run([self.opt.opt_op, self.opt.cost,
                             self.opt.batch_edge_type_idx],
                            feed_dict=self.feed_dict)
            train_cost = outs[1]
            batch_edge_type = outs[2]

            if self.minibatch.iter % print_progress_every == 0:
                val_auc, val_auprc, val_apk = self._get_accuracy_scores(
                    sess, self.minibatch.val_edges,
                    self.minibatch.val_edges_false,
                    current_edge_type)

                print("Epoch:", "%04d" % (epoch + 1), "Iter:",
                      "%04d" % (self.minibatch.iter + 1), "Edge:", "%04d" % batch_edge_type,
                      "train_loss=", "{:.5f}".format(train_cost),
                      "val_roc=", "{:.5f}".format(val_auc), "val_auprc=",
                      "{:.5f}".format(val_auprc),
                      "val_apk=", "{:.5f}".format(val_apk), "time=",
                      "{:.5f}".format(time.time() - t))
                if log:
                    import neptune
                    neptune.log_metric("val_roc", val_auc,
                                       timestamp=time.time())
                    neptune.log_metric("val_apk", val_apk,
                                       timestamp=time.time())
                    neptune.log_metric("val_auprc", val_auprc,
                                       timestamp=time.time())
                    neptune.log_metric("train_loss", train_cost,
                                       timestamp=time.time())
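
A hedged driver sketch for this method (trainer, num_epochs, and the hyperparameter values below are hypothetical, not from the source):

    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        for epoch in range(num_epochs):
            trainer._run_epoch(sess, dropout=0.1,
                               print_progress_every=150, epoch=epoch, log=False)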
Example #6
    def _get_accuracy_scores(self, sess: tf.compat.v1.Session,
                             edges_pos: Dict[Tuple[int, int], List[np.ndarray]],
                             edges_neg: Dict[Tuple[int, int], List[np.ndarray]],
                             edge_type: Tuple[int, int, int]):
        """
        Calculate metrics (AUROC, AUPRC, AP@50)

        Parameters
        ----------
        sess : tf.compat.v1.Session
            Initialized tf session.
        edges_pos : Dict[Tuple[int, int], List[np.ndarray]]
            Maps edge type to np.ndarrays of real edges for every edge class in this type.
        edges_neg : Dict[Tuple[int, int], List[np.ndarray]]
            Maps edge type to np.ndarrays of fake edges for every edge class in this type.
        edge_type : Tuple[int, int, int]
            Edge type with class.
            The first two elements give the edge type; the last element gives the class within that type.
        Returns
        -------
        Tuple of (AUROC, AUPRC, AP@50) scores.
        """
        self.feed_dict.update({self.placeholders['dropout']: 0})
        self.feed_dict.update({self.placeholders['batch_edge_type_idx']:
                                   self.minibatch.edge_type2idx[edge_type]})
        self.feed_dict.update({self.placeholders['batch_row_edge_type']: edge_type[0]})
        self.feed_dict.update({self.placeholders['batch_col_edge_type']: edge_type[1]})

        rec = sess.run(self.opt.predictions, feed_dict=self.feed_dict)

        uv = edges_pos[edge_type[:2]][edge_type[2]]
        u = uv[:, 0]
        v = uv[:, 1]
        preds = expit(rec[u, v])
        assert np.all(self.adj_mats[edge_type[:2]][edge_type[2]][u, v] == 1), \
            'Positive examples (real edges) do not exist in the adjacency matrix'

        uv = edges_neg[edge_type[:2]][edge_type[2]]
        u = uv[:, 0]
        v = uv[:, 1]
        preds_neg = expit(rec[u, v])
        assert np.all(self.adj_mats[edge_type[:2]][edge_type[2]][u, v] == 0), \
            'Negative examples (fake edges) are real'

        # Predicted probs
        preds_all = np.hstack([preds, preds_neg])
        # preds_all = np.nan_to_num(preds_all)
        # Real probs: 1 for pos, 0 for neg
        labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])
        roc_sc = metrics.roc_auc_score(labels_all, preds_all)
        aupr_sc = metrics.average_precision_score(labels_all, preds_all)

        # Real existing edges (local indexes)
        actual = range(len(preds))
        # All local indexes with probability (sorted)
        predicted = sorted(range(len(preds_all)), reverse=True,
                           key=lambda i: preds_all[i])
        apk_sc = rank_metrics.apk(actual, predicted, k=50)

        return roc_sc, aupr_sc, apk_sc
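
rank_metrics.apk is an external helper assumed by this code. A minimal reference sketch of average precision at k, for readers without that module (an approximation of the usual definition, not the original rank_metrics implementation):

def apk(actual, predicted, k=50):
    # average precision at k: average the precision at every rank
    # where a relevant item appears within the top-k predictions
    predicted = predicted[:k]
    relevant = set(actual)
    hits, score = 0, 0.0
    for rank, item in enumerate(predicted, start=1):
        if item in relevant:
            hits += 1
            score += hits / rank
    if not relevant:
        return 0.0
    return score / min(len(relevant), k)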
Example #7
def reduce_pad(sess: tf.compat.v1.Session,
               op_tensor_tuple: Tuple[Op, List[tf.Tensor]], _) -> Tuple[str, tf.Operation, tf.Operation]:
    """
    Pad module reducer
    :param sess: current tf.compat.v1.Session
    :param op_tensor_tuple: tuple containing the op to reduce, and a list of input tensors to the op
    :return: tuple of the reduced op's name, the new pad output op, and the new pad op retrieved from the graph
    """

    name = "reduced_" + op_tensor_tuple[0].dotted_name
    pad_op = op_tensor_tuple[0].get_module()

    # Get padding tensor dimensions
    # Padding dimension information is captured in an input tensor to the pad op, index 1 of pad op inputs
    # Dimensions of this tensor are always (N, 2), where N is the dimensionality of the input tensor coming into pad.
    # The value of padding[i][0] gives the amount to pad in dimension i before the contents of the input, while
    # padding[i][1] gives the amount to pad in dimension i after the contents.
    # Currently we do not support reducing a pad op that modifies the channel dimension, which is the last dimension,
    # indexed by -1 below.  So check to make sure that indices [-1][0] and [-1][1] remain 0 (no padding).
    padding_tensor_eval = sess.run(pad_op.inputs[1])
    if padding_tensor_eval[-1][0] != 0 or padding_tensor_eval[-1][1] != 0:
        raise NotImplementedError("Attempting to reduce pad operation that modifies channel size, not supported.")
    new_padding_tensor = tf.constant(padding_tensor_eval)       # No need to actually modify padding tensor

    # Get constant value for padding
    # If pad op takes a non default constant value (default = 0), it appears as a third input tensor to pad op, index 2
    const_val = 0
    if len(pad_op.inputs) > 2:
        const_val = sess.run(pad_op.inputs[2])

    # Get mode
    # Mode can be 'CONSTANT', 'SYMMETRIC', or 'REFLECT'.  'CONSTANT' is default, and will not appear as a mode attribute
    # if it is the case.
    try:
        mode = pad_op.get_attr('mode')
        mode = mode.decode('utf-8')
    except ValueError:
        mode = 'CONSTANT'

    new_tensor = tf.pad(op_tensor_tuple[1][0],
                        new_padding_tensor,
                        constant_values=const_val,
                        mode=mode,
                        name=name)
    module = sess.graph.get_operation_by_name(name)

    return name, new_tensor.op, module
Example #8
    def get_tensor_as_numpy_data(sess: tf.compat.v1.Session,
                                 op: tf.Operation) -> np.ndarray:
        """
        Return the weight kernel in the op as numpy data.
        :param sess: TensorFlow session
        :param op: tf operation to extract the weight tensor from
        :return: weight tensor as a numpy array, if found in the given op
        """

        wt_tensor = WeightTensorUtils.get_wt_as_read_var_tensor(op)
        numpy_data = sess.run(wt_tensor)
        return numpy_data
Example #9
    def _create_bias_add_op_and_insert(sess: tf.compat.v1.Session,
                                       conv_op: tf.Operation,
                                       new_bias_var: tf.Variable,
                                       bias_name="bias_value") -> None:
        """
        Creates and adds a bias_add op to the given conv op.
        :param sess: active tf.compat.v1.Session
        :param conv_op: Convolution op
        :param new_bias_var: bias variable
        :param bias_name: an optional string for bias name
        :return: None
        """

        assert conv_op.type in ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']

        with sess.graph.as_default():
            if conv_op.outputs:
                bias_index_in_op = BiasUtils.get_bias_index_in_given_op(
                    conv_op)
                conv_op_out_tensor = conv_op.outputs[bias_index_in_op]
                sess.run(tf.compat.v1.variables_initializer([new_bias_var]))
                BiasUtils.insert_bias_add_op(sess, conv_op_out_tensor,
                                             new_bias_var, bias_name)
Example #10
    def get_bias_as_numpy_data(sess: tf.compat.v1.Session,
                               op: tf.Operation) -> np.ndarray:
        """
        Return the bias in the op as numpy data.
        :param sess: TensorFlow session
        :param op: tf operation to extract the bias tensor from
        :return: bias tensor as a numpy array, if found in the given op
        """

        # bias tensor feeds into bias-add op through ReadVariableOp type
        # bias add inputs[1] is the bias tensor we want to read
        bias_tensor = BiasUtils.get_bias_tensor(op)
        assert bias_tensor is not None
        numpy_data = sess.run(bias_tensor)
        return numpy_data
Example #11
def gradcam(sess: tf.compat.v1.Session, input_frame, action_idx):
    """
    Compute a Grad-CAM heatmap for the given action using the last conv layer.
    :param sess: session holding the trained policy graph
    :param input_frame: grayscale input frame as a numpy array
    :param action_idx: index of the action to explain
    :return: heatmap resized to the input frame's dimensions
    """
    input_img = cv2.resize(input_frame, (160, 120))
    input_img = np.expand_dims(input_img, axis=2)

    input_layer = sess.graph.get_tensor_by_name(
        'main_level/agent/main/online/network_0/observation/observation:0')
    output_layer = sess.graph.get_tensor_by_name(
        'main_level/agent/main/online/network_1/ppo_head_0/policy:0')
    convolutional_output = sess.graph.get_tensor_by_name(
        'main_level/agent/main/online/network_1/observation/Conv2d_4/Conv2D:0')

    feed_dict = {input_layer: [input_img]}

    # Get output for this action
    y_c = tf.reduce_sum(tf.multiply(
        output_layer, tf.one_hot([action_idx], output_layer.shape[-1])),
                        axis=1)

    # Compute gradients based on last cnn layer
    target_grads = tf.gradients(y_c, convolutional_output)[0]

    out, grads_value = sess.run([convolutional_output, target_grads],
                                feed_dict=feed_dict)
    out, grads_value = out[0, :], grads_value[0, :, :, :]

    weights = np.mean(grads_value, axis=(0, 1))
    cam = np.dot(out, weights)

    # ReLU (only positive values are of interest)
    cam = np.maximum(0, cam)

    # Postprocess
    # Scale maximum value to 1.0 (guard against an all-zero map)
    max_val = np.max(cam)
    if max_val > 0:
        cam = cam / max_val

    # Scale back to input frame dimensions.
    input_h, input_w = input_frame.shape[:2]
    cam = cv2.resize(cam, (input_w, input_h))

    return cam
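
The returned cam is a single-channel map scaled to [0, 1] at the frame's resolution. A hedged sketch of the usual visualization step, assuming frame is a uint8 grayscale image (the blending weights are a common choice, not from the source):

    cam = gradcam(sess, frame, action_idx=0)
    heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
    # blend the heatmap with the grayscale frame converted to 3 channels
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
    overlay = cv2.addWeighted(frame_bgr, 0.6, heatmap, 0.4, 0)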
Example #12
    def get_beta_as_numpy_data(sess: tf.compat.v1.Session,
                               bn_op: tf.Operation) -> np.ndarray:
        """
        Get beta param from BN op specified.

        :param sess: tensorflow session
        :param bn_op: bn_op as tf.Operation
        :return: beta tensor as numpy data
        """
        try:
            # try name based tensor look up for Keras layers
            beta_tensor = BNUtils._get_bn_param_tensor_using_name(
                sess, bn_op, constants.BNOpParamType.beta)
        except KeyError:
            # if we can't find the tensor name, use structure match
            # to figure out the read tensor for param
            beta_tensor = BNUtils.get_beta_read_var_op_tensor(bn_op)

        with sess.graph.as_default():
            numpy_data = sess.run(beta_tensor)
        return numpy_data
Example #13
    def get_gamma_as_numpy_data(sess: tf.compat.v1.Session,
                                bn_op: tf.Operation) -> np.ndarray:
        """
        Get gamma param from BN op specified.

        :param sess: tensorflow session
        :param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
        :return: gamma as numpy data
        """
        try:
            # try name based tensor look up for Keras layers
            gamma_tensor = BNUtils._get_bn_param_tensor_using_name(
                sess, bn_op, constants.BNOpParamType.gamma)
        except KeyError:
            # if we can't find the tensor name, use structure match
            # to figure out the read tensor for param
            gamma_tensor = BNUtils.get_gamma_read_var_op_tensor(bn_op)

        with sess.graph.as_default():
            numpy_data = sess.run(gamma_tensor)

        return numpy_data
Example #14
    def predict(self, image, specific=True, sess: tf.compat.v1.Session = None):
        if sess is None:
            sess = get_session()
        if isinstance(image, str):
            origin_image = read_img(image)
        else:
            origin_image = image

        origin_h, origin_w = origin_image.shape[:-1]
        # resize the image to the model input size and return it, plus offset and scale information
        input_image, offset_xy, scale_xy = _image_preprocess(
            origin_image, input_shape=self.input_shape)
        input_image = np.expand_dims(input_image, 0)

        # boxes: [[xmin, ymin, xmax, ymax], ...]
        # box confidences: [s1, s2, s3, ...] with 0 <= s_i <= 1
        # labels: [c1, c2, c3, ...] with 0 <= c_i <= C
        # class scores: [t1, t2, t3, ...] with 0 <= t_i <= 1
        b, bc, l, ls = sess.run(
            [self._box, self._box_confidence, self._labels, self._scores],
            feed_dict={self.input_image: input_image})

        b[:, [0, 2]] = b[:, [0, 2]] - offset_xy[0]
        b[:, [1, 3]] = b[:, [1, 3]] - offset_xy[1]

        b[:, [0, 2]] = b[:, [0, 2]] / scale_xy[0]
        b[:, [1, 3]] = b[:, [1, 3]] / scale_xy[1]

        b[:, [0, 2]] = np.clip(b[:, [0, 2]], 0, origin_w - 1)
        b[:, [1, 3]] = np.clip(b[:, [1, 3]], 0, origin_h - 1)
        # nms
        _box, _box_confidence, _label, _score = _bbox_proprocess_soft(
            b, bc, l, ls, image=origin_image)

        if specific:
            return _box, _box_confidence, _label, _score
        else:
            return _box, _label, _score
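
A minimal call sketch; 'detector' stands in for an instance of the class defining predict(), and the image path is hypothetical:

    boxes, box_confidences, labels, scores = detector.predict('dog.jpg', specific=True)
    for (xmin, ymin, xmax, ymax), label, score in zip(boxes, labels, scores):
        print('class {}: score {:.2f}, box ({}, {}, {}, {})'.format(
            label, score, xmin, ymin, xmax, ymax))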