Example #1
def action_augmentation_loss_verbose(y_true, y_pred):
    y_pred = K.print_tensor(y_pred, "y_pred")
    y_true = K.print_tensor(y_true, "y_true")
    diff = K.print_tensor(y_true - y_pred, "diff")
    squared = K.print_tensor(K.square(diff), "squared")
    total = K.print_tensor(K.sum(squared), "sum")  # 'total' avoids shadowing built-in sum()
    return total
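For context, a minimal sketch (model and data are made up) showing how such a verbose loss plugs into training; the K.print_tensor output shows up per batch during fit():

import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K

# reusing action_augmentation_loss_verbose as defined above
model = keras.Sequential([keras.Input(shape=(4,)), keras.layers.Dense(1)])
model.compile(optimizer="adam", loss=action_augmentation_loss_verbose)
model.fit(np.random.rand(8, 4).astype("float32"),
          np.random.rand(8, 1).astype("float32"), epochs=1)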
Example #2
    def _apply_edge_weights(features, edge_weights, aggregation=None):
        features = K.expand_dims(features, axis=1)  # (B, 1, v, f)
        edge_weights = K.expand_dims(edge_weights, axis=3)  # (B, u, v, 1)

        if DEBUG:
            features = K.print_tensor(features,
                                      message='applying on features ',
                                      summarize=debug_summarize)
            edge_weights = K.print_tensor(edge_weights,
                                          message='applying weights ',
                                          summarize=debug_summarize)

        out = edge_weights * features  # (B, u, v, f)

        if DEBUG:
            out = K.print_tensor(out,
                                 message='before aggregation ',
                                 summarize=debug_summarize)

        if aggregation:
            out = aggregation(out, axis=2)  # (B, u, f)
        else:
            out = K.reshape(out,
                            (-1, edge_weights.shape[1], features.shape[-1] *
                             features.shape[-2]))  # on TF1-era Keras, use .shape[i].value here

        return out
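The broadcasting this helper relies on can be verified eagerly on toy shapes; following the comments above with B=1, u=2, v=3, f=4 (values are arbitrary):

import tensorflow as tf
from tensorflow.keras import backend as K

features = tf.ones((1, 3, 4))        # (B, v, f)
edge_weights = tf.ones((1, 2, 3))    # (B, u, v)
f = K.expand_dims(features, axis=1)      # (B, 1, v, f)
w = K.expand_dims(edge_weights, axis=3)  # (B, u, v, 1)
out = w * f                              # broadcasts to (B, u, v, f)
print(out.shape)                         # (1, 2, 3, 4)
print(K.sum(out, axis=2).shape)          # aggregated over v: (1, 2, 4)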
Example #3
def mse_verbose(y_true, y_pred):
    y_pred = K.print_tensor(y_pred, "y_pred")
    y_true = K.print_tensor(y_true, "y_true")
    diff = K.print_tensor(y_true - y_pred, "diff")
    squared = K.print_tensor(K.square(diff), "squared")
    mean = K.print_tensor(K.mean(squared), "mean")
    return mean
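Since K.print_tensor is the identity plus a printing side effect, the loss above can also be probed directly in eager mode; a sketch with made-up values, reusing mse_verbose from above:

import tensorflow as tf

y_true = tf.constant([[1.0], [2.0]])
y_pred = tf.constant([[1.5], [1.0]])
print(float(mse_verbose(y_true, y_pred)))  # prints y_pred, y_true, diff, squared, mean; result 0.625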
Example #4
def qa_pair_cross_entropy_loss(y_true, y_pred):
    # The incoming labels are ignored: the targets are the identity matrix,
    # i.e. each question should match its own answer within the batch.
    y_true = tf.eye(tf.shape(y_pred)[0])
    q_embedding, a_embedding = tf.unstack(y_pred, axis=1)
    similarity_matrix = tf.matmul(
        a=q_embedding, b=a_embedding, transpose_b=True)
    similarity_matrix_softmaxed = tf.nn.softmax(similarity_matrix)
    # Keep the returned tensor: in graph mode the print op only runs when
    # its output is consumed downstream.
    similarity_matrix_softmaxed = K.print_tensor(
        similarity_matrix_softmaxed, message="similarity_matrix_softmaxed is: ")
    return tf.keras.losses.categorical_crossentropy(
        y_true, similarity_matrix_softmaxed, from_logits=False)
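A quick eager check with made-up embeddings; y_pred stacks question and answer embeddings along axis 1, as the function expects, and the y_true argument is ignored:

import tensorflow as tf

q = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # question embeddings (B, d)
a = tf.constant([[1.0, 0.0], [0.0, 1.0]])  # their matching answer embeddings
y_pred = tf.stack([q, a], axis=1)          # (B, 2, d)
print(qa_pair_cross_entropy_loss(None, y_pred).numpy())
# per-row loss is low: each question is most similar to its own answer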
Example #5
    def attack_evaluate(self):
        params = K.cast(self.total_weights, dtype='float32')
        target_data = K.cast(self.extracted_data, dtype='float32')
        params_mean = K.mean(params)
        target_mean = K.mean(target_data)
        params_d = params - params_mean
        target_d = target_data - target_mean

        num = K.sum((params_d) * (target_d))
        den = K.sqrt(K.sum(K.square(params_d)) * K.sum(K.square(target_d)))
        co = num / den
        print(params)
        print("Corr: ", co)
        #loss_co = 1 - abs(co)

        img_name = 'test.png'
        img_path = os.path.join(img_dir, img_name)
        param_img_name = 'param_test.png'
        param_img_path = os.path.join(img_dir, param_img_name)

        data_in_params = K.get_value(self.total_weights)
        img_from_params = normalize(data_in_params)
        #img_from_params = (img_from_params * 255).astype(np.uint8)
        img_from_params = (img_from_params * 255)
        #img_from_params = np.asarray(ImageOps.invert(Image.fromarray(img_from_params.reshape(32, 32, 3)))).flatten()
        #img_from_params = rgb_to_grayscale(img_from_params.reshape(32, 32, 3)).flatten().astype(np.uint8)
        #img_from_params = rgb_to_grayscale(img_from_params.reshape(32, 32, 3)).flatten()

        #self.extracted_data = rgb_to_grayscale(self.extracted_data.reshape(32,32,3)).flatten().astype(np.uint8)
        #self.extracted_data = rgb_to_grayscale(self.extracted_data.reshape(32,32,3)).flatten()

        # These print immediately under eager execution; in graph mode the
        # returned tensor would have to be consumed for the print op to run.
        K.print_tensor(img_from_params, "pa_before=")
        K.print_tensor(self.extracted_data, "data_before=")

        #img_from_params = rgb_to_grayscale(img_from_params.reshape(32, 32, 3)).flatten().astype(np.uint16)
        #self.extracted_data = rgb_to_grayscale(self.extracted_data.reshape(32,32,3)).flatten().astype(np.uint16)
        K.print_tensor(img_from_params, "pa_after=")
        K.print_tensor(self.extracted_data, "data_after=")

        difference = self.extracted_data - img_from_params
        print("mean:", np.mean(np.abs(difference)))
        print("var:", np.var(np.abs(difference)))

        print("mean_pa, var_pa", np.mean(img_from_params),
              np.var(img_from_params))
        print("mean, var", np.mean(self.extracted_data),
              np.var(self.extracted_data))

        #img_from_params = img_from_params.reshape(32, 32, 3)
        #img_from_dataset = self.extracted_data.reshape(32, 32, 3)
        img_from_params = img_from_params.reshape(32, 32)
        img_from_dataset = self.extracted_data.reshape(32, 32)
        cv2.imwrite(param_img_path, img_from_params)
        cv2.imwrite(img_path, img_from_dataset)
Example #6
    def _unpack_input(self, x):
        if self.input_format == 'x':
            data = x

            vertex_mask = K.cast(K.not_equal(data[..., 3:4], 0.), 'float32')
            num_vertex = K.sum(vertex_mask)

        elif self.input_format in ['xn', 'xen']:
            if self.input_format == 'xn':
                data, num_vertex = x
            else:
                data_x, data_e, num_vertex = x
                data = K.concatenate(
                    (data_x, K.reshape(data_e, (-1, data_e.shape[1], 1))),
                    axis=-1)

            if DEBUG:
                data = K.print_tensor(data,
                                      message='data is ',
                                      summarize=debug_summarize)
                num_vertex = K.print_tensor(num_vertex,
                                            message='num_vertex is ',
                                            summarize=debug_summarize)

            data_shape = K.shape(data)
            B = data_shape[0]
            V = data_shape[1]
            vertex_indices = K.tile(K.expand_dims(K.arange(0, V), axis=0),
                                    (B, 1))  # (B, [0..V-1])
            vertex_mask = K.expand_dims(K.cast(
                K.less(vertex_indices, K.cast(num_vertex, 'int32')),
                'float32'),
                                        axis=-1)  # (B, V, 1)
            num_vertex = K.cast(num_vertex, 'float32')

        if DEBUG:
            vertex_mask = K.print_tensor(vertex_mask,
                                         message='vertex_mask is ',
                                         summarize=debug_summarize)

        return data, num_vertex, vertex_mask
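The padding mask built in the 'xn'/'xen' branch can be reproduced on its own; a sketch with B=2 events, V=4 vertex slots, and hypothetical vertex counts:

import tensorflow as tf
from tensorflow.keras import backend as K

num_vertex = tf.constant([[2.0], [3.0]])  # valid vertices per event
B, V = 2, 4
vertex_indices = K.tile(K.expand_dims(K.arange(0, V), axis=0), (B, 1))
vertex_mask = K.expand_dims(
    K.cast(K.less(vertex_indices, K.cast(num_vertex, 'int32')), 'float32'),
    axis=-1)  # (B, V, 1): 1 for real vertices, 0 for padding
print(vertex_mask[..., 0].numpy())  # [[1. 1. 0. 0.], [1. 1. 1. 0.]]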
Example #7
def denominateur(y_true, y_pred):
    my_mean = y_pred[:, :5]

    my_var = y_pred[:, 5:]
    my_mean_temp = K.repeat(my_mean, K.shape(y_true)[1])
    # softplus keeps the predicted variance strictly positive
    my_var = (K.log(1 + K.exp(my_var)) + 1e-6)
    my_var = K.print_tensor(my_var, message='my_var = ')
    # Earlier Gaussian negative-log-likelihood variant, kept for reference:
    # return K.mean(K.log(K.square(my_var))/2 +
    #               K.min(K.square(my_mean_temp-y_true), axis=1)/(2*K.square(my_var))) + \
    #        0.5*K.log(2*np.pi)
    denominateur = 2*my_var
    return denominateur
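The K.log(1 + K.exp(...)) transform above is softplus, which keeps the variance strictly positive; a small eager check on arbitrary raw outputs:

import tensorflow as tf
from tensorflow.keras import backend as K

raw = tf.constant([-3.0, 0.0, 3.0])
var = K.log(1 + K.exp(raw)) + 1e-6  # softplus + epsilon, as in denominateur
print(var.numpy())                  # ~[0.049, 0.693, 3.049], all positive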
Example #8
    def _collapse_output(self, output):
        if self.collapse == 'mean':
            if self.mean_by_nvert:
                output = K.sum(output, axis=1) / num_vertex
            else:
                output = K.mean(output, axis=1)
        elif self.collapse == 'sum':
            output = K.sum(output, axis=1)
        elif self.collapse == 'max':
            output = K.max(output, axis=1)

        if DEBUG:
            output = K.print_tensor(output, message='output is ', summarize=-1)

        return output
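The three collapse modes differ only in which axis-1 reduction they apply; side by side on a toy (B, V, F) tensor:

import tensorflow as tf
from tensorflow.keras import backend as K

output = tf.constant([[[1.0, 2.0], [3.0, 4.0]]])  # (B=1, V=2, F=2)
print(K.mean(output, axis=1).numpy())  # [[2. 3.]]
print(K.sum(output, axis=1).numpy())   # [[4. 6.]]
print(K.max(output, axis=1).numpy())   # [[3. 4.]]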
Example #9
        def print_data_and_train_step(original_data):
            # Basically copied one-to-one from https://git.io/JvDTv
            data = data_adapter.expand_1d(original_data)
            x, y_true, w = data_adapter.unpack_x_y_sample_weight(data)
            y_pred = keras_model(x, training=True)

            # this is pretty much like on_train_batch_begin
            K.print_tensor(w, "Sample weight (w) =")
            K.print_tensor(x, "Batch input (x) =")
            K.print_tensor(y_true, "Batch output (y_true) =")
            K.print_tensor(y_pred, "Prediction (y_pred) =")

            result = original_train_step(original_data)

            # add anything here for on_train_batch_end-like behavior

            return result
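The wrapper is meant to replace the model's own train_step before fit(). A hedged sketch of the wiring on a placeholder model, assuming print_data_and_train_step is defined where keras_model and original_train_step are in scope, on a TF 2.x build where data_adapter is importable from tensorflow.python.keras.engine:

import numpy as np
from tensorflow import keras

keras_model = keras.Sequential([keras.Input(shape=(2,)), keras.layers.Dense(1)])
keras_model.compile(optimizer="adam", loss="mse")
original_train_step = keras_model.train_step
# ... define print_data_and_train_step here (as above), then swap it in:
keras_model.train_step = print_data_and_train_step
keras_model.fit(np.zeros((4, 2), "float32"), np.zeros((4, 1), "float32"))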
Example #10
def correlation_coefficient_loss(y_true, y_pred):
    #print(f'suhyun true: {y_true.type}, pred: {y_pred.type}')
    x = tf.convert_to_tensor(y_true, dtype=tf.float32)
    y = y_pred

    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x-mx, y-my
    r_num = K.sum(tf.multiply(xm,ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den

    r = K.maximum(K.minimum(r, 1.0), -1.0)
    r = K.print_tensor(r, message='suhyun --- corr rank r = ')
    return 1 - K.square(r)
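A sanity check in eager mode: the loss is 1 - r^2, so perfectly correlated or anti-correlated inputs give ~0 and uncorrelated inputs give ~1 (values are made up):

import tensorflow as tf

y_true = tf.constant([1.0, 2.0, 3.0, 4.0])
print(float(correlation_coefficient_loss(y_true, 2.0 * y_true)))  # ~0.0 (r = 1)
print(float(correlation_coefficient_loss(y_true, -y_true)))       # ~0.0 (r = -1)
print(float(correlation_coefficient_loss(
    y_true, tf.constant([1.0, -1.0, -1.0, 1.0]))))                 # ~1.0 (r = 0)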
Example #11
 def call(self, x, **kwargs):
     if not isinstance(x, list):
         raise TypeError("Expected x to be a list")
     msg_id = 0
     first_item = x[0]
     y_true = first_item[
         :, (first_item.shape[1] - 1):(first_item.shape[1])
     ]  # keep shape
     if self.debug:
         y_true = K.print_tensor(
             y_true, message=str(msg_id).zfill(3) + " " + "y_true"
         )
         msg_id = msg_id + 1
     last_item = x[len(x) - 1]
     y_pred = last_item
     # per-row cross-entropy or deviance/2 part of loss
     eps = 1.0e-6
     y_pred = K.maximum(y_pred, eps)
     y_pred = K.minimum(y_pred, 1 - eps)
     if self.debug:
         y_pred = K.print_tensor(
             y_pred, message=str(msg_id).zfill(3) + " " + "y_pred"
         )
         msg_id = msg_id + 1
     loss = -safe_mult(y_true, K.log(y_pred)) - safe_mult(1 - y_true, K.log(1 - y_pred))
     if self.debug:
         loss = K.print_tensor(
             loss, message=str(msg_id).zfill(3) + " " + "entropy loss"
         )
         msg_id = msg_id + 1
     # conditional clustered action/variation on activation
     var_loss, msg_id = self.compute_variational_loss(
         x=x, y_true=y_true, msg_id=msg_id
     )
     if var_loss is not None:
         if self.debug:
             var_loss = K.print_tensor(
                 var_loss, message=str(msg_id).zfill(3) + " " + "variational loss"
             )
             msg_id = msg_id + 1
         loss = loss + var_loss
     if self.debug:
         loss = K.print_tensor(
             loss, message=str(msg_id).zfill(3) + " " + "final squared loss"
         )
         msg_id = msg_id + 1
     loss = K.sqrt(loss)
     if self.debug:
         loss = K.print_tensor(
             loss, message=str(msg_id).zfill(3) + " " + "final loss"
         )
         # noinspection PyUnusedLocal
         msg_id = msg_id + 1
     return loss
Example #12
 def _apply_map(self, x):
     return K.print_tensor(x)
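Since it returns its input unchanged, K.print_tensor also works as a pass-through probe inside a functional graph, e.g. in a Lambda layer; a minimal sketch:

import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K

inp = keras.Input(shape=(3,))
probed = keras.layers.Lambda(lambda t: K.print_tensor(t, "dense input ="))(inp)
out = keras.layers.Dense(1)(probed)
model = keras.Model(inp, out)
model.predict(np.ones((2, 3)))  # prints the batch flowing into Dense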
Example #13
    def loss(y_true, y_pred):

        y_pred = backend.clip(y_pred, _EPSILON, 1.0 - _EPSILON)
        print('---=ytrue', np.shape(y_true))
        print('---=ypred', np.shape(y_pred))
        """ here all calculations will be based on the class greater than 0, except accuracy"""
        avgIOU = backend.variable(0.0)

        for i in range(batch_size):
            numUnion = backend.variable(1.0)
            recall = backend.variable(0.0)
            numClass = backend.variable(0.0)
            IOU = backend.variable(0.0)
            mask = backend.argmax(y_true[i], -1)
            pred = backend.argmax(y_pred[i], -1)
            print('---=mask', np.shape(mask))
            print('---=pred', np.shape(pred))

            mask_shape1 = backend.shape(mask)
            mask_shape1 = backend.print_tensor(mask_shape1,
                                               message='mask_shape1 = ')

            for c in np.arange(1, num_classes, 1):
                msk_equal = backend.cast(backend.equal(mask, c),
                                         dtype='float32')

                #    msks_shape1 = backend.shape(msk_equal)
                #    msks_shape1 = backend.print_tensor(msks_shape1, message='msk_equal shape = ' )

                masks_sum = backend.sum(msk_equal)

                msks_shape2 = backend.shape(masks_sum)
                msks_shape2 = backend.print_tensor(
                    msks_shape2, message='masks_sum shape   = ')

                masks_sum = backend.print_tensor(masks_sum,
                                                 message='masks_sum = ')

                predictions_sum = backend.sum(
                    backend.cast(backend.equal(pred, c), 'float32'))

                predictions_sum = backend.print_tensor(
                    predictions_sum, message='predictions_sum = ')

                print('---=masks_sum', np.shape(masks_sum))
                print('---=predictions_sum', np.shape(predictions_sum))

                numTrue = backend.sum(
                    backend.cast(backend.equal(pred, c), 'float32') *
                    backend.cast(backend.equal(mask, c), 'float32'))
                unionSize = masks_sum + predictions_sum - numTrue
                #    unionSize = tf.Print(unionSize, [unionSize], "union size : ")
                unionSize = backend.print_tensor(unionSize,
                                                 message='unionSize = ')

                maskhaslabel = backend.greater(masks_sum, 0)
                predhaslabel = backend.greater(predictions_sum, 0)

                maskhaslabel = backend.print_tensor(maskhaslabel,
                                                    message='maskhaslabel = ')
                predhaslabel = backend.print_tensor(predhaslabel,
                                                    message='predhaslabel = ')

                predormaskexistlabel = backend.any(
                    backend.stack([maskhaslabel, predhaslabel], axis=0),
                    axis=0
                )  # backend.cond(backend.logical_or(maskhaslabel, predhaslabel), lambda:True,lambda:False)
                predormaskexistlabel = backend.print_tensor(
                    predormaskexistlabel, message='predormaskexistlabel = ')

                IOU = backend.switch(predormaskexistlabel,
                                     lambda: IOU + numTrue / unionSize,
                                     lambda: IOU)
                numUnion = backend.switch(predormaskexistlabel,
                                          lambda: numUnion + 1,
                                          lambda: numUnion)
                recall = backend.switch(maskhaslabel,
                                        lambda: recall + numTrue / masks_sum,
                                        lambda: recall)
                numClass = backend.switch(maskhaslabel, lambda: numClass + 1,
                                          lambda: numClass)
            IOU = IOU / numUnion
            avgIOU = avgIOU + IOU
        avgIOU = avgIOU / batch_size
        iou_loss = 1.0 - avgIOU

        # iou_loss = backend.print_tensor(iou_loss, message='\n\n\niouloss = ')
        return iou_loss
Example #14
    def _garnet(self, data, num_vertex, vertex_mask, in_transform, d_compute,
                out_transform):
        features = in_transform(data)  # (B, V, F)
        distance = d_compute(data)  # (B, V, S)

        if DEBUG:
            features = K.print_tensor(features,
                                      message='features is ',
                                      summarize=debug_summarize)
            distance = K.print_tensor(distance,
                                      message='distance is ',
                                      summarize=debug_summarize)

        if self.discretize_distance:
            distance = K.round(distance)
            if DEBUG:
                distance = K.print_tensor(distance,
                                          message='rounded distance is ',
                                          summarize=debug_summarize)

        edge_weights = vertex_mask * K.exp(
            K.square(distance) * (-math.log(2.)))  # (B, V, S)

        if DEBUG:
            edge_weights = K.print_tensor(edge_weights,
                                          message='edge_weights is ',
                                          summarize=debug_summarize)

        if self.mean_by_nvert:

            def graph_mean(out, axis):
                s = K.sum(out, axis=axis)
                # reshape just to enable broadcasting
                s = K.reshape(
                    s, (-1, d_compute.units * in_transform.units)) / num_vertex
                s = K.reshape(s, (-1, d_compute.units, in_transform.units))
                return s
        else:
            graph_mean = K.mean

        # vertices -> aggregators
        edge_weights_trans = K.permute_dimensions(edge_weights,
                                                  (0, 2, 1))  # (B, S, V)
        aggregated = self._apply_edge_weights(
            features, edge_weights_trans, aggregation=graph_mean)  # (B, S, F)

        if DEBUG:
            aggregated = K.print_tensor(aggregated,
                                        message='aggregated is ',
                                        summarize=debug_summarize)

        # aggregators -> vertices
        updated_features = self._apply_edge_weights(
            aggregated, edge_weights)  # (B, V, S*F)

        if DEBUG:
            updated_features = K.print_tensor(updated_features,
                                              message='updated_features is ',
                                              summarize=debug_summarize)

        return vertex_mask * out_transform(updated_features)
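The edge-weight kernel above, K.exp(K.square(distance) * (-math.log(2.))), is just 2**(-d**2): weight 1 at distance 0, halved at distance 1; a quick check:

import math
import tensorflow as tf
from tensorflow.keras import backend as K

d = tf.constant([0.0, 1.0, 2.0])
w = K.exp(K.square(d) * (-math.log(2.)))
print(w.numpy())                   # [1.0, 0.5, 0.0625]
print((2.0 ** -(d ** 2)).numpy())  # same values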
Example #15
 def compute_variational_loss(self, *, x, y_true, msg_id=0):
     var_loss = None
     if self.alpha <= 0:
         return var_loss, msg_id
     n_internal_layers = len(x) - 2
     if n_internal_layers <= 0:
         return var_loss, msg_id
     layers_normalization = n_internal_layers * (n_internal_layers + 1) / 2
     y_triples = [(y_is, row_indicator, K.sum(row_indicator) + 1.0e-6) for
                  y_is, row_indicator in [(1, y_true), (0, 1 - y_true)]]  # assuming y_true is 0/1
     for y_is, row_indicator, row_weight in y_triples:
         if len(row_weight.shape) != 0:
             raise ValueError("Expected row_weight.shape to be 0")
     for i in range(1, len(x) - 1):  # all but first and last layer
         layer = x[i]
         layer_weight = i / (layers_normalization * layer.shape.as_list()[1])
         for j in range(layer.shape[1]):
             xij = layer[:, j:(j + 1)]  # try to keep shape
             # y-pass 1/2 get conditional distributions and means
             y_derived = dict()
             for y_is, row_indicator, row_weight in y_triples:
                 coords = (
                         "(" + "y=" + str(y_is) + ", i=" + str(i) + ", j=" + str(j) + ")"
                 )
                 if self.debug:
                     xij = K.print_tensor(
                         xij, message=str(msg_id).zfill(3) + " " + "xij" + coords
                     )
                     msg_id = msg_id + 1
                 xij_conditional = safe_mult(row_indicator, xij)
                 if self.debug:
                     xij_conditional = K.print_tensor(
                         xij_conditional,
                         message=str(msg_id).zfill(3)
                         + " "
                         + "xij_conditional"
                         + coords,
                     )
                     msg_id = msg_id + 1
                 xbar = K.sum(xij_conditional) / row_weight
                 if self.debug:
                     xbar = K.print_tensor(
                         xbar, message=str(msg_id).zfill(3) + " " + "xbar" + coords
                     )
                     msg_id = msg_id + 1
                 y_derived[y_is] = (xij_conditional, xbar)
             mean_sq_diff = 1
             if self.var_ratio:
                 xbar_0 = y_derived[0][1]
                 if len(xbar_0.shape) != 0:
                     raise ValueError("Expected xbar_0.shape to be 0")
                 xbar_1 = y_derived[1][1]
                 if len(xbar_1.shape) != 0:
                     raise ValueError("Expected xbar_1.shape to be 0")
                 mean_sq_diff = (xbar_1 - xbar_0)**2 + self.var_ratio_smoothing
                 if len(mean_sq_diff.shape) != 0:
                     raise ValueError("Expected mean_sq_diff.shape to be 0")
                 if self.debug:
                     coords = (
                             "(" + "i=" + str(i) + ", j=" + str(j) + ")"
                     )
                     mean_sq_diff = K.print_tensor(
                         mean_sq_diff,
                         message=str(msg_id).zfill(3) + " " + "mean_sq_diff" + coords,
                     )
                     msg_id = msg_id + 1
             # y-pass 2/2 compute conditional variances
             for y_is, row_indicator, row_weight in y_triples:
                 coords = (
                         "(" + "y=" + str(y_is) + ", i=" + str(i) + ", j=" + str(j) + ")"
                 )
                 xij_conditional, xbar = y_derived[y_is]
                 if len(xbar.shape) != 0:
                     raise ValueError("Expected xbar.shape to be 0")
                 diff_ij = xij - xbar
                 if self.debug:
                     diff_ij = K.print_tensor(
                         diff_ij,
                         message=str(msg_id).zfill(3) + " " + "diff_ij" + coords,
                     )
                     msg_id = msg_id + 1
                 diff_ij_conditional = safe_mult(row_indicator, diff_ij)
                 if self.debug:
                     diff_ij_conditional = K.print_tensor(
                         diff_ij_conditional,
                         message=str(msg_id).zfill(3)
                         + " "
                         + "diff_ij_conditional"
                         + coords,
                     )
                     msg_id = msg_id + 1
                 # ratio of y-conditioned var over y-different var
                 conditional_var = diff_ij_conditional**2 / mean_sq_diff
                 wij = self.alpha * layer_weight
                 if self.debug:
                     conditional_var = K.print_tensor(
                         conditional_var,
                         message=str(msg_id).zfill(3)
                         + " "
                         + "conditional_var"
                         + coords
                         + " * "
                         + str(wij),
                     )
                     msg_id = msg_id + 1
                 if var_loss is None:
                     var_loss = wij * conditional_var
                 else:
                     var_loss = var_loss + wij * conditional_var
     return var_loss, msg_id
Example #16
    def customloss(y_true, y_pred):

        # Element-wise squared error: keep per-element values so the weighting
        # below has an effect (reducing to a scalar here would make
        # loss_weights cancel out of the final expression).
        losses = tf.math.squared_difference(y_true, y_pred)

        loss = tf.reduce_mean(tf.multiply(
            loss_weights, losses)) / tf.math.reduce_sum(loss_weights)

        for m in range(_N_SLIDERS):

            if m == 0:
                pusher_penetration = tf.math.sqrt(
                    tf.math.square((inputs[:, _DOF] + y_pred[:, 0]) -
                                   inputs[:, 0] * _CTRL_DUR) +
                    tf.math.square(y_pred[:, 1] -
                                   inputs[:, 1] * _CTRL_DUR)) - (rs + rp)
            else:
                pusher_penetration = tf.math.sqrt(
                    tf.math.square(
                        (inputs[:, _DOF + _VARS * m - 1] +
                         y_pred[:, _VARS * m]) - inputs[:, 0] * _CTRL_DUR) +
                    tf.math.square(y_pred[:, _VARS * m + 1] -
                                   inputs[:, 1] * _CTRL_DUR)) - (rs + rp)

            zero = tf.zeros_like(pusher_penetration)

            pusher_penetration = tf.where(
                pusher_penetration < (zero - max_penetration),
                pusher_penetration, zero)

            loss += tf.math.square(
                tf.reduce_mean(pusher_penetration) * penalizing_factor)

            slider_penetration = 0

            for i in range(m + 1, _N_SLIDERS):

                if m == 0:
                    penetration = tf.math.sqrt(
                        tf.math.square((inputs[:, _DOF] + y_pred[:, 0]) -
                                       (inputs[:, _DOF + _VARS * i - 1] +
                                        y_pred[:, _VARS * i])) +
                        tf.math.square(y_pred[:, 1] -
                                       (inputs[:, _DOF + _VARS * i] +
                                        y_pred[:, _VARS * i + 1]))) - 2 * rs
                else:
                    penetration = tf.math.sqrt(
                        tf.math.square((inputs[:, _DOF + _VARS * m - 1] +
                                        y_pred[:, _VARS * m]) -
                                       (inputs[:, _DOF + _VARS * i - 1] +
                                        y_pred[:, _VARS * i])) +
                        tf.math.square((inputs[:, _DOF + _VARS * m] +
                                        y_pred[:, _VARS * m + 1]) -
                                       (inputs[:, _DOF + _VARS * i] +
                                        y_pred[:, _VARS * i + 1]))) - 2 * rs

                zero = tf.zeros_like(penetration)

                penetration = tf.where(penetration < (zero - max_penetration),
                                       penetration, zero)

                slider_penetration += tf.math.square(
                    tf.reduce_mean(penetration) * penalizing_factor)

            slider_penetration = K.print_tensor(slider_penetration)

            loss += slider_penetration

        return loss
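The tf.where pattern above keeps only penetrations deeper than max_penetration and zeroes the rest, so shallow contacts are not penalized; in isolation, with hypothetical distances:

import tensorflow as tf

max_penetration = 0.01
penetration = tf.constant([-0.20, -0.005, 0.30])  # negative = bodies overlap
zero = tf.zeros_like(penetration)
clamped = tf.where(penetration < (zero - max_penetration), penetration, zero)
print(clamped.numpy())  # [-0.2, 0.0, 0.0]: only the deep overlap survives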