Example #1
def box_diou(b1, b2):
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh / 2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh / 2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    union_area = b1_area + b2_area - intersect_area
    iou = intersect_area / (union_area + K.epsilon())

    center_distance = K.sum(K.square(b1_xy - b2_xy), axis=-1)
    enclose_mins = K.minimum(b1_mins, b2_mins)
    enclose_maxes = K.maximum(b1_maxes, b2_maxes)
    enclose_wh = K.maximum(enclose_maxes - enclose_mins, 0.0)
    enclose_diagonal = K.sum(K.square(enclose_wh), axis=-1)
    diou = iou - 1.0 * (center_distance) / (enclose_diagonal + K.epsilon())

    diou = K.expand_dims(diou, -1)
    return diou
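A minimal usage sketch, not part of the original source: it assumes the TensorFlow Keras backend and boxes encoded as (center_x, center_y, width, height).

from tensorflow.keras import backend as K

b1 = K.constant([[0.5, 0.5, 1.0, 1.0]])  # unit box centred at (0.5, 0.5)
b2 = K.constant([[0.7, 0.7, 1.0, 1.0]])  # the same box shifted by (0.2, 0.2)
print(K.eval(box_diou(b1, b2)))          # IoU minus the normalised centre-distance penalty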
Example #2
def loss(y_true, y_pred):
    # scale predictions so that the class probabilities of each sample sum to 1
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # clip to prevent NaNs and Infs
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    # weighted cross-entropy; `weights` is captured from the enclosing scope
    loss = y_true * K.log(y_pred) * weights
    loss = -K.sum(loss, -1)
    return loss
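The snippet only shows the inner closure; `weights` is presumably a per-class weight vector captured from an enclosing factory function. A hedged reconstruction of that factory (name and signature are assumptions, not the original code):

from tensorflow.keras import backend as K

def weighted_categorical_crossentropy(class_weights):
    weights = K.constant(class_weights)  # one positive weight per class

    def loss(y_true, y_pred):
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * weights, axis=-1)

    return loss

# usage: model.compile(optimizer='adam',
#                      loss=weighted_categorical_crossentropy([0.5, 2.0, 1.0]))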
Example #3
def do_batch_normalization(
        feature_matrix, scale_parameter=1., shift_parameter=0.):
    """Performs batch normalization on each feature in the batch.

    E = number of examples in batch

    :param feature_matrix: Input feature maps (numpy array).  Dimensions must be
        E x M x N x C or E x M x N x H x C.
    :param scale_parameter: Scale parameter (gamma in the equation on page 3 of
        Ioffe and Szegedy 2015).
    :param shift_parameter: Shift parameter (beta in the equation).
    :return: feature_matrix: Feature maps after batch normalization (same
        dimensions).
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_greater(scale_parameter, 0.)

    num_dimensions = len(feature_matrix.shape)
    error_checking.assert_is_geq(num_dimensions, 4)
    error_checking.assert_is_leq(num_dimensions, 5)

    stdev_matrix = numpy.std(feature_matrix, axis=0, ddof=1)
    stdev_matrix = numpy.expand_dims(stdev_matrix, axis=0)
    stdev_matrix = numpy.repeat(stdev_matrix, feature_matrix.shape[0], axis=0)

    mean_matrix = numpy.mean(feature_matrix, axis=0)
    mean_matrix = numpy.expand_dims(mean_matrix, axis=0)
    mean_matrix = numpy.repeat(mean_matrix, feature_matrix.shape[0], axis=0)

    return shift_parameter + scale_parameter * (
        (feature_matrix - mean_matrix) / (stdev_matrix + K.epsilon())
    )
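As a quick, self-contained check of the formula above (not from the original repository), the same per-feature standardization can be reproduced with NumPy alone; 1e-7 stands in for K.epsilon(), and the default scale/shift of 1 and 0 are used:

import numpy as np

batch = np.random.rand(8, 32, 32, 3)                     # E x M x N x C
mean = batch.mean(axis=0, keepdims=True)                 # per-feature mean over the batch
stdev = batch.std(axis=0, ddof=1, keepdims=True)         # per-feature sample standard deviation
normalized = 0. + 1. * (batch - mean) / (stdev + 1e-7)   # shift + scale * standardized
print(np.abs(normalized.mean(axis=0)).max())             # close to zero for every feature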
Example #4
def euclidean_distance(vects):
    '''
    Computes the Euclidean distances between vects[0] and vects[1].
    '''
    x, y = vects
    return K.sqrt(
        K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
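A short usage sketch (assumed, not from the source): the function is typically the body of a Lambda layer in a Siamese network, but it can also be evaluated directly on two constant embeddings.

from tensorflow.keras import backend as K

a = K.constant([[0., 0.], [1., 1.]])
b = K.constant([[3., 4.], [1., 1.]])
print(K.eval(euclidean_distance([a, b])))  # ~[[5.], [3.16e-4]]; the epsilon floor avoids sqrt(0)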
Example #5
    def call(self, inputs, mask=None, **kwargs):
        """Core implemention of soft attention

        Args:
            inputs (object): input tensor.

        Returns:
            object: weighted sum of input tensors.
        """

        attention = K.tanh(K.dot(inputs, self.W) + self.b)
        attention = K.dot(attention, self.q)

        attention = K.squeeze(attention, axis=2)

        if mask is None:
            attention = K.exp(attention)
        else:
            attention = K.exp(attention) * K.cast(mask, dtype="float32")

        attention_weight = attention / (
            K.sum(attention, axis=-1, keepdims=True) + K.epsilon())

        attention_weight = K.expand_dims(attention_weight)
        weighted_input = inputs * attention_weight
        return K.sum(weighted_input, axis=1)
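A small NumPy sketch (not from the source) of the same masked, epsilon-stabilised normalisation: exponentiated scores are zeroed at padded positions before being turned into attention weights.

import numpy as np

scores = np.array([[1.0, 2.0, 3.0]])
mask = np.array([[1.0, 1.0, 0.0]])              # last timestep is padding
weights = np.exp(scores) * mask
weights /= weights.sum(axis=-1, keepdims=True) + 1e-7
print(weights)                                  # padded position receives (near-)zero weight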
Example #6
    def get_log_probability_density(pred, y):
        mu_and_sigma = pred
        mu = mu_and_sigma[:, :2]
        sigma = mu_and_sigma[:, 2:]
        variance = K.square(sigma)
        pdf = 1. / K.sqrt(2. * np.pi * variance) * K.exp(-K.square(y - mu) /
                                                         (2. * variance))
        log_pdf = K.log(pdf + K.epsilon())

        return log_pdf
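A common, numerically steadier variant (an assumption on my part, not code from this source) computes the log-density directly instead of taking the log of the pdf, keeping the same two-column mean/sigma split:

import numpy as np
from tensorflow.keras import backend as K

def log_gaussian_pdf(pred, y):
    mu, sigma = pred[:, :2], pred[:, 2:]
    variance = K.square(sigma) + K.epsilon()
    # log N(y; mu, variance) = -0.5 * (log(2*pi*variance) + (y - mu)^2 / variance)
    return -0.5 * (K.log(2. * np.pi * variance) + K.square(y - mu) / variance)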
Example #7
    def concrete_dropout(self, x):
        '''
        Concrete dropout - used at training time (gradients can be propagated)
        :param x: input
        :return:  approx. dropped out input
        '''
        eps = K.cast_to_floatx(K.epsilon())
        temp = 0.1

        unif_noise = K.random_uniform(shape=K.shape(x))
        drop_prob = (K.log(self.p + eps) - K.log(1. - self.p + eps) +
                     K.log(unif_noise + eps) - K.log(1. - unif_noise + eps))
        drop_prob = K.sigmoid(drop_prob / temp)
        random_tensor = 1. - drop_prob

        retain_prob = 1. - self.p
        x *= random_tensor
        x /= retain_prob
        return x
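For intuition, the same relaxed-Bernoulli ("Concrete") draw can be reproduced in NumPy with a fixed dropout rate; this sketch is illustrative only and assumes p = 0.3:

import numpy as np

p, temp, eps = 0.3, 0.1, 1e-7
u = np.random.uniform(size=5)
logit = (np.log(p + eps) - np.log(1. - p + eps)
         + np.log(u + eps) - np.log(1. - u + eps))
drop_prob = 1. / (1. + np.exp(-logit / temp))   # sigmoid(logit / temp)
print(np.round(drop_prob, 3))                   # values close to 0 or 1 at this low temperature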
Example #8
    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim

        e = K.reshape(
            K.dot(K.reshape(x, (-1, features_dim)),
                  K.reshape(self.W, (features_dim, 1))),
            (-1, step_dim))  # e = K.dot(x, self.W)
        if self.bias:
            e += self.b
        e = K.tanh(e)

        a = K.exp(e)
        # apply the mask after the exp; the weights are re-normalized next
        if mask is not None:
            # cast the mask to floatX to avoid float64 upcasting in Theano
            a *= K.cast(mask, K.floatx())
        # In some cases, especially early in training, the sum may be almost zero,
        # which produces NaNs. The workaround is to add a very small positive number ε
        # to the sum (see the short NumPy check after this example).
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)

        c = K.sum(a * x, axis=1)
        return c
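A short NumPy check (not from the source) of why the epsilon is added to the sum: with a fully masked row the exponentiated scores sum to zero, and dividing by that sum alone yields NaNs.

import numpy as np

a = np.zeros((1, 4))                              # every timestep masked out
print(a / a.sum(axis=1, keepdims=True))           # -> [[nan nan nan nan]]
print(a / (a.sum(axis=1, keepdims=True) + 1e-7))  # -> [[0. 0. 0. 0.]], no NaN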
Example #9
def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
Example #10
def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
Example #11
def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
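The three metrics above (Examples #9-#11) are usually registered together at compile time; a minimal sketch, assuming the TensorFlow Keras API and that the three functions are in scope:

from tensorflow.keras import backend as K

y_true = K.constant([[1., 0., 1., 1.]])
y_pred = K.constant([[0.9, 0.2, 0.4, 0.8]])
print(K.eval(precision_m(y_true, y_pred)),  # true positives / predicted positives
      K.eval(recall_m(y_true, y_pred)),     # true positives / possible positives
      K.eval(f1_m(y_true, y_pred)))         # harmonic mean of precision and recall
# model.compile(optimizer='adam', loss='binary_crossentropy',
#               metrics=['accuracy', f1_m, precision_m, recall_m])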
Example #12
    def __call__(self, dag, gpuID, epoch_num=100, out_model='cgpunet.hdf5'):
        
        if self.verbose:
            print('GPUID     :', gpuID)
            print('epoch_num :', epoch_num)
            print('batch_size:', self.batchsize)

        
        train_steps = int(self.train_len/self.batchsize)
        valid_steps = int(self.valid_len/self.batchsize_valid)
        
        model = dag_2_cnn(dag, gpuID, self.input_shape, self.target_shape)

        # print summary
        model.summary()

        model_checkpoint = ModelCheckpoint(
            out_model, monitor='loss', verbose=1, save_best_only=True)
        history = History()
        
        # NOTE: default values: workers=1, use_multiprocessing=False.
        # TODO: investigate workers > 1
        history = model.fit_generator(
            generator=self.trainGenerator,
            steps_per_epoch=train_steps,
            epochs=epoch_num,
            callbacks=[model_checkpoint],
            validation_data=self.validGenerator,
            validation_steps=valid_steps,
            validation_freq=int(epoch_num))
        
        val_acc = history.history['val_accuracy']
        #val_loss = history.history['val_loss']
        val_precision = history.history['val_precision']
        val_recall = history.history['val_recall']

        last_epoch = len(val_precision) - 1
        val_f1 = 2 * ((val_precision[last_epoch] * val_recall[last_epoch]) /
                      (val_precision[last_epoch] + val_recall[last_epoch] + K.epsilon()))
        
        trainable_count = int(np.sum([K.count_params(p) for p in model.trainable_weights]))

        if not os.path.isdir('./figures'):
            os.makedirs('./figures')

        acc_fig_name = out_model.replace('.hdf5', '_acc.png')
        acc_fig_name = './figures/' + acc_fig_name

        loss_fig_name = out_model.replace('.hdf5', '_loss.png')
        loss_fig_name = './figures/' + loss_fig_name

        # Plot training & validation accuracy values
        plt.figure()
        plt.plot(history.history['accuracy'])
        plt.plot(history.history['val_accuracy'])
        plt.title('Model accuracy: {}'.format(out_model))
        plt.ylabel('Accuracy')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Validation'], loc='upper left')
        plt.savefig(acc_fig_name)
        
        plt.figure()
        plt.plot(history.history['loss'])
        plt.plot(history.history['val_loss'])
        plt.title('Model loss: {}'.format(out_model))
        plt.ylabel('Loss')
        plt.xlabel('Epoch')
        plt.legend(['Train', 'Validation'], loc='upper left')
        plt.savefig(loss_fig_name)

        pickle_name = out_model.replace('.hdf5', '.gpickle')
        
        if not os.path.isdir('./p_files'):
            os.makedirs('./p_files')

        pickle_name = './p_files/' + pickle_name
        nx.write_gpickle(dag, pickle_name)

        K.clear_session()

        return (float(val_f1), trainable_count)