Ejemplo n.º 1
0
    def loss(self, inputs):
        """Attractive + repulsive clustering-coordinate loss.

        Accepts either (coords, truth_indices, row_splits) or
        (coords, truth_indices, beta_like, row_splits); when an explicit
        beta_like input is given, the average-cc-position mode is disabled.
        Noise is handled inside oc_loss via truth_indices < 0.
        """
        n_inputs = len(inputs)
        if n_inputs == 3:
            coords, truth_indices, row_splits = inputs
            beta_like = None
            use_avg_cc = True
        elif n_inputs == 4:
            coords, truth_indices, beta_like, row_splits = inputs
            use_avg_cc = False
        else:
            raise ValueError("LLClusterCoordinates requires 3 or 4 inputs")

        zeros = tf.zeros_like(coords[:, 0:1])
        if beta_like is None:
            # constant beta of 0.5 everywhere
            beta_like = zeros + 1. / 2.
        else:
            # map to (0,1), detach (informative only, no gradient),
            # then scale slightly into [0.5, 0.6]
            beta_like = 0.1 * tf.stop_gradient(tf.nn.sigmoid(beta_like)) + 0.5

        V_att, V_rep, _, _, _, _ = oc_loss(
            coords, beta_like,  # beta constant
            truth_indices, row_splits,
            zeros, zeros, Q_MIN=1.0, S_B=0., energyweights=None,
            use_average_cc_pos=use_avg_cc, payload_rel_threshold=0.01)

        att = (1. - self.repulsion_contrib) * V_att
        rep = self.repulsion_contrib * V_rep
        lossval = att + rep
        if self.print_loss:
            print(self.name, lossval.numpy(), 'att loss:', att.numpy(), 'rep loss:', rep.numpy())
        return lossval
Ejemplo n.º 2
0
    def loss(self, inputs):
        """Full object-condensation loss with payload terms.

        ``inputs`` must contain 20 tensors (including the ``t_is_unique``
        truth flag) or 19 tensors (then a surrogate ``t_is_unique`` is built
        and a warning is printed). Combines attractive/repulsive potentials,
        beta and noise terms, and the energy/position/time/classification
        payload losses via ``oc_loss``. Returns a scalar loss tensor and
        records the individual terms via ``add_prompt_metric``.
        """

        assert len(inputs) == 20 or len(inputs) == 19
        hasunique = False
        if len(inputs) == 20:
            pred_beta, pred_ccoords, pred_distscale,\
            pred_energy, pred_energy_low_quantile,pred_energy_high_quantile,\
            pred_pos, pred_time, pred_id,\
            rechit_energy,\
            t_idx, t_energy, t_pos, t_time, t_pid, t_spectator_weights,t_fully_contained,t_rec_energy,\
            t_is_unique,\
            rowsplits = inputs
            hasunique = True
        elif len(inputs) == 19:
            pred_beta, pred_ccoords, pred_distscale,\
            pred_energy, pred_energy_low_quantile,pred_energy_high_quantile,\
            pred_pos, pred_time, pred_id,\
            rechit_energy,\
            t_idx, t_energy, t_pos, t_time, t_pid, t_spectator_weights,t_fully_contained,t_rec_energy,\
            rowsplits = inputs

            # surrogate uniqueness flag: mark only the very first hit
            t_is_unique = tf.concat([t_idx[0:1] * 0 + 1, t_idx[1:] * 0],
                                    axis=0)
            hasunique = False
            print('WARNING. functions using unique will not work as expected')

            #guard

        # shape is unknown during the keras build pass -> return zero loss
        if rowsplits.shape[0] is None:
            return tf.constant(0, dtype='float32')

        energy_weights = self.calc_energy_weights(t_energy, t_pid)
        if not self.use_energy_weights:
            energy_weights = tf.zeros_like(energy_weights) + 1.

        #reduce weight on not fully contained showers
        energy_weights = tf.where(t_fully_contained > 0, energy_weights,
                                  energy_weights * 0.01)

        q_min = self.q_min  #self.calc_qmin_weight(rechit_energy)#FIXME

        #also kill any gradients for zero weight
        energy_loss, energy_quantiles_loss = None, None
        if self.train_energy_correction:
            energy_loss, energy_quantiles_loss = self.calc_energy_correction_factor_loss(
                t_energy, t_rec_energy, pred_energy, pred_energy_low_quantile,
                pred_energy_high_quantile)
            energy_loss *= self.energy_loss_weight
        else:
            energy_loss = self.energy_loss_weight * self.calc_energy_loss(
                t_energy, pred_energy)
            # quantile loss is still computed even without energy correction
            _, energy_quantiles_loss = self.calc_energy_correction_factor_loss(
                t_energy, t_rec_energy, pred_energy, pred_energy_low_quantile,
                pred_energy_high_quantile)
        energy_quantiles_loss *= self.energy_loss_weight / 2.

        position_loss = self.position_loss_weight * self.calc_position_loss(
            t_pos, pred_pos)
        timing_loss = self.timing_loss_weight * self.calc_timing_loss(
            t_time, pred_time)
        classification_loss = self.classification_loss_weight * self.calc_classification_loss(
            t_pid, pred_id, t_is_unique, hasunique)

        # one payload column per term; order must match the unpacking below
        full_payload = tf.concat([
            energy_loss, position_loss, timing_loss, classification_loss,
            energy_quantiles_loss
        ],
                                 axis=-1)

        if self.payload_beta_clip > 0:
            full_payload = tf.where(pred_beta < self.payload_beta_clip, 0.,
                                    full_payload)
            #clip not weight, so there is no gradient to push below threshold!

        is_spectator = t_spectator_weights  #not used right now, and likely never again (if the truth remains ok)
        if is_spectator is None:
            is_spectator = tf.zeros_like(pred_beta)

        pred_beta = tf.debugging.check_numerics(pred_beta,
                                                "beta has nans of infs")
        #safe guards
        with tf.control_dependencies([
                tf.assert_equal(rowsplits[-1], pred_beta.shape[0]),
                tf.assert_equal(pred_beta >= 0., True),
                tf.assert_equal(pred_beta <= 1., True),
                tf.assert_equal(is_spectator <= 1., True),
                tf.assert_equal(is_spectator >= 0., True)
        ]):

            att, rep, noise, min_b, payload, exceed_beta = oc_loss(
                x=pred_ccoords,
                beta=pred_beta,
                truth_indices=t_idx,
                row_splits=rowsplits,
                is_spectator=is_spectator,
                payload_loss=full_payload,
                Q_MIN=q_min,
                S_B=self.s_b,
                noise_q_min=self.noise_q_min,
                distance_scale=pred_distscale,
                energyweights=energy_weights,
                use_average_cc_pos=self.use_average_cc_pos,
                payload_rel_threshold=self.payload_rel_threshold,
                cont_beta_loss=self.cont_beta_loss,
                prob_repulsion=self.prob_repulsion,
                phase_transition=self.phase_transition > 0.,
                phase_transition_double_weight=self.
                phase_transition_double_weight,
                #removed
                #alt_potential_norm=self.alt_potential_norm,
                payload_beta_gradient_damping_strength=self.
                payload_beta_gradient_damping_strength,
                kalpha_damping_strength=self.kalpha_damping_strength,
                beta_gradient_damping=self.beta_gradient_damping,
                repulsion_q_min=self.repulsion_q_min,
                super_repulsion=self.super_repulsion,
                super_attraction=self.super_attraction,
                div_repulsion=self.div_repulsion,
                dynamic_payload_scaling_onset=self.
                dynamic_payload_scaling_onset)

        # NOTE(review): metric name says 'dynamic_payload_scaling' but the
        # value recorded is att + rep — confirm this is intentional
        self.add_prompt_metric(att + rep,
                               self.name + '_dynamic_payload_scaling')

        att *= self.potential_scaling
        rep *= self.potential_scaling * self.repulsion_scaling
        min_b *= self.beta_loss_scale
        noise *= self.noise_scaler
        exceed_beta *= self.too_much_beta_scale

        #unscaled should be well in range < 1.
        att = self.softclip(att, self.potential_scaling)
        rep = self.softclip(rep,
                            self.potential_scaling * self.repulsion_scaling)
        #min_b = self.softclip(min_b, 5.)  # not needed, limited anyway
        #noise = self.softclip(noise, 5.)  # not needed limited to 1 anyway

        # unpack payload columns in the same order they were concatenated
        energy_loss = payload[0]
        pos_loss = payload[1]
        time_loss = payload[2]
        class_loss = payload[3]
        energy_unc_loss = payload[4]

        #explicit cc damping
        ccdamp = self.cc_damping_strength * (
            0.02 *
            tf.reduce_mean(pred_ccoords))**4  # gently keep them around 0

        lossval = att + rep + min_b + noise + energy_loss + energy_unc_loss + pos_loss + time_loss + class_loss + exceed_beta + ccdamp

        lossval = tf.reduce_mean(lossval)

        self.add_prompt_metric(lossval, self.name + '_loss')
        self.add_prompt_metric(att, self.name + '_attractive_loss')
        self.add_prompt_metric(rep, self.name + '_repulsive_loss')
        self.add_prompt_metric(min_b, self.name + '_min_beta_loss')
        self.add_prompt_metric(noise, self.name + '_noise_loss')
        self.add_prompt_metric(energy_loss, self.name + '_energy_loss')
        self.add_prompt_metric(energy_unc_loss, self.name + '_energy_unc_loss')
        self.add_prompt_metric(pos_loss, self.name + '_position_loss')
        self.add_prompt_metric(time_loss, self.name + '_time_loss')
        self.add_prompt_metric(class_loss, self.name + '_class_loss')
        self.add_prompt_metric(exceed_beta, self.name + '_exceed_beta_loss')

        self.maybe_print_loss(lossval)

        return lossval
Ejemplo n.º 3
0
def obj_cond_loss(truth_dict, pred_dict, feat, rowsplits, config):
    """Object-condensation loss with energy/position/time/class payloads.

    Parameters
    ----------
    truth_dict : dict
        Per-hit truth tensors ('truthHitAssignedEnergies', 'truthClasses',
        'truthHitAssignementIdx', 'truthIsSpectator', positions, time).
    pred_dict : dict
        Per-hit prediction tensors ('predEnergy', 'predX/Y/T', 'predBeta',
        'predCCoords', 'predClasses').
    feat : dict
        Per-hit input features; 'recHitX'/'recHitY' are used so that the
        network predicts a position *offset* relative to the hit.
    rowsplits : tensor
        Row splits delimiting batch elements.
    config : dict
        Loss weights and flags.

    Returns
    -------
    Scalar loss tensor.

    Raises
    ------
    ValueError
        If config['log_energy'] is set (no longer supported).
    """
    start_time = time.time()

    rowsplits = tf.cast(rowsplits, tf.int64)  # just for first loss evaluation from stupid keras

    # log-scaled per-hit energy weights, optionally disabled
    energyweights = truth_dict['truthHitAssignedEnergies']
    energyweights = tf.math.log(0.1 * energyweights + 1.)

    if not config['use_energy_weights']:
        energyweights *= 0.
    energyweights += 1.

    # just to mitigate the biased sample: damp weights below 10 (units per
    # 'truthHitAssignedEnergies'), slightly boost above
    energyweights = tf.where(truth_dict['truthHitAssignedEnergies'] > 10., energyweights + 0.1,
                             energyweights * (truth_dict['truthHitAssignedEnergies'] / 10. + 0.1))
    if not config['downweight_low_energy']:
        energyweights = tf.zeros_like(energyweights) + 1.
        if config['use_energy_weights']:
            energyweights = tf.math.log(0.1 * truth_dict['truthHitAssignedEnergies'] + 1.)

    # also using log now, scale back in evaluation #
    den_offset = config['energy_den_offset']
    if config['log_energy']:
        raise ValueError(
            "loss config log_energy is not supported anymore. Please use the 'ExpMinusOne' layer within the model instead to scale the output.")

    energy_diff = (pred_dict['predEnergy'] - truth_dict['truthHitAssignedEnergies'])

    scaled_true_energy = truth_dict['truthHitAssignedEnergies']
    if config['rel_energy_mse']:
        scaled_true_energy *= scaled_true_energy
    sqrt_true_en = tf.sqrt(scaled_true_energy + 1e-6)
    energy_loss = energy_diff / (sqrt_true_en + den_offset)

    if config['n_classes'] > 0:
        print("Classification loss will be applied")
        # labels are expected to start at 1; shift to 0-based encoding
        truth_classes_label_encoding = truth_dict['truthClasses'][:, 0] - 1
        # Assert expects a list of data tensors as its second argument
        tf.debugging.Assert(tf.greater_equal(tf.reduce_min(truth_classes_label_encoding), 0),
                            ['Problem in labels (make sure they don\'t start at 0)'])
        # FIX: was tf.greater_equal, which asserted max >= n_classes-1;
        # the intended sanity check is that no label exceeds n_classes-1
        tf.debugging.Assert(tf.less_equal(tf.reduce_max(truth_classes_label_encoding), config['n_classes']-1),
                            ['Problem in labels (make sure they don\'t exceed n_classes)'])
        truth_classes_one_hot = tf.one_hot(tf.cast(truth_classes_label_encoding, tf.int32), depth=config['n_classes'])
        pred_classes_probab_scores = pred_dict['predClasses']

        classification_loss = tf.nn.softmax_cross_entropy_with_logits(labels=truth_classes_one_hot, logits=pred_classes_probab_scores)[..., tf.newaxis]
    else:
        classification_loss = tf.zeros((energyweights.shape[0], 1)) # Just a zeros vector - should result in zero error

    if config['huber_energy_scale'] > 0:
        huber_scale = config['huber_energy_scale'] * sqrt_true_en
        energy_loss = huber(energy_loss, huber_scale)
    else:
        energy_loss = energy_loss ** 2

    # position is predicted as an offset from the hit position
    xdiff = pred_dict['predX'] + feat['recHitX'] - truth_dict['truthHitAssignedX']
    ydiff = pred_dict['predY'] + feat['recHitY'] - truth_dict['truthHitAssignedY']

    pos_offs = tf.reduce_sum(tf.concat([xdiff, ydiff], axis=-1) ** 2, axis=-1, keepdims=True)

    tdiff = pred_dict['predT'] - truth_dict['truthHitAssignedT']
    tdiff = (1e6 * tdiff) ** 2  # time is in ns

    # one payload column per term; order must match the unpacking below
    payload_loss = tf.concat([config['energy_loss_weight'] * energy_loss,
                              config['position_loss_weight'] * pos_offs,
                              config['timing_loss_weight'] * tdiff,
                              config['classification_loss_weight'] * classification_loss], axis=-1)

    if not config['use_spectators']:
        truth_dict['truthIsSpectator'] = tf.zeros_like(truth_dict['truthIsSpectator'])

    attractive_loss, rep_loss, noise_loss, min_beta_loss, payload_loss_full, too_much_beta_loss = oc_loss(

        x=pred_dict['predCCoords'],
        beta=pred_dict['predBeta'],
        truth_indices=truth_dict['truthHitAssignementIdx'],
        row_splits=rowsplits,
        is_spectator=truth_dict['truthIsSpectator'],
        payload_loss=payload_loss,
        Q_MIN=config['q_min'],
        S_B=config['s_b'],
        energyweights=energyweights,
        use_average_cc_pos=config['use_average_cc_pos'],
        payload_rel_threshold=config['payload_rel_threshold'],
        cont_beta_loss=config['cont_beta_loss']
    )

    attractive_loss *= config['potential_scaling']
    rep_loss *= config['potential_scaling'] * config['repulsion_scaling']
    min_beta_loss *= config['beta_loss_scale']
    noise_loss *= config['noise_scaler']
    too_much_beta_loss *= config['too_much_beta_scale']

    spectator_beta_penalty = tf.constant(0.)
    if config['use_spectators']:
        spectator_beta_penalty = 0.1 * spectator_penalty_2(truth_dict, pred_dict, rowsplits)
        spectator_beta_penalty = tf.where(tf.math.is_nan(spectator_beta_penalty), 0, spectator_beta_penalty)

    # unpack payload columns in the same order they were concatenated
    energy_loss = payload_loss_full[0]
    pos_loss = payload_loss_full[1]
    time_loss = payload_loss_full[2]
    class_loss = payload_loss_full[3]

    loss = attractive_loss + rep_loss + min_beta_loss + noise_loss + energy_loss + time_loss + pos_loss + spectator_beta_penalty + too_much_beta_loss

    loss = tf.debugging.check_numerics(loss, "loss has nan")

    if config['pre_train']:
        preloss = pre_training_loss(truth_dict, pred_dict)
        loss /= 10.
        loss += preloss

    print('loss', loss.numpy(),
          'attractive_loss', attractive_loss.numpy(),
          'rep_loss', rep_loss.numpy(),
          'min_beta_loss', min_beta_loss.numpy(),
          'noise_loss', noise_loss.numpy(),
          'energy_loss', energy_loss.numpy(),
          'pos_loss', pos_loss.numpy(),
          'class_loss', class_loss.numpy(),
          'time_loss', time_loss.numpy(),
          'spectator_beta_penalty', spectator_beta_penalty.numpy(),
          'too_much_beta_loss', too_much_beta_loss.numpy())

    print('time for this loss eval', int((time.time() - start_time) * 1000), 'ms')
    global g_time
    print('time for total batch', int((time.time() - g_time) * 1000), 'ms')
    g_time = time.time()

    return loss
Ejemplo n.º 4
0
def full_obj_cond_loss(truth, pred_in, rowsplits):
    """Object-condensation loss driven by the module-level ``config``.

    Splits ``pred_in`` into features and predictions, builds the payload
    losses (energy, position, timing) and combines them with the
    attractive/repulsive/noise/beta terms via ``oc_loss``. Returns a scalar
    loss tensor; prints per-term values and timing on every call.
    Uses the globals ``full_obj_cond_loss_counter`` and ``g_time``.
    """
    global full_obj_cond_loss_counter
    full_obj_cond_loss_counter+=1
    start_time = time.time()
    
    # shape unknown during the keras build pass -> return zero loss
    if truth.shape[0] is None: 
        return tf.constant(0., tf.float32)
    
    rowsplits = tf.cast(rowsplits, tf.int64)#just for first loss evaluation from stupid keras
    
    feat,pred = split_feat_pred(pred_in)
    d = create_index_dict(truth, pred, n_ccoords=config.n_ccoords)
    feat = create_feature_dict(feat)
    #print('feat',feat.shape)
    
    #d['predBeta'] = tf.clip_by_value(d['predBeta'],1e-6,1.-1e-6)
    
    # truncate to the valid length stored in the last row-split entry
    row_splits = rowsplits[ : rowsplits[-1,0],0]
    
    classes = d['truthHitAssignementIdx'][...,0]
    
    # log-scaled per-hit energy weights, optionally disabled via config
    energyweights = d['truthHitAssignedEnergies']
    energyweights = tf.math.log(0.1 * energyweights + 1.)
    
    if not config.use_energy_weights:
        energyweights *= 0.
    energyweights += 1.
    
    #just to mitigate the biased sample: damp weights below 10, boost above
    energyweights = tf.where(d['truthHitAssignedEnergies']>10.,energyweights+0.1, energyweights*(d['truthHitAssignedEnergies']/10.+0.1))
    if not config.downweight_low_energy:
        energyweights = tf.zeros_like(energyweights) + 1.
        if config.use_energy_weights:
            energyweights = tf.math.log(0.1 * d['truthHitAssignedEnergies'] + 1.)
    
    
    #also using log now, scale back in evaluation #
    den_offset = config.energy_den_offset
    if config.log_energy:
        raise ValueError("loss config log_energy is not supported anymore. Please use the 'ExpMinusOne' layer within the model instead to scale the output.")
    
    energy_diff = (d['predEnergy'] - d['truthHitAssignedEnergies']) 
    
    scaled_true_energy = d['truthHitAssignedEnergies']
    if config.rel_energy_mse:
        scaled_true_energy *= scaled_true_energy
    sqrt_true_en = tf.sqrt(scaled_true_energy + 1e-6)
    energy_loss = energy_diff/(sqrt_true_en+den_offset)
    
    if config.huber_energy_scale>0:
        huber_scale = config.huber_energy_scale * sqrt_true_en
        energy_loss = huber(energy_loss, huber_scale)
    else:
        energy_loss = energy_loss**2
    
    pos_offs = None
    payload_loss = None
    
    # position is predicted as an offset from the hit position
    xdiff = d['predX']+feat['recHitX']  -   d['truthHitAssignedX']
    ydiff = d['predY']+feat['recHitY']  -   d['truthHitAssignedY']
    pos_offs = tf.reduce_sum(tf.concat( [xdiff,  ydiff],axis=-1)**2, axis=-1, keepdims=True)
    
    tdiff = d['predT']  -   d['truthHitAssignedT']
    #print("d['truthHitAssignedT']", tf.reduce_mean(d['truthHitAssignedT']), tdiff)
    tdiff = (1e6 * tdiff)**2
    # self.timing_loss_weight
    
    # one payload column per term; order must match the unpacking below
    payload_loss = tf.concat([config.energy_loss_weight * energy_loss ,
                          config.position_loss_weight * pos_offs,
                          config.timing_loss_weight * tdiff], axis=-1)
    
    
    
    
    if not config.use_spectators:
        d['truthIsSpectator'] = tf.zeros_like(d['truthIsSpectator'])
    
    
    attractive_loss, rep_loss, noise_loss, min_beta_loss, payload_loss_full, too_much_beta_loss = oc_loss(
        
        x = d['predCCoords'],
        beta = d['predBeta'], 
        truth_indices = d['truthHitAssignementIdx'], 
        row_splits=row_splits, 
        is_spectator = d['truthIsSpectator'], 
        payload_loss=payload_loss,
        Q_MIN=config.q_min, 
        S_B=config.s_b,
        energyweights=energyweights,
        use_average_cc_pos=config.use_average_cc_pos,
        payload_rel_threshold=config.payload_rel_threshold,
        cont_beta_loss=config.cont_beta_loss
        )
    
    
    attractive_loss *= config.potential_scaling
    rep_loss *= config.potential_scaling * config.repulsion_scaling
    min_beta_loss *= config.beta_loss_scale
    noise_loss *= config.noise_scaler
    too_much_beta_loss *= config.too_much_beta_scale
    
    spectator_beta_penalty = tf.constant(0.)
    if config.use_spectators:
        spectator_beta_penalty =  0.1 * spectator_penalty(d,row_splits)
        spectator_beta_penalty = tf.where(tf.math.is_nan(spectator_beta_penalty),0,spectator_beta_penalty)
    
    #attractive_loss = tf.where(tf.math.is_nan(attractive_loss),0,attractive_loss)
    #rep_loss = tf.where(tf.math.is_nan(rep_loss),0,rep_loss)
    #min_beta_loss = tf.where(tf.math.is_nan(min_beta_loss),0,min_beta_loss)
    #noise_loss = tf.where(tf.math.is_nan(noise_loss),0,noise_loss)
    #payload_loss_full = tf.where(tf.math.is_nan(payload_loss_full),0,payload_loss_full)
    
    
    
    # unpack payload columns in the same order they were concatenated
    energy_loss = payload_loss_full[0]
    pos_loss = payload_loss_full[1]
    time_loss = payload_loss_full[2]
    #energy_loss *= 0.0000001
    
    # neglect energy loss almost fully
    loss = attractive_loss + rep_loss +  min_beta_loss +  noise_loss  + energy_loss + time_loss + pos_loss + spectator_beta_penalty + too_much_beta_loss
    
    loss = tf.debugging.check_numerics(loss,"loss has nan")

    
    if config.pre_train:
         preloss = pre_training_loss(truth,pred_in)
         loss /= 10.
         loss += preloss
         
    print('call ',full_obj_cond_loss_counter)
    print('loss',loss.numpy(), 
          'attractive_loss',attractive_loss.numpy(),
          'rep_loss', rep_loss.numpy(), 
          'min_beta_loss', min_beta_loss.numpy(), 
          'noise_loss' , noise_loss.numpy(),
          'energy_loss', energy_loss.numpy(), 
          'pos_loss', pos_loss.numpy(), 
          'time_loss', time_loss.numpy(), 
          'spectator_beta_penalty', spectator_beta_penalty.numpy(), 
          'too_much_beta_loss', too_much_beta_loss.numpy())
    
    
    print('time for this loss eval',int((time.time()-start_time)*1000),'ms')
    global g_time
    print('time for total batch',int((time.time()-g_time)*1000),'ms')
    g_time=time.time()
    
    return loss
Ejemplo n.º 5
0
    def loss(self, inputs):
        """Object-condensation loss with optional local distance scaling.

        The expected layout of ``inputs`` depends on
        ``self.use_local_distances`` (adds ``pred_distscale``) and
        ``self.energy_weighted_qmin`` (adds ``rechit_energy``). Combines
        the potential terms with energy/position/time/classification
        payload losses via ``oc_loss`` and returns a scalar loss tensor.
        Optionally prints timing and per-term values.
        """

        start_time = 0
        if self.print_time:
            start_time = time.time()

        pred_distscale = None
        rechit_energy = None
        if self.use_local_distances:
            if self.energy_weighted_qmin:
                pred_beta, pred_ccoords, pred_distscale, \
                rechit_energy, \
                pred_energy, pred_pos, pred_time, pred_id,\
                t_idx, t_energy, t_pos, t_time, t_pid,\
                rowsplits = inputs

            else:
                pred_beta, pred_ccoords, pred_distscale, pred_energy, pred_pos, pred_time, pred_id,\
                t_idx, t_energy, t_pos, t_time, t_pid,\
                rowsplits = inputs

        else:
            pred_beta, pred_ccoords, pred_energy, pred_pos, pred_time, pred_id,\
            t_idx, t_energy, t_pos, t_time, t_pid,\
            rowsplits = inputs

        # shape unknown during the keras build pass -> return zero loss
        if rowsplits.shape[0] is None:
            return tf.constant(0, dtype='float32')

        energy_weights = self.calc_energy_weights(t_energy)
        if not self.use_energy_weights:
            energy_weights = tf.zeros_like(energy_weights) + 1.

        q_min = self.q_min  #self.calc_qmin_weight(rechit_energy)#FIXME

        #also kill any gradients for zero weight
        energy_loss = self.energy_loss_weight * self.calc_energy_loss(
            t_energy, pred_energy)
        position_loss = self.position_loss_weight * self.calc_position_loss(
            t_pos, pred_pos)
        timing_loss = self.timing_loss_weight * self.calc_timing_loss(
            t_time, pred_time)
        classification_loss = self.classification_loss_weight * self.calc_classification_loss(
            t_pid, pred_id)

        # one payload column per term; order must match the unpacking below
        full_payload = tf.concat(
            [energy_loss, position_loss, timing_loss, classification_loss],
            axis=-1)

        if self.payload_beta_clip > 0:
            full_payload = tf.where(pred_beta < self.payload_beta_clip, 0.,
                                    full_payload)
            #clip not weight, so there is no gradient to push below threshold!

        is_spectator = tf.zeros_like(
            pred_beta
        )  #not used right now, and likely never again (if the truth remains ok)

        att, rep, noise, min_b, payload, exceed_beta = oc_loss(
            x=pred_ccoords,
            beta=pred_beta,
            truth_indices=t_idx,
            row_splits=rowsplits,
            is_spectator=is_spectator,
            payload_loss=full_payload,
            Q_MIN=q_min,
            S_B=self.s_b,
            distance_scale=pred_distscale,
            energyweights=energy_weights,
            use_average_cc_pos=self.use_average_cc_pos,
            payload_rel_threshold=self.payload_rel_threshold,
            cont_beta_loss=self.cont_beta_loss,
            prob_repulsion=self.prob_repulsion,
            phase_transition=self.phase_transition > 0.,
            phase_transition_double_weight=self.phase_transition_double_weight,
            alt_potential_norm=self.alt_potential_norm,
            payload_beta_gradient_damping_strength=self.
            payload_beta_gradient_damping_strength,
            kalpha_damping_strength=self.kalpha_damping_strength,
            beta_gradient_damping=self.beta_gradient_damping,
            repulsion_q_min=self.repulsion_q_min,
            super_repulsion=self.super_repulsion)

        att *= self.potential_scaling
        rep *= self.potential_scaling * self.repulsion_scaling
        min_b *= self.beta_loss_scale
        noise *= self.noise_scaler
        exceed_beta *= self.too_much_beta_scale

        #unscaled should be well in range < 1.
        att = self.softclip(att, self.potential_scaling)
        rep = self.softclip(rep,
                            self.potential_scaling * self.repulsion_scaling)
        #min_b = self.softclip(min_b, 5.)  # not needed, limited anyway
        #noise = self.softclip(noise, 5.)  # not needed limited to 1 anyway

        # unpack payload columns in the same order they were concatenated
        energy_loss = payload[0]
        pos_loss = payload[1]
        time_loss = payload[2]
        class_loss = payload[3]

        #explicit cc damping
        ccdamp = self.cc_damping_strength * (
            0.02 *
            tf.reduce_mean(pred_ccoords))**4  # gently keep them around 0

        lossval = att + rep + min_b + noise + energy_loss + pos_loss + time_loss + class_loss + exceed_beta + ccdamp

        lossval = tf.reduce_mean(lossval)

        #loss should be <1 pretty quickly in most cases; avoid very hard hits from high LRs shooting to the moon

        if self.print_time:
            print('loss layer', self.name, 'took',
                  int((time.time() - start_time) * 100000.) / 100., 'ms')
            print('loss layer info:', self.name, 'batch took',
                  int((time.time() - self.loc_time) * 100000.) / 100., 'ms',
                  'for',
                  len(rowsplits.numpy()) - 1, 'batch elements')
            self.loc_time = time.time()

        if self.print_loss:
            minbtext = 'min_beta_loss'
            if self.phase_transition > 0:
                minbtext = 'phase transition loss'
                print('avg beta', tf.reduce_mean(pred_beta))
            print('loss',
                  lossval.numpy(), 'attractive_loss', att.numpy(), 'rep_loss',
                  rep.numpy(), minbtext, min_b.numpy(), 'noise_loss',
                  noise.numpy(), 'energy_loss', energy_loss.numpy(),
                  'pos_loss', pos_loss.numpy(),
                  'time_loss', time_loss.numpy(), 'class_loss',
                  class_loss.numpy(), 'ccdamp', ccdamp.numpy(), '\n')

        return lossval
Ejemplo n.º 6
0
    def loss(self, inputs):
        """Object-condensation loss variant with a reduced payload.

        Only the energy ("pz") and position ("px py") payload terms enter
        the loss; the timing and classification losses are computed but not
        included in the payload. ``t_time`` is repurposed as the per-hit
        energy weights here (see the comment below) — confirm the upstream
        input packing if this looks surprising.
        """

        start_time = 0
        if self.print_time:
            start_time = time.time()


        pred_beta, pred_ccoords, pred_energy, pred_pos, pred_time, pred_id,\
        t_idx, t_energy, t_pos, t_time, t_pid,\
        rowsplits = inputs

        # shape unknown during the keras build pass -> return zero loss
        if rowsplits.shape[0] is None:
            return tf.constant(0, dtype='float32')

        #these are the weights
        energy_weights = t_time

        #pz
        energy_loss = self.energy_loss_weight * self.calc_energy_loss(
            t_energy, pred_energy)

        #px py
        position_loss = self.position_loss_weight * self.calc_position_loss(
            t_pos, pred_pos)

        #0
        timing_loss = self.timing_loss_weight * self.calc_timing_loss(
            t_time, pred_time)

        #0
        classification_loss = self.classification_loss_weight * self.calc_classification_loss(
            t_pid, pred_id)

        # only energy (pz) and position (px py) enter the payload
        full_payload = tf.concat([energy_loss, position_loss], axis=-1)

        is_spectator = tf.zeros_like(
            pred_beta
        )  #not used right now, and likely never again (if the truth remains ok)

        att, rep, noise, min_b, payload, exceed_beta = oc_loss(
            x=pred_ccoords,
            beta=pred_beta,
            truth_indices=t_idx,
            row_splits=rowsplits,
            is_spectator=is_spectator,
            payload_loss=full_payload,
            Q_MIN=self.q_min,
            S_B=self.s_b,
            energyweights=energy_weights,
            use_average_cc_pos=self.use_average_cc_pos,
            payload_rel_threshold=self.payload_rel_threshold,
            cont_beta_loss=self.cont_beta_loss,
            prob_repulsion=self.prob_repulsion,
            phase_transition=self.phase_transition > 0.,
            alt_potential_norm=self.alt_potential_norm)

        att *= self.potential_scaling
        rep *= self.potential_scaling * self.repulsion_scaling
        min_b *= self.beta_loss_scale
        noise *= self.noise_scaler
        exceed_beta *= self.too_much_beta_scale

        # unpack payload columns in the order they were concatenated
        energy_loss = tf.debugging.check_numerics(payload[0],
                                                  "pz loss has NaN")
        pos_loss = tf.debugging.check_numerics(payload[1],
                                               "px py loss has NaN")

        lossval = att + rep + min_b + noise + energy_loss + pos_loss + exceed_beta

        lossval = tf.debugging.check_numerics(lossval, "loss has nan")
        lossval = tf.reduce_mean(lossval)

        if self.print_time:
            print('loss layer', self.name, 'took',
                  int((time.time() - start_time) * 100000.) / 100., 'ms')
            print('loss layer info:', self.name, 'batch took',
                  int((time.time() - self.loc_time) * 100000.) / 100., 'ms')
            self.loc_time = time.time()

        if self.print_loss:
            minbtext = 'min_beta_loss'
            if self.phase_transition > 0:
                minbtext = 'phase transition loss'
                print('avg beta', tf.reduce_mean(pred_beta))
            print('loss',
                  lossval.numpy(), 'attractive_loss', att.numpy(), 'rep_loss',
                  rep.numpy(), minbtext, min_b.numpy(), 'noise_loss',
                  noise.numpy(), 'pz', energy_loss.numpy(), 'px py',
                  pos_loss.numpy(), 'exceed_beta', exceed_beta.numpy(), '\n')

        return lossval
Ejemplo n.º 7
0
    def loss(self, inputs):
        """
        Compute the full object-condensation loss for one batch.

        Expects ``inputs`` as a flat sequence of 12 tensors, in order:
        pred_beta, pred_ccoords, pred_energy, pred_pos, pred_time, pred_id,
        t_idx, t_energy, t_pos, t_time, t_pid, rowsplits.

        Returns a scalar float32 tensor: the mean of the scaled
        attractive/repulsive/beta/noise condensation terms plus the
        payload (energy, position, time, classification) terms.
        """
        start_time = 0
        if self.print_time:
            start_time = time.time()

        pred_beta, pred_ccoords, pred_energy, pred_pos, pred_time, pred_id,\
        t_idx, t_energy, t_pos, t_time, t_pid,\
        rowsplits = inputs

        # Fail fast on non-finite predictions before anything is computed.
        pred_beta = tf.debugging.check_numerics(pred_beta,
                                                "pred_beta has NaNs")
        pred_ccoords = tf.debugging.check_numerics(pred_ccoords,
                                                   "pred_ccoords has NaNs")
        pred_energy = tf.debugging.check_numerics(pred_energy,
                                                  "pred_energy has NaNs")
        pred_pos = tf.debugging.check_numerics(pred_pos, "pred_pos has NaNs")
        pred_time = tf.debugging.check_numerics(pred_time,
                                                "pred_time has NaNs")
        pred_id = tf.debugging.check_numerics(pred_id, "pred_id has NaNs")

        # During graph tracing the row-split dimension can be unknown;
        # return a zero loss in that case rather than building invalid ops.
        if rowsplits.shape[0] is None:
            return tf.constant(0, dtype='float32')

        energy_weights = self.calc_energy_weights(t_energy)
        if not self.use_energy_weights:
            # Flat weights of 1 when energy weighting is disabled.
            energy_weights = tf.zeros_like(energy_weights) + 1.

        t_energy = tf.debugging.check_numerics(t_energy, "t_energy has NaNs")
        #t_energy = tf.where(t_energy<=0,0.,t_energy)
        pred_energy = tf.debugging.check_numerics(pred_energy,
                                                  "pred_energy has NaNs")
        energy_weights = tf.debugging.check_numerics(
            energy_weights, "energy_weights has NaNs")

        # Per-hit payload losses, each scaled by its configured weight.
        #also kill any gradients for zero weight
        energy_loss = self.energy_loss_weight * self.calc_energy_loss(
            t_energy, pred_energy)
        position_loss = self.position_loss_weight * self.calc_position_loss(
            t_pos, pred_pos)
        timing_loss = self.timing_loss_weight * self.calc_timing_loss(
            t_time, pred_time)
        classification_loss = self.classification_loss_weight * self.calc_classification_loss(
            t_pid, pred_id)

        full_payload = tf.concat(
            [energy_loss, position_loss, timing_loss, classification_loss],
            axis=-1)

        if self.payload_beta_clip > 0:
            # Hard-zero the payload below the beta threshold instead of
            # down-weighting it, so no gradient pushes beta below threshold.
            full_payload = tf.where(pred_beta < self.payload_beta_clip, 0.,
                                    full_payload)
            #clip not weight, so there is no gradient to push below threshold!

        is_spectator = tf.zeros_like(
            pred_beta
        )  #not used right now, and likely never again (if the truth remains ok)

        att, rep, noise, min_b, payload, exceed_beta = oc_loss(
            x=pred_ccoords,
            beta=pred_beta,
            truth_indices=t_idx,
            row_splits=rowsplits,
            is_spectator=is_spectator,
            payload_loss=full_payload,
            Q_MIN=self.q_min,
            S_B=self.s_b,
            energyweights=energy_weights,
            use_average_cc_pos=self.use_average_cc_pos,
            payload_rel_threshold=self.payload_rel_threshold,
            cont_beta_loss=self.cont_beta_loss,
            prob_repulsion=self.prob_repulsion,
            phase_transition=self.phase_transition > 0.,
            phase_transition_double_weight=self.phase_transition_double_weight,
            alt_potential_norm=self.alt_potential_norm,
            cut_payload_beta_gradient=self.cut_payload_beta_gradient,
            kalpha_damping_strength=self.kalpha_damping_strength)

        # BUGFIX: the original assigned every check result to `energy_loss`,
        # discarding the checked tensors — so the checks were not on the
        # compute path of the att/rep/min_b/noise values used below.
        # Reassign each checked tensor to itself, as done everywhere else.
        att = tf.debugging.check_numerics(att, "att loss has NaNs")
        rep = tf.debugging.check_numerics(rep, "rep loss has NaNs")
        min_b = tf.debugging.check_numerics(min_b, "min_b loss has NaNs")
        noise = tf.debugging.check_numerics(noise, "noise loss has NaNs")

        # Apply the configured scale factors to the condensation terms.
        att *= self.potential_scaling
        rep *= self.potential_scaling * self.repulsion_scaling
        min_b *= self.beta_loss_scale
        noise *= self.noise_scaler
        exceed_beta *= self.too_much_beta_scale

        # Reduced payload components come back from oc_loss in the same
        # order they were concatenated above.
        energy_loss = tf.debugging.check_numerics(payload[0],
                                                  "energy loss has NaNs")
        pos_loss = tf.debugging.check_numerics(payload[1],
                                               "position loss has NaNs")
        time_loss = tf.debugging.check_numerics(payload[2],
                                                "time loss has NaNs")
        class_loss = tf.debugging.check_numerics(
            payload[3], "classification loss has NaNs")

        lossval = att + rep + min_b + noise + energy_loss + pos_loss + time_loss + class_loss + exceed_beta

        lossval = tf.debugging.check_numerics(lossval, "loss has nan")
        lossval = tf.reduce_mean(lossval)

        if self.print_time:
            print('loss layer', self.name, 'took',
                  int((time.time() - start_time) * 100000.) / 100., 'ms')
            print('loss layer info:', self.name, 'batch took',
                  int((time.time() - self.loc_time) * 100000.) / 100., 'ms')
            self.loc_time = time.time()

        if self.print_loss:
            minbtext = 'min_beta_loss'
            if self.phase_transition > 0:
                minbtext = 'phase transition loss'
                print('avg beta', tf.reduce_mean(pred_beta))
            print('loss',
                  lossval.numpy(), 'attractive_loss', att.numpy(), 'rep_loss',
                  rep.numpy(), minbtext, min_b.numpy(), 'noise_loss',
                  noise.numpy(), 'energy_loss', energy_loss.numpy(),
                  'pos_loss', pos_loss.numpy(), 'time_loss', time_loss.numpy(),
                  'class_loss', class_loss.numpy(), 'exceed_beta',
                  exceed_beta.numpy(), '\n')

        return lossval