Example #1
0
    def was_described(self, by=None, as_being=None, at_time=None,
                      while_ignoring=0, haste=False, at_x=None, at_y=None,
                      online=True, record=True, realize_confusion=True,
                      laplace_smoothing=0):
        """
        Absorb one classification of this subject and update its probability.

        Parameters
        ----------
        by : agent, optional
            The classifying agent.  Must expose N, NT, kind, name, PL, PD,
            get_PL_realization, get_PD_realization, testhistory, skill and
            contribution.  If None, the call is a no-op.
        as_being : str, optional
            The classification, 'LENS' or 'NOT'.  If None, only the agent's
            classification count is advanced.
        at_time : optional
            Timestamp recorded in the annotation/test histories.
        while_ignoring : int
            Ignore classifiers until they have seen more than this many
            training subjects (by.NT).
        haste : bool
            If True, skip subjects that are already inactive/decided.
        at_x, at_y : list, optional
            Recorded click position(s); defaults to [-1] meaning "none".
        online : bool
            If True, update subject and agent in place; if False, just
            return the likelihood update factors.
        record : bool
            If True, append to the annotation and test histories.
        realize_confusion : bool
            If True, draw realizations of the agent's confusion matrix;
            otherwise use its expectation values PL, PD for every trajectory.
        laplace_smoothing : float
            Additive smoothing applied to the Bayesian update factor.

        Returns
        -------
        None, or the ndarray of likelihood factors when online is False.

        Raises
        ------
        Exception
            If as_being is neither 'LENS' nor 'NOT' (previously this could
            surface as a KeyError from the recording dict instead).

        Notes
        -----
        Relies on module-level ``np``, ``swap`` (pmin, informationGain) and
        ``Ntrajectory`` (number of probability trajectories per subject) --
        presumably defined at file scope; confirm against the full module.
        """
        # Fresh lists instead of mutable default arguments, so successive
        # calls can never share (and accidentally mutate) one default object.
        if at_x is None:
            at_x = [-1]
        if at_y is None:
            at_y = [-1]

        # Rename some variables:
        a_few_at_the_start = while_ignoring

        # Guard *before* touching the agent: the previous version did
        # by.N += 1 first, which raised AttributeError when by was None.
        if by is None:
            return

        if online:
            # Count this classification against the agent.
            by.N += 1

        # Nothing to process without a classification, or for banned agents.
        if as_being is None or by.kind == 'banned':
            return

        # Optional: skip straight past inactive subjects.  Warning the user
        # here would produce a huge stream of messages (subjects not cleanly
        # retired in the Zooniverse database), so we stay quiet.  Training
        # subjects cannot go inactive, but classifications after they cross
        # threshold must be ignored for the training set to give us the
        # selection function.  The trajectory plot shows training subjects'
        # *status*, not their *state*.
        if haste and (self.state == 'inactive'
                      or self.status == 'detected'
                      or self.status == 'rejected'):
            return

        # Validate the classification label once, up front.
        as_being_dict = {'LENS': 1, 'NOT': 0}
        if as_being not in as_being_dict:
            raise Exception("Unrecognised classification result: " + as_being)
        as_being_number = as_being_dict[as_being]

        # Update the annotation history (done even for under-trained
        # classifiers, as before).
        if record:
            self.annotationhistory['Name'] = np.append(self.annotationhistory['Name'], by.name)
            self.annotationhistory['ItWas'] = np.append(self.annotationhistory['ItWas'], as_being_number)
            self.annotationhistory['At_X'].append(at_x)
            self.annotationhistory['At_Y'].append(at_y)
            self.annotationhistory['PL'] = np.append(self.annotationhistory['PL'], by.PL)
            self.annotationhistory['PD'] = np.append(self.annotationhistory['PD'], by.PD)
            self.annotationhistory['At_Time'].append(at_time)

        # Ignore the classifier until they have had a certain amount of
        # training (NT > a_few_at_the_start), but still advance exposure:
        # it would be incorrect to compute mean classifications/retirement
        # differently from strict and alt-strict.
        if by.NT <= a_few_at_the_start:
            self.exposure += 1
            return

        # One confusion-matrix value per trajectory: either binomial
        # realizations or the plain expectation values.
        if realize_confusion:
            PL_realization = by.get_PL_realization(Ntrajectory)
            PD_realization = by.get_PD_realization(Ntrajectory)
        else:
            PL_realization = np.ones(Ntrajectory) * by.PL
            PD_realization = np.ones(Ntrajectory) * by.PD

        # Bayesian update factor P(C|T)/P(C), with optional Laplace smoothing.
        if as_being_number == 1:   # 'LENS'
            likelihood = PL_realization + laplace_smoothing
            likelihood /= (PL_realization * self.probability
                           + (1 - PD_realization) * (1 - self.probability)
                           + 2 * laplace_smoothing)
        else:                      # 'NOT'
            likelihood = (1 - PL_realization) + laplace_smoothing
            likelihood /= ((1 - PL_realization) * self.probability
                           + PD_realization * (1 - self.probability)
                           + 2 * laplace_smoothing)

        if not online:
            # Offline mode: report the update factors, change nothing.
            return likelihood

        # Update the subject's probability trajectories, clipped from below
        # at swap.pmin so a subject can always recover later.
        self.probability = np.maximum(likelihood * self.probability, swap.pmin)
        self.trajectory = np.append(self.trajectory, self.probability)
        self.exposure += 1
        self.update_state(at_time)

        # Update agent - training history is taken care of in agent.heard(),
        # which also keeps agent.skill up to date.
        if self.kind == 'test' and record:
            by.testhistory['ID'] = np.append(by.testhistory['ID'], self.ID)
            by.testhistory['I'] = np.append(
                by.testhistory['I'],
                swap.informationGain(self.mean_probability, by.PL, by.PD, as_being))
            by.testhistory['Skill'] = np.append(by.testhistory['Skill'], by.skill)
            by.testhistory['ItWas'] = np.append(by.testhistory['ItWas'], as_being_number)
            by.testhistory['At_Time'] = np.append(by.testhistory['At_Time'], at_time)
            by.contribution += by.skill

        return
Example #2
0
    def was_described(self, by=None, as_being=None, at_time=None,
                      while_ignoring=0, haste=False, at_x=None, at_y=None):
        """
        Absorb one classification of this subject and update its probability.

        Parameters
        ----------
        by : agent, optional
            The classifying agent.  Must expose N, NT, name, PL, PD,
            get_PL_realization, get_PD_realization, testhistory, skill and
            contribution.  If None, the call is a no-op.
        as_being : str, optional
            The classification, 'LENS' or 'NOT'.  If None, only the agent's
            classification count is advanced.
        at_time : optional
            Timestamp, stored as retirement_time when the subject retires.
        while_ignoring : int
            Ignore classifiers until they have seen more than this many
            training subjects (by.NT).
        haste : bool
            If True, skip subjects that are already inactive/decided.
        at_x, at_y : list, optional
            Recorded click position(s); defaults to [-1] meaning "none".

        Raises
        ------
        Exception
            If as_being is neither 'LENS' nor 'NOT'.

        Notes
        -----
        Relies on module-level ``np``, ``swap`` (pmin, informationGain) and
        ``Ntrajectory`` (number of probability trajectories per subject) --
        presumably defined at file scope; confirm against the full module.
        """
        # Fresh lists instead of mutable default arguments, so successive
        # calls can never share (and accidentally mutate) one default object.
        if at_x is None:
            at_x = [-1]
        if at_y is None:
            at_y = [-1]

        # Rename some variables:
        a_few_at_the_start = while_ignoring

        # Guard *before* touching the agent: the previous version did
        # by.N += 1 first, which raised AttributeError when by was None.
        if by is None:
            return

        # Update agent:
        by.N += 1

        if as_being is None:
            return

        # Optional: skip straight past inactive subjects.  Warning the user
        # here would produce a huge stream of messages (subjects not cleanly
        # retired in the Zooniverse database), so we stay quiet.  Training
        # subjects cannot go inactive, but classifications after they cross
        # threshold must be ignored for the training set to give us the
        # selection function.  The trajectory plot shows training subjects'
        # *status*, not their *state*.
        if haste and (self.state == 'inactive'
                      or self.status == 'detected'
                      or self.status == 'rejected'):
            return

        # Ignore the classifier until they have had a certain amount of
        # training (NT > a_few_at_the_start), but still advance exposure:
        # it would be incorrect to compute mean classifications/retirement
        # differently from strict and alt-strict.
        if by.NT <= a_few_at_the_start:
            self.exposure += 1
            return

        # Calculate likelihood for all Ntrajectory trajectories, generating
        # as many binomial deviates.
        PL_realization = by.get_PL_realization(Ntrajectory)
        PD_realization = by.get_PD_realization(Ntrajectory)

        # Bayesian update factor P(C|T)/P(C).
        if as_being == 'LENS':
            likelihood = PL_realization
            likelihood /= (PL_realization * self.probability
                           + (1 - PD_realization) * (1 - self.probability))
            as_being_number = 1
        elif as_being == 'NOT':
            likelihood = (1 - PL_realization)
            likelihood /= ((1 - PL_realization) * self.probability
                           + PD_realization * (1 - self.probability))
            as_being_number = 0
        else:
            raise Exception("Unrecognised classification result: " + as_being)

        # Update the subject's probability trajectories, clipped from below
        # at swap.pmin so a subject can always recover later.
        self.probability = np.maximum(likelihood * self.probability, swap.pmin)
        self.trajectory = np.append(self.trajectory, self.probability)
        self.exposure += 1

        # Mean is geometric over the trajectories; median needs an integer
        # index (// -- the old single slash breaks under Python 3).
        self.mean_probability = 10.0 ** (sum(np.log10(self.probability)) / Ntrajectory)
        self.median_probability = np.sort(self.probability)[Ntrajectory // 2]

        # Should we count it as a detection, or a rejection?
        # Only test subjects get de-activated:
        if self.mean_probability < self.rejection_threshold:
            self.status = 'rejected'
            if self.kind == 'test':
                self.state = 'inactive'
                self.retirement_time = at_time
                self.retirement_age = self.exposure

        elif self.mean_probability > self.detection_threshold:
            # Let's keep the detections live!
            self.status = 'detected'

        else:
            # Keep the subject alive!  This code is only reached if we are
            # not being hasty.
            self.status = 'undecided'
            if self.kind == 'test':
                self.state = 'active'
                self.retirement_time = 'not yet'
                self.retirement_age = 0.0

        # Update agent - training history is taken care of in agent.heard(),
        # which also keeps agent.skill up to date.
        if self.kind == 'test':
            by.testhistory['ID'] = np.append(by.testhistory['ID'], self.ID)
            by.testhistory['I'] = np.append(
                by.testhistory['I'],
                swap.informationGain(self.mean_probability, by.PL, by.PD, as_being))
            by.testhistory['Skill'] = np.append(by.testhistory['Skill'], by.skill)
            by.testhistory['ItWas'] = np.append(by.testhistory['ItWas'], as_being_number)
            by.contribution += by.skill

        # Update the annotation history.
        self.annotationhistory['Name'] = np.append(self.annotationhistory['Name'], by.name)
        self.annotationhistory['ItWas'] = np.append(self.annotationhistory['ItWas'], as_being_number)
        self.annotationhistory['At_X'].append(at_x)
        self.annotationhistory['At_Y'].append(at_y)
        self.annotationhistory['PL'] = np.append(self.annotationhistory['PL'], by.PL)
        self.annotationhistory['PD'] = np.append(self.annotationhistory['PD'], by.PD)

        return
Example #3
0
    def was_described(self,
                      by=None,
                      as_being=None,
                      at_time=None,
                      while_ignoring=0,
                      haste=False,
                      at_x=None,
                      at_y=None,
                      online=True,
                      record=True,
                      realize_confusion=True):
        """
        Absorb one classification of this subject and update its probability.

        Parameters
        ----------
        by : agent, optional
            The classifying agent.  Must expose N, NT, kind, name, PL, PD,
            get_PL_realization, get_PD_realization, testhistory, skill and
            contribution.  If None, the call is a no-op.
        as_being : str, optional
            The classification, 'LENS' or 'NOT'.  If None, only the agent's
            classification count is advanced.
        at_time : optional
            Timestamp recorded in the annotation/test histories.
        while_ignoring : int
            Ignore classifiers until they have seen more than this many
            training subjects (by.NT).
        haste : bool
            If True, skip subjects that are already inactive/decided.
        at_x, at_y : list, optional
            Recorded click position(s); defaults to [-1] meaning "none".
        online : bool
            If True, update subject and agent in place; if False, just
            return the likelihood update factors.
        record : bool
            If True, append to the annotation and test histories.
        realize_confusion : bool
            If True, draw realizations of the agent's confusion matrix;
            otherwise use its expectation values PL, PD for every trajectory.

        Returns
        -------
        None, or the ndarray of likelihood factors when online is False.

        Raises
        ------
        Exception
            If as_being is neither 'LENS' nor 'NOT' (previously this could
            surface as a KeyError from the recording dict instead).

        Notes
        -----
        Relies on module-level ``np``, ``swap`` (pmin, informationGain) and
        ``Ntrajectory`` (number of probability trajectories per subject) --
        presumably defined at file scope; confirm against the full module.
        """
        # Fresh lists instead of mutable default arguments, so successive
        # calls can never share (and accidentally mutate) one default object.
        if at_x is None:
            at_x = [-1]
        if at_y is None:
            at_y = [-1]

        # Rename some variables:
        a_few_at_the_start = while_ignoring

        # Guard *before* touching the agent: the previous version did
        # by.N += 1 first, which raised AttributeError when by was None.
        if by is None:
            return

        if online:
            # Count this classification against the agent.
            by.N += 1

        # Nothing to process without a classification, or for banned agents.
        if as_being is None or by.kind == 'banned':
            return

        # Optional: skip straight past inactive subjects.  Warning the user
        # here would produce a huge stream of messages (subjects not cleanly
        # retired in the Zooniverse database), so we stay quiet.  Training
        # subjects cannot go inactive, but classifications after they cross
        # threshold must be ignored for the training set to give us the
        # selection function.  The trajectory plot shows training subjects'
        # *status*, not their *state*.
        if haste and (self.state == 'inactive'
                      or self.status == 'detected'
                      or self.status == 'rejected'):
            return

        # Validate the classification label once, up front.
        as_being_dict = {'LENS': 1, 'NOT': 0}
        if as_being not in as_being_dict:
            raise Exception("Unrecognised classification result: " + as_being)
        as_being_number = as_being_dict[as_being]

        # Update the annotation history (done even for under-trained
        # classifiers, as before).
        if record:
            self.annotationhistory['Name'] = np.append(
                self.annotationhistory['Name'], by.name)
            self.annotationhistory['ItWas'] = np.append(
                self.annotationhistory['ItWas'], as_being_number)
            self.annotationhistory['At_X'].append(at_x)
            self.annotationhistory['At_Y'].append(at_y)
            self.annotationhistory['PL'] = np.append(
                self.annotationhistory['PL'], by.PL)
            self.annotationhistory['PD'] = np.append(
                self.annotationhistory['PD'], by.PD)
            self.annotationhistory['At_Time'].append(at_time)

        # Ignore the classifier until they have had a certain amount of
        # training (NT > a_few_at_the_start), but still advance exposure:
        # it would be incorrect to compute mean classifications/retirement
        # differently from strict and alt-strict.
        if by.NT <= a_few_at_the_start:
            self.exposure += 1
            return

        # One confusion-matrix value per trajectory: either binomial
        # realizations or the plain expectation values.
        if realize_confusion:
            PL_realization = by.get_PL_realization(Ntrajectory)
            PD_realization = by.get_PD_realization(Ntrajectory)
        else:
            PL_realization = np.ones(Ntrajectory) * by.PL
            PD_realization = np.ones(Ntrajectory) * by.PD

        # Bayesian update factor P(C|T)/P(C).
        if as_being_number == 1:   # 'LENS'
            likelihood = PL_realization
            likelihood /= (PL_realization * self.probability
                           + (1 - PD_realization) * (1 - self.probability))
        else:                      # 'NOT'
            likelihood = (1 - PL_realization)
            likelihood /= ((1 - PL_realization) * self.probability
                           + PD_realization * (1 - self.probability))

        if not online:
            # Offline mode: report the update factors, change nothing.
            return likelihood

        # Update the subject's probability trajectories, clipped from below
        # at swap.pmin so a subject can always recover later.
        self.probability = np.maximum(likelihood * self.probability, swap.pmin)
        self.trajectory = np.append(self.trajectory, self.probability)
        self.exposure += 1
        self.update_state(at_time)

        # Update agent - training history is taken care of in agent.heard(),
        # which also keeps agent.skill up to date.
        if self.kind == 'test' and record:
            by.testhistory['ID'] = np.append(by.testhistory['ID'], self.ID)
            by.testhistory['I'] = np.append(
                by.testhistory['I'],
                swap.informationGain(self.mean_probability, by.PL, by.PD,
                                     as_being))
            by.testhistory['Skill'] = np.append(by.testhistory['Skill'],
                                                by.skill)
            by.testhistory['ItWas'] = np.append(by.testhistory['ItWas'],
                                                as_being_number)
            by.testhistory['At_Time'] = np.append(by.testhistory['At_Time'],
                                                  at_time)
            by.contribution += by.skill

        return