Code Example #1
File: rule.py  Project: jdiec/simple_gui
 def permissable_change(self, old, new):
     b = (abs(utility.mean(old, self.get_strength) - self.mean) >
          abs(utility.mean(new, self.get_strength) - self.mean))
     if self.check(new) and not b:
         # return 2 here so that caller can distinguish if they
         # care that we have "worsened" but are still within
         # tolerance
         return 2
     else:
         return b
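
The rule.py examples on this page call utility.mean(items, self.get_strength) and, further down, utility.std(items, self.get_strength), i.e. statistics keyed by a strength getter. The GroupEng utility module itself is not reproduced here, so the following is only a minimal sketch consistent with those call sites; the signatures are taken from the examples, the bodies are assumptions.

# Hypothetical sketch of the keyed statistics helpers assumed by the rule.py
# examples; not the project's actual utility module.
import math

def mean(items, key=lambda x: x):
    # average of key(item) over the collection
    values = [key(item) for item in items]
    return sum(values) / float(len(values))

def std(items, key=lambda x: x):
    # population standard deviation of key(item) over the collection
    m = mean(items, key)
    return math.sqrt(sum((key(item) - m) ** 2 for item in items) / float(len(items)))
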
Code Example #2
 def permissable_change(self, old, new):
     b = (abs(utility.mean(old, self.get_strength) - self.mean) >
          abs(utility.mean(new, self.get_strength) - self.mean))
     if self.check(new) and not b:
         # return 2 here so that caller can distinguish if they
         # care that we have "worsened" but are still within
         # tolerance
         return 2
     else:
         return b
Code Example #3
 def simulate(self):
     for job in self.design:
         self.setJobParameters(job)
         job_outputs = collections.OrderedDict()  # a dictionary accessed by output variable name
         # initialize empty lists to track repetition outputs for each output variable
         for output in self.output_getters:
             job_outputs[output] = []
         for i in range(self.job_repetitions):
             self.simInitFunc()
             while not self.simStopFunc():
                 self.simStepFunc()
             outputs = self.getOutputs()
             for output in outputs:
                 job_outputs[output].append(outputs[output])
             self.outputFile.write("\n")
             self.fileWriteOutputs(outputs)
             
         # write statistics to file
         averages = collections.OrderedDict()
         stddevs = collections.OrderedDict()
         for variable in job_outputs:
             averages[variable] = U.mean(job_outputs[variable])
             stddevs[variable] = U.standardDeviation(job_outputs[variable])
         self.output("\naverages: ")
         self.fileWriteOutputs(averages)
         self.output("\nstandard deviations: ")
         self.fileWriteOutputs(stddevs)
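
The simulation and metrics examples below use a separate statistics module imported as U (or u) and called with plain numeric lists: U.mean, U.standardDeviation, U.popStdDev and U.median. That module is not shown on this page either, so the sketch below is only a plausible reading of those call sites, not the authors' implementation.

# Hypothetical list-based statistics helpers matching the U.* calls in these
# examples; the names come from the calls, the bodies are illustrative guesses.
import math

def mean(values):
    return sum(values) / float(len(values))

def popStdDev(values):
    # population standard deviation (divide by N)
    m = mean(values)
    return math.sqrt(sum((v - m) ** 2 for v in values) / float(len(values)))

def standardDeviation(values):
    # sample standard deviation (divide by N - 1)
    m = mean(values)
    return math.sqrt(sum((v - m) ** 2 for v in values) / float(len(values) - 1))

def median(values):
    ordered = sorted(values)
    n = len(ordered)
    mid = n // 2
    return ordered[mid] if n % 2 else (ordered[mid - 1] + ordered[mid]) / 2.0
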
Code Example #4
File: experiment.py  Project: strangeintp/common
    def simulate(self):
        for job in self.design:
            self.setJobParameters(job)
            job_outputs = collections.OrderedDict()  # a dictionary accessed by output variable name
            # initialize empty lists to track repetition outputs for each output variable
            for output in self.output_getters:
                job_outputs[output] = []
            for i in range(self.job_repetitions):
                self.datetime = U.getTimeStampString()
                self.initiateSim()
                while not self.stopSim():
                    self.stepSim()
                outputs = self.getOutputs()
                for output in outputs:
                    job_outputs[output].append(outputs[output])
                self.outputFile.write("\n %s, %d"%(self.datetime, job["job_id"]))
                self.fileWriteJobParameters(job)
                self.fileWriteOutputs(outputs)

            # write statistics to file
            averages = collections.OrderedDict()
            stddevs = collections.OrderedDict()
            for variable in job_outputs:
                averages[variable] = U.mean(job_outputs[variable])
                stddevs[variable] = U.popStdDev(job_outputs[variable])
            self.summary_avgs[job["job_id"]] = averages
            self.summary_stds[job["job_id"]] = stddevs
            self.output("\naverages: ")
            self.fileWriteJobParameters(job)
            self.fileWriteOutputs(averages)
            self.output("\nstandard deviations: ")
            self.fileWriteJobParameters(job)
            self.fileWriteOutputs(stddevs)
Code Example #5
    def simulate(self):
        for job in self.design:
            self.setJobParameters(job)
            job_outputs = collections.OrderedDict()  # a dictionary accessed by output variable name
            # initialize empty lists to track repetition outputs for each output variable
            for output in self.output_getters:
                job_outputs[output] = []
            for i in range(self.job_repetitions):
                self.initiateSim()
                while not self.stopSim():
                    self.stepSim()
                outputs = self.getOutputs()
                for output in outputs:
                    job_outputs[output].append(outputs[output])
                self.outputFile.write("\n %d" % job["job_id"])
                self.fileWriteJobParameters(job)
                self.fileWriteOutputs(outputs)

            # write statistics to file
            averages = collections.OrderedDict()
            stddevs = collections.OrderedDict()
            for variable in job_outputs:
                averages[variable] = U.mean(job_outputs[variable])
                stddevs[variable] = U.popStdDev(job_outputs[variable])
            self.summary_avgs[job["job_id"]] = averages
            self.summary_stds[job["job_id"]] = stddevs
            self.output("\naverages: ")
            self.fileWriteJobParameters(job)
            self.fileWriteOutputs(averages)
            self.output("\nstandard deviations: ")
            self.fileWriteJobParameters(job)
            self.fileWriteOutputs(stddevs)
Code Example #6
File: rule.py  Project: jdiec/simple_gui
    def _init(self, attribute, course, value = 'all', weight = None, tol = None,
                 **kwargs):
        self.mean = utility.mean(course.students, self.get_strength)
        std = utility.std(course.students, self.get_strength)

        if not tol:
            # default to tolerance of half a standard deviation
            tol = .5
        self.tol = std*tol
Code Example #7
File: games.py  Project: strangeintp/game-theory
 def computeMetrics(self):
     scores = [agent.score for agent in self.agent_set]
     self.mean = u.mean(scores)
     self.std = u.popStdDev(scores)
     self.minscore = min(scores)
     self.medscore = u.median(scores)
     self.maxscore = max(scores)
     agents_sorted = sorted(self.agent_set, key=lambda agent:agent.score)
     self.minscoreHfraction = agents_sorted[0].getHFraction()
     self.maxscoreHfraction = agents_sorted[-1].getHFraction()
     self.medscoreHfraction = agents_sorted[int(len(self.agent_set)/2)].getHFraction()
     hfractions = [agent.getHFraction() for agent in self.agent_set]
     self.medHfraction = u.median(hfractions)
     self.avgHfraction = u.mean(hfractions)
     self.minHfraction = min(hfractions)
     self.maxHfraction = max(hfractions)
     self.sumHfractions = sum(hfractions)
     dhfractions = [abs(hfractions[i] - self.oldHfractions[i]) for i in range(len(hfractions))]
     self.sumdH = sum(dhfractions)
Code Example #8
    def _fix(self, student, groups, students):
        group = student.group
        # if the group mean is above the class mean, look for groups below it (and vice versa)
        if utility.mean(group, self.get_strength) - self.mean > 0:
            test = lambda x: utility.mean(x, self.get_strength) < self.mean
        else:
            test = lambda x: utility.mean(x, self.get_strength) > self.mean

        targets = filter(lambda g: test(g), groups)

        short_list = filter(
            lambda g: abs(utility.mean(g, self.get_strength) - self.mean) >
            self.tol, targets)

        try:
            if find_target_and_swap(student, short_list):
                return True
            elif find_target_and_swap(student, targets):
                return True
            elif find_target_and_swap(student, groups):
                return True
        except SwapButNotFix:
            return False
Code Example #9
File: rule.py  Project: jdiec/simple_gui
    def _fix(self, student, groups, students):
        group = student.group
        # if the group mean is above the class mean, look for groups below it (and vice versa)
        if utility.mean(group, self.get_strength) - self.mean > 0:
            test = lambda x: utility.mean(x, self.get_strength) < self.mean
        else:
            test = lambda x: utility.mean(x, self.get_strength) > self.mean

        targets = filter(lambda g: test(g), groups)

        short_list = filter(
            lambda g: abs(utility.mean(g, self.get_strength) - self.mean) >
            self.tol, targets)

        try:
            if find_target_and_swap(student, short_list):
                return True
            elif find_target_and_swap(student, targets):
                return True
            elif find_target_and_swap(student, groups):
                return True
        except SwapButNotFix:
            return False
Code Example #10
def calculatePacking():
    num_neighbors = []
    for agent in world.agents:
        x=agent.x
        y=agent.y
        neighbors = 0
        for dx in [-1,0,1]:
            for dy in [-1,0,1]:
                if 0<=x+dx<width and 0<=y+dy<height:
                    neighbors += len(world.patch_at[(x+dx,y+dy)].agents_here)
        neighbors -= 1 # need to decrement where the agent is
        num_neighbors.append(neighbors)
    return U.mean(num_neighbors)/8
Code Example #11
    def _init(self,
              attribute,
              course,
              value='all',
              weight=None,
              tol=None,
              **kwargs):
        self.mean = utility.mean(course.students, self.get_strength)
        std = utility.std(course.students, self.get_strength)

        if not tol:
            # default to tolerance of half a standard deviation
            tol = .5
        self.tol = std * tol
Code Example #12
 def regrowth(self):
     hoovers = []
     for x in range(L.width):
         for y in range(L.width):
             if self.foraging_resources[(x,y)] <= 0:
                 # landscape resources can't actually go to zero or it won't regrow
                 self.foraging_resources[(x,y)] = resource_zero
             self.foraging_resources[(x,y)] *= self.regrowth_rate[x,y]
             if self.foraging_resources[(x,y)] > self.max_foraging_resources[(x,y)]:
                 self.foraging_resources[(x,y)] = self.max_foraging_resources[(x,y)]
             # compute the local Hoover index
             residents = self.getNeighborsAround2((x,y), radius=1)
             stored_amounts = []
             for resident in residents:
                 stored_amounts.append(resident.food_storage)
             local_hoover = U.HooverIndex(stored_amounts)
             hoovers.append(local_hoover)
     self.max_hoover.append(max(hoovers))
     self.avg_hoover.append(U.mean(hoovers))
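
U.HooverIndex is likewise not shown. If it follows the standard Hoover ("Robin Hood") index of inequality, it is half the sum of the absolute deviations of each share from an equal share; the sketch below assumes that definition and is not necessarily the project's version.

# Sketch of a Hoover index helper, assuming the textbook definition
# H = 0.5 * sum(|x_i / total - 1 / n|) over the stored amounts.
def HooverIndex(amounts):
    total = float(sum(amounts))
    n = len(amounts)
    if n == 0 or total == 0:
        return 0.0  # nothing stored (or no residents): treat as perfectly equal
    return 0.5 * sum(abs(x / total - 1.0 / n) for x in amounts)
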
Code Example #13
 def regrowth(self):
     hoovers = []
     for x in range(L.width):
         for y in range(L.width):
             if self.foraging_resources[(x, y)] <= 0:
                 # landscape resources can't actually go to zero or it won't regrow
                 self.foraging_resources[(x, y)] = resource_zero
             self.foraging_resources[(x, y)] *= self.regrowth_rate[x, y]
             if self.foraging_resources[(x, y)] > self.max_foraging_resources[(x, y)]:
                 self.foraging_resources[(x, y)] = self.max_foraging_resources[(x, y)]
             # compute the local Hoover index
             residents = self.getNeighborsAround2((x, y), radius=1)
             stored_amounts = []
             for resident in residents:
                 stored_amounts.append(resident.food_storage)
             local_hoover = U.HooverIndex(stored_amounts)
             hoovers.append(local_hoover)
     self.max_hoover.append(max(hoovers))
     self.avg_hoover.append(U.mean(hoovers))
Code Example #14
 def avg_pop(self):
     return U.mean(self.theWorld.populations[self.metrics_start:])
Code Example #15
def datalogger_to_dict(data_dict, key_dict, data_dir):
    """
    Removes the datalogger entry from cal and data dicts, replacing with new fields for each logged value.

    Parameters:
        cal_dict - Dictionary of CALMIT calibration data
        data_dict - Dicitonary of CALMIT scandata (not cal)
        key_dict - A key dictionary created via create_key_dict()

    Returns:
        data_dict, key_dict - Modified dictionaries.
    """
    data_datalogger = data_dict[key_dict['Data Logger']]
    num_entries = None
    for data_str in data_datalogger:
        split_entry = split_datalogger_entry(data_str)
        if data_str != '':
            num_entries = len(split_entry)
            break

    # If no entries were found, just remove the datalogger field altogether and move on
    if num_entries is None:
        del data_dict[key_dict['Data Logger']]
        return data_dict

    # Split out all of the data from the datalogger strings. Each var is its own list.
    data_entries = unzip_dlogger_entries(split_datalogger_entries(data_datalogger), num_entries=num_entries)

    # TODO rename temperature 1 and 2.
    entry_names = ['Battery Voltage', 'Temperature 1', 'Temperature 2']
    if num_entries == 4:
        entry_names.append('Pyronometer')
    elif num_entries == 5:
        # Either Pyronometer then Quantum Sensor or None then Pyronometer.
        if all(float(val) < 0 for val in data_entries[3]) and all(float(val) < 0 for val in data_entries[4]):
            entry_names.extend([None, 'Pyronometer'])
            entry_names[2] = None  # Temperature 2 also becomes None.
        else:
            if mean(data_entries[3]) > mean(data_entries[4]):
                err_str = "\n WARNING: PYRONOMETER VALUES FOUND HIGHER THAN QUANTUM SENSOR. MAYBE " \
                          "UNKNOWN DATALOGGER TYPE {0}. Proceeding anyway. \n".format(data_dir)
                print(err_str)

            entry_names.extend(['Pyronometer', 'Quantum Sensor'])

    elif num_entries == 6 and all(float(val) < 0 for val in data_entries[5]):  # Last value is -99999
        # battery volt, temp1, temp2, Pyronometer, Quantum Sensor, None.
        entry_names.extend(['Pyronometer', 'Quantum Sensor', None])

    elif num_entries == 3:
        # Just battery voltage, temp1, temp2.
        pass

    else:
        # TODO Implement other datalogger types (if there are any others...)
        import pdb
        pdb.set_trace()
        raise NotImplementedError('Unrecognized Datalogger string. Sorry!')

    # Create an entry in the data and cal dicts for the split datalogger data.
    for name in entry_names:
        if name is not None:
            key_dict[name] = name  # Add this to the key dict, for consistency (other functs rely on it).
            data_dict[name] = []

    # Add the data to the data dict
    for name, values in zip(entry_names, data_entries):
        if name is not None:
            # Check for a list of nodata.
            # collect the unique non-empty values
            unique_vals = set(val for val in values if val != '')
            # Datalogger should not have negative values.
            if all(float(val) < 0 for val in unique_vals):
                # Don't add to the data_dict.
                pass
            else:
                data_dict[name] = []
                for value in values:
                    # We assume DL values less than 0 are bad/nodata values.
                    if value == '':
                        data_dict[name].append('-9999')
                    elif float(value) < 0:
                        # TODO standardize nodata value. For now, use -9999
                        data_dict[name].append('-9999')
                    elif name in {'Temperature 1', 'Temperature 2'} and float(value) > 250:
                        data_dict[name].append('-9999')
                    else:
                        data_dict[name].append(value)

    del data_dict[key_dict['Data Logger']]
    return data_dict
Code Example #16
 def avg_shared(self):
     return U.mean(self.theWorld.food_shared[self.metrics_start:])
Code Example #17
 def avg_median_stored(self):
     return U.mean(self.theWorld.median_storage[self.metrics_start:])
Code Example #18
 def adult_avg_life(self):
     return U.mean(self.theWorld.adult_ages_at_death)
Code Example #19
 def stepSim(self):
     self.time += 1
     self.theWorld.step()
     self.max_prestige.append(max(self.theWorld.hh_prestige))
     self.avg_prestige.append(U.mean(self.theWorld.hh_prestige))
Code Example #20
 def avg_shared(self):
     return U.mean(self.theWorld.food_shared[self.metrics_start:])
Code Example #21
 def avg_median_stored(self):
     return U.mean(self.theWorld.median_storage[self.metrics_start:])
Code Example #22
 def adult_avg_life(self):
     return U.mean(self.theWorld.adult_ages_at_death)
Code Example #23
 def computeWealthMetrics(self):
     self.median_storage.append(U.median(self.hh_food_stored))
     self.avg_food_stored.append(U.mean(self.hh_food_stored))
Code Example #24
    def step(self):
        emptyhouses = []
        self.food_shared_step = 0

        #activation order
        if F.homogeneous():  # completely random activation order if homogeneous foraging abilities
            rnd.shuffle(self.households)
        else:  # activate based on foraging ability with some randomness
            activation_order = lambda hh: hh.foragingAbility() * U.GenBoundedRandomNormal(1, 0.2, 0.5, 1.5)
            self.households = sorted(self.households,
                                     key=lambda hh: activation_order(hh))
        avg_x = 0
        avg_y = 0
        self.population = 0
        self.kinship_spans = []
        self.hh_food_stored = []
        self.pop_expertise = []
        self.hh_prestige = []

        self.brn_sharing.append(0)
        self.grn_sharing.append(0)
        self.com_sharing.append(0)
        for hh in self.households:
            hh.step()

            if hh.hasDied():
                emptyhouses.append(hh)
                self.tot_hh_age += hh.age
                self.dead_houses += 1
            else:
                self.population += hh.size()
                x, y = self.hh_locations[hh]
                avg_x += x
                avg_y += y
                for member in hh.members():
                    self.kinship_spans.append(member.kinship_span)
                    self.pop_expertise.append(member.innate_foraging_expertise)
                self.hh_prestige.append(hh.prestige())
                self.hh_food_stored.append(hh.food_storage)

        for hh in emptyhouses:
            self.removeHousehold(hh)

        self.regrowth()

        #metrics
        self.avg_hh_age.append(self.tot_hh_age / self.dead_houses)
        #         self.avg_hh_age.append(self.tot_hh_age/len(self.dead_houses))
        if len(self.households) > 0:
            self.avg_hh_size.append(self.population / len(self.households))
        else:
            self.avg_hh_size.append(0)
        self.food_shared.append(self.food_shared_step)
        self.food_shared_total += self.food_shared_step
        self.food_shared_totals.append(self.food_shared_total)

        self.populations.append(self.population)
        self.avg_pop.append(sum(self.populations) / len(self.populations))
        if len(self.populations) < 100:
            self.avg_pop_100.append(
                sum(self.populations) / len(self.populations))
        else:
            self.avg_pop_100.append(sum(self.populations[-100:]) / 100)

        self.avg_ages.append(U.mean(self.ages_at_death))
        self.avg_adult_ages.append(U.mean(self.adult_ages_at_death))
        self.computeWealthMetrics()
Code Example #25
File: rule.py  Project: jdiec/simple_gui
 def _check(self, students):
     return abs(utility.mean(students, self.get_strength) - self.mean) < self.tol
Code Example #26
 def avg_run_prestige(self):
     return U.mean(self.avg_prestige[self.metrics_start:])
Code Example #27
 def _check(self, students):
     return abs(utility.mean(students, self.get_strength) -
                self.mean) < self.tol
Code Example #28
 def avg_max_hoover(self):
     return U.mean(self.theWorld.max_hoover[self.metrics_start:])
Code Example #29
 def computeWealthMetrics(self):
     self.median_storage.append(U.median(self.hh_food_stored))
     self.avg_food_stored.append(U.mean(self.hh_food_stored))
Code Example #30
 def brn_shared(self):
     return U.mean(self.theWorld.brn_sharing[self.metrics_start:])
Code Example #31
 def stepSim(self):
     self.time += 1
     self.theWorld.step()
     self.max_prestige.append(max(self.theWorld.hh_prestige))
     self.avg_prestige.append(U.mean(self.theWorld.hh_prestige))
Code Example #32
    def step(self):
        emptyhouses = []
        self.food_shared_step = 0
        
        #activation order
        if F.homogeneous():  # completely random activation order if homogeneous foraging abilities
            rnd.shuffle(self.households)
        else:   # activate based on foraging ability with some randomness
            activation_order = lambda hh : hh.foragingAbility()*U.GenBoundedRandomNormal(1, 0.2, 0.5, 1.5)
            self.households = sorted(self.households, key=lambda hh: activation_order(hh))
        avg_x=0
        avg_y=0
        self.population = 0
        self.kinship_spans = []
        self.hh_food_stored = []
        self.pop_expertise = []
        self.hh_prestige = []
        
        self.brn_sharing.append(0)
        self.grn_sharing.append(0)
        self.com_sharing.append(0)
        for hh in self.households:
            hh.step()
            
            if hh.hasDied():
                emptyhouses.append(hh)
                self.tot_hh_age += hh.age
                self.dead_houses += 1
            else:
                self.population += hh.size()
                x, y = self.hh_locations[hh]
                avg_x += x
                avg_y += y
                for member in hh.members():
                    self.kinship_spans.append(member.kinship_span)
                    self.pop_expertise.append(member.innate_foraging_expertise)
                self.hh_prestige.append(hh.prestige())
                self.hh_food_stored.append(hh.food_storage)
        
        for hh in emptyhouses:
            self.removeHousehold(hh)
        
        self.regrowth()
        
        #metrics
        self.avg_hh_age.append(self.tot_hh_age/self.dead_houses)
#         self.avg_hh_age.append(self.tot_hh_age/len(self.dead_houses))
        if len(self.households)>0:
            self.avg_hh_size.append(self.population/len(self.households))
        else :
            self.avg_hh_size.append(0)
        self.food_shared.append(self.food_shared_step)
        self.food_shared_total += self.food_shared_step
        self.food_shared_totals.append(self.food_shared_total)
        
        self.populations.append(self.population)
        self.avg_pop.append(sum(self.populations)/len(self.populations))
        if len(self.populations) < 100:
            self.avg_pop_100.append(sum(self.populations)/len(self.populations))
        else:
            self.avg_pop_100.append(sum(self.populations[-100:])/100)
            
        self.avg_ages.append(U.mean(self.ages_at_death))
        self.avg_adult_ages.append(U.mean(self.adult_ages_at_death))
        self.computeWealthMetrics()
Code Example #33
File: metrics.py  Project: philiplessner/FunctionalML
def de_mean(x):
    """translate x by subtracting its mean (so the result has mean 0)"""
    xbar = mean(x)
    return [xi - xbar for xi in x]
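
A quick usage check for de_mean, assuming mean(x) is the ordinary arithmetic mean of a numeric list: subtracting the mean recenters the data at zero, which is the usual first step before computing variances or covariances.

# Worked example: the mean of [1, 2, 3] is 2, so de_mean shifts every value by -2.
print(de_mean([1, 2, 3]))          # values recentered at zero: [-1, 0, 1]
print(mean(de_mean([4, 8, 12])))   # 0 (up to floating point)
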
Code Example #34
 def avg_run_prestige(self):
     return U.mean(self.avg_prestige[self.metrics_start:])
Code Example #35
File: metrics.py  Project: philiplessner/FunctionalML
def de_mean(x):
    """translate x by subtracting its mean (so the result has mean 0)"""
    xbar = mean(x)
    return [xi - xbar for xi in x]
Code Example #36
 def avg_max_hoover(self):
     return U.mean(self.theWorld.max_hoover[self.metrics_start:])
Code Example #37
def run(input_deck):
    """
    Run GroupEng as specified by input_deck

    Parameters
    ----------
    input_deck: basestring: filename
        Input file specifying class information and grouping rules

    Output
    ------
    Output files determined by Input deck
    """
    dek = parser.read_input(input_deck)

    students = load_classlist(dek['classlist'], dek.get('student_identifier'))
    identifier = students[0].identifier
    course = Course(students, dek['group_size'], dek.get('uneven_size'))

    rules = [make_rule(r, course) for r in dek['rules']]

    balance_rules = filter(lambda x: isinstance(x, Balance), rules)

    groups = group.make_initial_groups(course, balance_rules)

    # Add a rule to distribute phantoms to avoid having more than one phantom
    # per group, put it first so that it is highest priority
    # we have to add this after the phantoms are created by
    # group.make_initial_groups so that it can see the phantoms
    rules = [Distribute(identifier, course, 'phantom')] + rules

    suceeded = apply_rules_list(rules, groups, course.students)

    groups.sort(key=attrgetter('group_number'))

    def failures(r):
        return reduce(lambda x, y: x + (1 - r.check(y)), groups, 0)

    if failures(rules[0]) != 0:
        raise UnevenGroups()

    ############################################################################
    # Output
    ############################################################################

    run_name = os.path.splitext(input_deck)[0]

    outdir = 'groups_{0}_{1}'.format(run_name,
                                     time.strftime('%Y-%m-%d_%H-%M-%S'))

    os.mkdir(outdir)
    os.chdir(outdir)

    def outfile(o):
        return file('{0}_{1}'.format(run_name, o), 'w')

    group_output(groups, outfile('groups.csv'), identifier)
    group_output(groups, outfile('groups.txt'), identifier, sep='\n')
    student_full_output(students, identifier, outfile('classlist.csv'))

    report = outfile('statistics.txt')

    report.write('Ran GroupEng on: {0} with students from {1}\n\n'.format(
        input_deck, dek['classlist']))

    report.write('Made {0} groups\n\n'.format(len(groups)))

    for r in rules[1:]:
        n_fail = failures(r)
        if isinstance(r, Balance):
            group_means = sorted([mean(g, r.get_strength) for g in groups])
            attr = r.attribute
            report.write('{0} groups failed:'.format(n_fail))
            report.write('{0}: '.format(r))
            report.write('Class {0} Mean: {1:3.2f}, '.format(
                attr, mean(students, r.get_strength)))
            report.write('Class {0} Std Dev: {1:3.2f}, '.format(
                attr, std(students, r.get_strength)))
            report.write('Std Dev of Group {0} Means: {1:3.2f}'.format(
                attr, std(group_means)))
            report.write('\n\n')
        else:
            report.write('{0} groups failed: {1}\n\n'.format(n_fail, r))

    report.write('Group Summaries\n')
    report.write('---------------\n')

    for g in groups:
        report.write('Group {0}: '.format(g.group_number))
        items = []
        for r in balance_rules:
            items.append('<{0} Mean: {1:3.2f}>'.format(
                r.attribute, mean(g, r.get_strength)))
        for r in rules:
            if not r.check(g):
                items.append('Failed {0}'.format(r))
        report.write(', '.join(items))
        report.write('\n')

    report.write('\n')

    return groups, suceeded, outdir
Code Example #38
 def brn_shared(self):
     return U.mean(self.theWorld.brn_sharing[self.metrics_start:])
Code Example #39
def datalogger_to_dict(data_dict, key_dict, data_dir):
    """
    Removes the datalogger entry from cal and data dicts, replacing with new fields for each logged value.

    Parameters:
        cal_dict - Dictionary of CALMIT calibration data
        data_dict - Dicitonary of CALMIT scandata (not cal)
        key_dict - A key dictionary created via create_key_dict()

    Returns:
        data_dict, key_dict - Modified dictionaries.
    """
    data_datalogger = data_dict[key_dict['Data Logger']]
    num_entries = None
    for data_str in data_datalogger:
        split_entry = split_datalogger_entry(data_str)
        if data_str != '':
            num_entries = len(split_entry)
            break

    # If no entries were found, just remove the datalogger field altogether and move on
    if num_entries is None:
        del data_dict[key_dict['Data Logger']]
        return data_dict

    # Split out all of the data from the datalogger strings. Each var is its own list.
    data_entries = unzip_dlogger_entries(
        split_datalogger_entries(data_datalogger), num_entries=num_entries)

    # TODO rename temperature 1 and 2.
    entry_names = ['Battery Voltage', 'Temperature 1', 'Temperature 2']
    if num_entries == 4:
        entry_names.append('Pyronometer')
    elif num_entries == 5:
        # Either Pyronometer then Quantum Sensor or None then Pyronometer.
        if all(float(val) < 0 for val in data_entries[3]) and all(
                float(val) < 0 for val in data_entries[4]):
            entry_names.extend([None, 'Pyronometer'])
            entry_names[2] = None  # Temperature 2 also becomes None.
        else:
            if mean(data_entries[3]) > mean(data_entries[4]):
                err_str = "\n WARNING: PYRONOMETER VALUES FOUND HIGHER THAN QUANTUM SENSOR. MAYBE " \
                          "UNKNOWN DATALOGGER TYPE {0}. Proceeding anyway. \n".format(data_dir)
                print(err_str)

            entry_names.extend(['Pyronometer', 'Quantum Sensor'])

    elif num_entries == 6 and all(
            float(val) < 0 for val in data_entries[5]):  # Last value is -99999
        # battery volt, temp1, temp2, Pyronometer, Quantum Sensor, None.
        entry_names.extend(['Pyronometer', 'Quantum Sensor', None])

    elif num_entries == 3:
        # Just battery voltage, temp1, temp2.
        pass

    else:
        # TODO Implement other datalogger types (if there are any others...)
        import pdb
        pdb.set_trace()
        raise NotImplementedError('Unrecognized Datalogger string. Sorry!')

    # Create an entry in the data and cal dicts for the split datalogger data.
    for name in entry_names:
        if name is not None:
            key_dict[name] = name  # Add this to the key dict, for consistency (other functs rely on it).
            data_dict[name] = []

    # Add the data to the data dict
    for name, values in zip(entry_names, data_entries):
        if name is not None:
            # Check for a list of nodata.
            # collect the unique non-empty values
            unique_vals = set(val for val in values if val != '')
            # Datalogger should not have negative values.
            if all(float(val) < 0 for val in unique_vals):
                # Don't add to the data_dict.
                pass
            else:
                data_dict[name] = []
                for value in values:
                    # We assume DL values less than 0 are bad/nodata values.
                    if value == '':
                        data_dict[name].append('-9999')
                    elif float(value) < 0:
                        # TODO standardize nodata value. For now, use -9999
                        data_dict[name].append('-9999')
                    elif name in {'Temperature 1', 'Temperature 2'} and float(value) > 250:
                        data_dict[name].append('-9999')
                    else:
                        data_dict[name].append(value)

    del data_dict[key_dict['Data Logger']]
    return data_dict
Code Example #40
 def avg_pop(self):
     return U.mean(self.theWorld.populations[self.metrics_start:])
Code Example #41
File: controller.py  Project: jdiec/simple_gui
def run(input_deck):
    """
    Run GroupEng as specified by input_deck

    Parameters
    ----------
    input_deck: basestring: filename
        Input file specifying class information and grouping rules

    Output
    ------
    Output files determined by Input deck
    """
    dek = parser.read_input(input_deck)
    
    students = load_classlist(dek['classlist'], dek.get('student_identifier'))
    identifier = students[0].identifier
    course = Course(students, dek['group_size'], dek.get('uneven_size'))

    rules = [make_rule(r, course) for r in dek['rules']]

    balance_rules = filter(lambda x: isinstance(x, Balance), rules)

    groups = group.make_initial_groups(course, balance_rules)

    # Add a rule to distribute phantoms to avoid having more than one phantom
    # per group, put it first so that it is highest priority
    # we have to add this after the phantoms are created by
    # group.make_initial_groups so that it can see the phantoms
    rules = [Distribute(identifier, course, 'phantom')] + rules

    suceeded = apply_rules_list(rules, groups, course.students)

    groups.sort(key = attrgetter('group_number'))


    def failures(r):
        return reduce(lambda x, y: x+(1-r.check(y)), groups, 0)

    if failures(rules[0]) !=  0:
        raise UnevenGroups()

    ############################################################################
    # Output
    ############################################################################

    run_name = os.path.splitext(input_deck)[0]
    
    outdir = 'groups_{0}_{1}'.format(run_name,
                                     time.strftime('%Y-%m-%d_%H-%M-%S'))

    os.mkdir(outdir)
    os.chdir(outdir)
    
    def outfile(o):
        return file('{0}_{1}'.format(run_name,o),'w')
    
    group_output(groups, outfile('groups.csv'), identifier)
    group_output(groups, outfile('groups.txt'), identifier, sep = '\n')
    student_full_output(students, identifier, outfile('classlist.csv'))

        
    report = outfile('statistics.txt')
        
    report.write('Ran GroupEng on: {0} with students from {1}\n\n'.format(
            input_deck, dek['classlist']))
    
    report.write('Made {0} groups\n\n'.format(len(groups)))
    
    for r in rules[1:]:
        n_fail = failures(r)
        if isinstance(r, Balance):
            group_means = sorted([mean(g, r.get_strength) for g in groups])
            attr = r.attribute
            report.write('{0} groups failed:'.format(n_fail))
            report.write('{0}: '.format(r))
            report.write('Class {0} Mean: {1:3.2f}, '.format(
                    attr, mean(students, r.get_strength)))
            report.write('Class {0} Std Dev: {1:3.2f}, '.format(
                        attr, std(students, r.get_strength)))
            report.write('Std Dev of Group {0} Means: {1:3.2f}'.format(
                    attr, std(group_means)))
            report.write('\n\n')
        else:
            report.write('{0} groups failed: {1}\n\n'.format(n_fail, r))
    
    report.write('Group Summaries\n')
    report.write('---------------\n')
        
    for g in groups:
        report.write('Group {0}: '.format(g.group_number))
        items = []
        for r in balance_rules:
            items.append('<{0} Mean: {1:3.2f}>'.format(
                    r.attribute, mean(g, r.get_strength)))
        for r in rules:
            if not r.check(g):
                items.append('Failed {0}'.format(r))
        report.write(', '.join(items))
        report.write('\n')
                
    report.write('\n')
        
    return groups, suceeded, outdir