Example #1
def lin_pos(clist):
    xlist = (x['x'] for x in clist)
    elist = [x['dedx'] for x in clist]
    x = average( xlist, elist )
    ylist = (y['y'] for y in clist)
    y = average( ylist, elist )
    return (x,y)
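A note on the helper being imported: this call site passes a second argument of per-point weights (the dE/dx values), so the `average` used here is presumably a weighted mean over an iterable. A minimal sketch consistent with these call sites (the signature is an assumption drawn from the calls, not the library's documented API):

def average(values, weights=None):
    # Arithmetic mean, optionally weighted. Call sites above pass generators,
    # so materialize the input first. Callers must ensure a nonzero total weight.
    values = list(values)
    if weights is None:
        return sum(values) / float(len(values))
    weights = list(weights)
    return sum(v * w for v, w in zip(values, weights)) / float(sum(weights))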
Example #2
def calculSD(li, liref):
    """
    Calcul de l'écart-type (quantifier le montant de la dispersion) de deux ensembles de données.

    :param list li: une liste de valeurs qu'a été identifie par le logiciel
    :param list liref: liste de valeurs de référence pour les valeurs étudiées
    :return: l'écart-type des valeurs étudiées et des valeurs de référence respectivement
    :rtype: (float, float)
    
    """
    n = len(li)
    m = len(liref)

    x_li = 0
    x_ref = 0

    x_a = average(li)
    x_aref = average(liref)

    #Calculation of SD - Wikipedia standard-deviation formula
    for x_i, x_iref in zip(li, liref):
        x_li += x_i**2
        x_ref += x_iref**2

    s = ((x_li / n) - x_a**2)**0.5
    sref = ((x_ref / m) - x_aref**2)**0.5

    return s, sref
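This routine uses the population form of the standard deviation, sqrt(E[x^2] - mean^2), and it pairs the two lists with zip, so both inputs are implicitly assumed to be the same length. A small self-contained check of that formula on hypothetical data, assuming a plain single-list `average`:

def average(values):
    return sum(values) / float(len(values))

sample = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
mean = average(sample)
population_sd = (average([x ** 2 for x in sample]) - mean ** 2) ** 0.5
print(population_sd)  # 2.0 for this data set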
Example #3
 def signal_to_symbol(self, values):
   ret = []
   for sym_i in values:
     sym_values = values.get(sym_i)
     avg_value = average([average(sym_values.get(coord))
                          for coord in sym_values])
     keys = sorted(self._inv_thresholds.keys())
     ret.append(int(self._inv_thresholds[keys[bsearch(keys, avg_value)]]))
   return ret
Example #4
 def signal_to_symbol(self, values):
     ret = []
     for sym_i in values:
         sym_values = values.get(sym_i)
         avg_value = average(
             [average(sym_values.get(coord)) for coord in sym_values])
         keys = sorted(self._inv_thresholds.keys())
         ret.append(
             int(self._inv_thresholds[keys[bsearch(keys, avg_value)]]))
     return ret
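Both versions of `signal_to_symbol` lean on a `bsearch` helper that is not shown. From the way its result indexes the sorted threshold keys, it appears to return the position of the largest key that does not exceed the averaged value; the sketch below makes that assumption explicit using the standard library (the real helper's edge-case behaviour may differ):

import bisect

def bsearch(sorted_keys, value):
    # Index of the right-most key <= value, clamped to 0 for values
    # smaller than the first key.
    return max(bisect.bisect_right(sorted_keys, value) - 1, 0)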
Example #5
    def tempero_spatial_pooling(self, learning_enabled=True):
        '''
        Temporal-Spatial pooling routine
        --------------
        Takes input and calculates active columns (sparse representation)
        This is a representation of input in context of previous input
        (same region and regions above -- top-down predictions)
        '''

        # Phase 1: Overlap
        self.overlap = self.do_overlap()
        log("%s << Overlap (normalized)" % printarray(self.overlap, continuous=True), level=2)

        # Phase 2: Inhibition
        activating = self.do_inhibition()
        log("%s << Activating (inhibited)" % printarray(activating, continuous=True), level=2)

        # Phase 3: Learning
        if learning_enabled:
            log("%s << Active Duty Cycle" % printarray(self.active_duty_cycle, continuous=True), level=2)
            log("%s << Overlap Duty Cycle" % printarray(self.overlap_duty_cycle, continuous=True), level=2)
            self.do_learning(activating)


        # Phase 4: Calculate new activations
        # Save pre-step activations
        self.last_activation = [cell.activation for cell in self.cells]
        # Update activations
        for i, cell in enumerate(self.cells):
            if activating[i]:
                cell.activation = 1.0  # Max out
            else:
                cell.activation -= cell.fade_rate
            if cell.activation < 0:
                cell.activation = 0.0
        log("%s << Activations" % self.print_cells(), level=2)


        # Phase 5: Calculate Distal Biases (TM?)
        self.last_bias = np.copy(self.bias)
        self.bias = self.calculate_biases()
        log("%s << Bias" % printarray(self.bias, continuous=True), level=2)

        if VERBOSITY >= 1:
            # Log average synapse permanence in region
            permanences = []
            n_connected = 0
            n_synapses = 0
            for cell in self.cells:
                for seg in cell.distal_segments:
                    permanences.append(util.average(seg.syn_permanences))
                    n_synapses += seg.n_synapses()
                    n_connected += len(filter(lambda x : x > CONNECTED_PERM, seg.syn_permanences))
            ave_permanence = util.average(permanences)
            log("R%d - average distal synapse permanence: %.1f (%.1f%% connected of %d)" % (self.index, ave_permanence, (n_connected/float(n_synapses))*100., n_synapses), level=1)
Example #6
def average_of_values(key):
    """Returns the average of some value from all data.

    :param key: The key to the values of interest.
    :returns: The averages of some values for HackerNews and DataTau
    :rtype: dict

    """
    return {
        HN_KEY: average(_vals_on_all_datetimes(key, dt=False)),
        DT_KEY: average(_vals_on_all_datetimes(key, hn=False))
    }
Example #7
def averages_on_datetimes(key):
    """Returns the averages of some values on every datetime

    :param key: The key to the value of interest.
    :returns: A list of averages for both HackerNews and DataTau
    :rtype: dict

    """
    averages = {HN_KEY: [], DT_KEY: []}

    for dt in _get_datetimes():
        averages[HN_KEY].append(average(_get_data()[dt][HN_KEY][key]))
        averages[DT_KEY].append(average(_get_data()[dt][DT_KEY][key]))

    return averages
Example #8
def log_pos(clist,a=4.0):
    #return tuple of x,y
    bump = find_bump(clist)
    E_bump = bump['dedx']
    E_cutoff = E_bump*exp(-1*a)
    wlist = []
    for c in clist:
        E = c['dedx']
        #this is to avoid log of 0 evaluation
        #also there are some event with energy less than 1.0
        w = 0 if E < E_cutoff or E_bump < 10.0 else a+log(E/E_bump)
        wlist.append(w)
    x = average((x['x'] for x in clist), wlist)
    y = average((y['y'] for y in clist), wlist)
    return (x,y)
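The weighting scheme here is logarithmic: w = a + ln(E/E_bump), clipped to zero below E_cutoff = E_bump * exp(-a), so weights grow from 0 at the cutoff up to a at the bump energy. A tiny self-contained check of that weight function with made-up numbers:

from math import exp, log

a = 4.0
E_bump = 100.0                # hypothetical bump energy
E_cutoff = E_bump * exp(-a)

def weight(E):
    return 0 if E < E_cutoff or E_bump < 10.0 else a + log(E / E_bump)

print(weight(E_cutoff))       # ~0.0 at the cutoff
print(weight(E_bump))         # 4.0 at the bump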
Example #9
def list_resources(resource_type=None, query=None, tag=None):
    """return all resources"""

    reliability_values = []
    first_run = None
    last_run = None

    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'first_run': None,
        'last_run': None,
        'reliability': 0
    }

    if resource_type is not None:
        response['resources'] = models.Resource.query.filter_by(
            resource_type=resource_type).all()

    if query is not None:
        field, term = get_query_field_term(query)
        response['resources'] = models.Resource.query.filter(
            field.ilike(term)).all()

    if tag is not None:
        response['resources'] = models.Resource.query.filter(
            models.Resource.tags.any(models.Tag.name.in_([tag]))).all()

    if 'resources' not in response:
        # No query nor resource_type provided: fetch all resources
        response['resources'] = models.Resource.query.all()

    response['total'] = len(response['resources'])
    for resource in response['resources']:
        if first_run is None or resource.first_run < first_run:
            first_run = resource.first_run
        if last_run is None or resource.last_run < last_run:
            last_run = resource.last_run
        response['first_run'] = first_run
        response['last_run'] = last_run
        if resource.last_run.success:
            response['success']['number'] += 1
        else:
            response['fail']['number'] += 1
        reliability_values.append(resource.reliability)

    response['success']['percentage'] = util.percentage(
        response['success']['number'], response['total'])
    response['fail']['percentage'] = util.percentage(
        response['fail']['number'], response['total'])
    response['reliability'] = util.average(reliability_values)

    return response
Example #10
  def signal_to_symbol(self, values):
    thresholds = { # Well, nine (we add one for "black").
      '0': 235,
      '1': 207,
      '2': 179,
      '3': 151,
      '4': 123,
      '5': 95,
      '6': 67,
      '7': 39,
      '8': 11,
      }

    _inv_thresholds = dict((v,k) for k, v in thresholds.iteritems())
    _keys = sorted(_inv_thresholds.keys())

    ret = []
    for sym_i in values:
      sym_values = values.get(sym_i)
      _values = []
      for coord in sym_values:
        _values.append(sym_values.get(coord))
      avg_value = average(_values)
      _bs = bsearch(_keys, avg_value)
      _key = _keys[_bs]
      base8 = _inv_thresholds[_key]
      ret.append(base8)
    return ret
Example #11
    def signal_to_symbol(self, values):
        thresholds = { # Well, nine (we add one for "black").
          '0': 235,
          '1': 207,
          '2': 179,
          '3': 151,
          '4': 123,
          '5': 95,
          '6': 67,
          '7': 39,
          '8': 11,
          }

        _inv_thresholds = dict((v, k) for k, v in thresholds.iteritems())
        _keys = sorted(_inv_thresholds.keys())

        ret = []
        for sym_i in values:
            sym_values = values.get(sym_i)
            _values = []
            for coord in sym_values:
                _values.append(sym_values.get(coord))
            avg_value = average(_values)
            _bs = bsearch(_keys, avg_value)
            _key = _keys[_bs]
            base8 = _inv_thresholds[_key]
            ret.append(base8)
        return ret
Example #12
    def do_learning(self):
        for col in self._get_active_columns():
            for s in col.segment.potential_synapses:
                if s.active():
                    s.permanence += self.permanence_inc
                    s.permanence = min(1.0, s.permanence)
                else:
                    s.permanence -= self.permanence_dec
                    s.permanence = max(0.0, s.permanence)

        all_field_sizes = []
        for c, col in enumerate(self.columns):
            min_duty_cycle = 0.01 * self._max_duty_cycle(self.neighbors[c])
            column_active = self.active_columns[-1][c]
            sufficient_overlap = self.overlap[c] > self.brain.min_overlap
            col.update_duty_cycles(active=column_active, overlap=sufficient_overlap)
            self.boost[c] = self._boost_function(c, min_duty_cycle)  # Updates boost value for column (higher if below min)

            # Check if overlap duty cycle less than minimum (note: min is calculated from max *active* not overlap)
            if self.overlap_duty_cycle[c] < min_duty_cycle:
                self._increase_permanences(c, 0.1 * CONNECTED_PERM)

            all_field_sizes.append(self.columns[c].connected_receptive_field_size())

        # Update inhibition radius (based on updated active connections in each column)
        self.inhibition_radius = util.average(all_field_sizes)
Example #13
File: Hud.py  Project: Peaker/pyun
 def _host_color(self, host):
     def player_colors_component(x):
         return [player.worm.color[x] for player in host.players]
     r, g, b = tuple([average(player_colors_component(i))/4.0
                      for i in xrange(3)])
     br, bg, bb = config.HUD_HOST_BASE_COLOR
     return r+br, g+bg, b+bb
Example #14
def finalize_attempt_data(attempt, start_time):
    t_, e_, d_, s_, _ = attempt['last_line'].split('\t', 4)
    s_ = datetime.datetime.strptime(t_, '%m-%d-%y-%H-%M-%S')
    s_ = (s_ - epoch).total_seconds()
    s_ = s_ - start_time
    del(attempt['last_line'])
    attempt['end_date'] = t_
    attempt['total_time'] = (float(s_) - attempt['start_time']) #/ 60.0
    me_total = 0.0
    for i in ['tests','submissions','replays']:
        me_total += attempt['me_'+i]
    if attempt['total_time']:
        attempt['r_me_per_minute'] = me_total/(attempt['total_time']/60.0)
    for i in ['tests', 'submissions', 'replays']:
        if attempt['total_time']:
            attempt['r_me_'+i+'_per_minute'] = 1.0*attempt['me_'+i] / (attempt['total_time'] / 60.0)
        if me_total:
            attempt['r_me_' + i] = 1.0*attempt['me_' + i] / me_total
    for i in ['_tests', '_submissions','']:
        lst_ = []
        for j in attempt['_t_d_component_dragged_me_lst']:
            if j[0]==i or not i:
                lst_.append(j[1])
        if lst_:
            attempt['t_d_component_dragged_me' + i + '_avg'] = util.average(lst_)
            attempt['t_d_component_dragged_me' + i + '_min'] = min(lst_)

Example #15
 def get_stats_based_on_entities(self,tag_to_compare_against=TaggableContainer.TAG_CHARACTER_SYMBOL):
     characters = Entity.filter_characters(self.entities)
     characters_uniq = set([i.symbol for i in characters if ',' not in i.symbol])
     coref_groups_with_characters = [i for i in self.coreference_groups if i.contains_a_character()]
     coref_groups_with_characters_c_gr = [i.number_of_distinct_characters(tag_to_compare_against) for i in coref_groups_with_characters]
     coref_groups_per_character_gr_c = [i.number_of_distinct_coref_groups(tag_to_compare_against) for i in characters]
     return [len(self.entities),
             len(characters),
             len(characters_uniq),
             sum([len(self.get_coreference_group_by_id(i.id).mentions) for i in self.entities]),
             sum([len(self.get_coreference_group_by_id(i.id).mentions) for i in characters]),
             sum([len(self.get_coreference_group_by_id(i.id).mentions) for i in characters])/len(characters) if characters else 0,
             len(self.coreference_groups),
             len(coref_groups_with_characters),
             util.average(coref_groups_with_characters_c_gr),
             util.average(coref_groups_per_character_gr_c)
     ]
Example #16
    def single_sensor_accuracy(self):
        sensor = []
        accuracy = []
        std = []
        for s in self.dataset.get_sensors:
            if s != "activityrecognition":
                print(s)
                features = self.dataset.get_sensor_features(s)
                train = self.dataset.get_train.copy()
                test = self.dataset.get_test.copy()

                train_features, train_classes, test_features, test_classes = self.__get_sets_for_classification(
                    train, test, features)
                singleAcc = []
                for i in range(const.REPEAT):
                    # build classifier
                    classifier_forest = RandomForestClassifier(
                        n_estimators=const.PAR_RF_ESTIMATOR)
                    classifier_forest.fit(train_features, train_classes)
                    test_prediction_forest = classifier_forest.predict(
                        test_features)
                    acc_forest = accuracy_score(test_classes,
                                                test_prediction_forest)
                    singleAcc.append(acc_forest)
                accM = util.average(singleAcc)
                variance = list(map(lambda x: (x - accM)**2, singleAcc))
                standard_deviation = math.sqrt(util.average(variance))
                print(s, accM, standard_deviation)

                accuracy.append(accM)
                std.append(standard_deviation)
                sensor.append(s)
        df_single_sensor_acc = pd.DataFrame({
            'sensor': sensor,
            'accuracy': accuracy,
            'dev_standard': std
        })
        df_single_sensor_acc = df_single_sensor_acc.sort_values(
            by='accuracy', ascending=False)

        if not os.path.exists(const.DIR_RESULTS):
            os.makedirs(const.DIR_RESULTS)
        df_single_sensor_acc.to_csv(const.DIR_RESULTS + "/" +
                                    const.FILE_SINGLE_SENSOR_ANALYSIS,
                                    index=False)
Example #17
 def get_stats(self,tag_to_compare_against=TaggableContainer.TAG_CHARACTER_SYMBOL):
     characters = [i for i in self.document.get_all_mentions() if i.is_independent and 'CH' in i.get_taxonomy(TaxonomyContainer.TAXONOMY_NONCHARACTER)]
     characters_uniq = set(util.flatten([i.get_tag(TaggableContainer.TAG_CHARACTER_SYMBOL) for i in characters]))
     coref_groups_with_characters = [i for i in self.coreference_groups if i.contains_a_character()]
     coref_groups_with_characters_c_gr = [i.number_of_distinct_characters(tag_to_compare_against) for i in coref_groups_with_characters]
     coref_groups_per_character_gr_c = [self.number_of_distinct_coref_groups(i) for i in characters_uniq]
     #coref_groups_per_character_gr_c = [i.number_of_distinct_coref_groups(tag_to_compare_against) for i in characters]
     return [len(self.entities),
             len(characters),
             len(characters_uniq),
             sum([len(self.get_coreference_group_by_id(i.id).mentions) for i in self.entities]),
             #sum([len(self.get_coreference_group_by_id(i.id).mentions) for i in characters]),
             #sum([len(self.get_coreference_group_by_id(i.id).mentions) for i in characters])/len(characters) if characters else 0,
             len(self.coreference_groups),
             len(coref_groups_with_characters),
             util.average(coref_groups_with_characters_c_gr),
             util.average(coref_groups_per_character_gr_c)
     ]
Example #18
    def cleanup(self):
        # TODO: decouple printer and connection.
        self.printer.terminate()
        print("Terminating threads...")

        self.printer.join()
        if "-d" in sys.argv and self.buff.log:
            print("%d high latency events recorded, max=%r, avg=%r" %
                  (len(self.buff.log), max(
                      self.buff.log), util.average(self.buff.log)))
Example #19
def list_resources(resource_type=None, query=None):
    """return all resources"""

    reliability_values = []
    first_run = None
    last_run = None

    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'first_run': None,
        'last_run': None,
        'reliability': 0
    }

    if resource_type is not None:
        response['resources'] = models.Resource.query.filter_by(
            resource_type=resource_type).all()
    if query is not None:
        field, term = get_query_field_term(query)
        response['resources'] = models.Resource.query.filter(
            field.ilike(term)).all()
    else:
        response['resources'] = models.Resource.query.all()

    response['total'] = len(response['resources'])
    for resource in response['resources']:
        if first_run is None or resource.first_run < first_run:
            first_run = resource.first_run
        if last_run is None or resource.last_run < last_run:
            last_run = resource.last_run
        response['first_run'] = first_run
        response['last_run'] = last_run
        if resource.last_run.success:
            response['success']['number'] += 1
        else:
            response['fail']['number'] += 1
        reliability_values.append(resource.reliability)

    response['success']['percentage'] = util.percentage(
        response['success']['number'], response['total'])
    response['fail']['percentage'] = util.percentage(
        response['fail']['number'], response['total'])
    response['reliability'] = util.average(reliability_values)

    return response
Example #20
def calculStandardDeviation(data):
    """
    Calculer l'écart-type d'un ensemble de données.

    :param list data: liste de valeurs
    :return: l'écart-type de l'ensemble de valeurs
    :rtype: float
    
    """
    av = average(data)
    n = len(data)
    s = (sum((val - av)**2 for val in data) / (n - 1))**0.5
    return s
Example #21
def search(f, neg_point, pos_point):
    midpoint = average(neg_point, pos_point)

    if close_enough(neg_point, pos_point):
        return midpoint

    test_value = f(midpoint)

    if test_value > 0:
        return search(f, neg_point, midpoint)
    elif test_value < 0:
        return search(f, midpoint, pos_point)
    else:
        return midpoint
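This SICP-style interval-halving search assumes a two-argument `average` (the midpoint of two numbers) and a `close_enough` tolerance predicate, neither of which is shown. A minimal sketch under those assumptions, with a hypothetical tolerance:

TOLERANCE = 1e-3

def average(a, b):
    return (a + b) / 2.0

def close_enough(a, b):
    return abs(a - b) < TOLERANCE

# With the search() defined above: a root of x**3 - 2x - 3 between 1 and 2,
# approximately 1.893.
# print(search(lambda x: x**3 - 2*x - 3, 1.0, 2.0))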
Example #22
    def cleanup(self):
        # TODO: decouple printer and connection.
        self.printer.terminate()
        print("Terminating threads...")

        self.printer.join()
        if "-d" in sys.argv and self.buff.log:
            print(
                "%d high latency events recorded, max=%r, avg=%r" % (
                    len(self.buff.log),
                    max(self.buff.log),
                    util.average(self.buff.log)
                )
            )
Example #23
def analyse(path, filter_fn, field_name, print_csv=False):
    data = load_data(path, filter_fn)
    occurrences = data['days']
    day_of_cycle = data['day_of_cycle']
    weekdays = data['weekdays']
    day_of_cycle_total = sum([day_of_cycle[x] for x in day_of_cycle])

    if len(occurrences) == 0:
        print "No tags found. Are you sure '%s' is the correct tag?" % tag
        return

    deltas = []
    for d in xrange(len(occurrences)-1):
        delta = occurrences[d+1] - occurrences[d]
        if delta.days > 2:
            deltas.append(delta.days)

    if print_csv:
        print "date,%s" % field_name
        for d in date_range(occurrences[0], occurrences[len(occurrences)-1]):
            if d in occurrences:
                print str(d) + ",1"
            else:
                print str(d) + ",0"
        return

    print "==============="
    print "Day of cycle distribution"
    previous = None
    for k in sorted(day_of_cycle.keys()):
        if previous:
            if k - previous > 1:
                print ".\n."
        previous = k
        print ("Day %s:" % k).ljust(10), str(day_of_cycle[k]).ljust(4), round(day_of_cycle[k] / float(day_of_cycle_total), 2)
    print "==============="
    print "Weekday distribution"
    for k in sorted(weekdays.keys()):
        print weekday_from_int(k).ljust(5), weekdays[k]
    print "==============="
    print "Total amount of days with %s: " % field_name, len(occurrences)
    print "Average amount of days between %s: " % field_name, average(deltas)
    print "Std dev: ", std_dev(deltas)
    print "Last day with %s: " % field_name, occurrences[len(occurrences)-1]
    print "Days between today and last day with %s: " % field_name, (datetime.datetime.today().date() - occurrences[len(occurrences)-1].date()).days
    print "==============="
Example #24
def list_resources(resource_type=None):
    """return all resources"""

    reliability_values = []

    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'reliability': 0
    }

    if resource_type is not None:
        response['resources'] = models.Resource.query.filter_by(
            resource_type=resource_type).all()
    else:
        response['resources'] = models.Resource.query.all()

    response['total'] = len(response['resources'])
    for resource in response['resources']:
        if resource.last_run.success:
            response['success']['number'] += 1
        else:
            response['fail']['number'] += 1
        reliability_values.append(resource.reliability)

    response['success']['percentage'] = util.percentage(
        response['success']['number'], response['total'])
    response['fail']['percentage'] = util.percentage(
        response['fail']['number'], response['total'])
    response['reliability'] = util.average(reliability_values)

    return response
Example #25
def list_resources(resource_type=None):
    """return all resources"""

    reliability_values = []

    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'reliability': 0
    }

    if resource_type is not None:
        response['resources'] = models.Resource.query.filter_by(
            resource_type=resource_type).all()
    else:
        response['resources'] = models.Resource.query.all()

    response['total'] = len(response['resources'])
    for resource in response['resources']:
        if resource.last_run.success:
            response['success']['number'] += 1
        else:
            response['fail']['number'] += 1
        reliability_values.append(resource.reliability)

    response['success']['percentage'] = util.percentage(
        response['success']['number'], response['total'])
    response['fail']['percentage'] = util.percentage(
        response['fail']['number'], response['total'])
    response['reliability'] = util.average(reliability_values)

    return response
Example #26
def run(window):
    timer = pygame.time.Clock()
    width = window.get_width()
    height = window.get_height()

    font = pygame.freetype.SysFont('Comic Sans MS', 18)

    boundary = Rectangle((0, 0), (width, height))

    particles = []

    using_quadtree = False
    for i in range(0, 200):
        particles.append(
            Particle([random_zero_to_max(width),
                      random_zero_to_max(height)], 2))

    running = True
    frame_rate = []
    while running:
        window.fill((0, 0, 0))
        dt = timer.tick()
        qtree = Quadtree(boundary, 4)

        for particle in particles:
            qtree.insert(particle)

        for particle in particles:
            if not particle.highlight:
                if using_quadtree:
                    region = Circle([particle.x, particle.y],
                                    particle.r * 2 - 1)
                    others = qtree.query(region)
                    particle.check_collision(others)
                else:
                    particle.check_collision(particles)
            particle.render(window)
            particle.move(width, height)

        if len(frame_rate) > 10:
            frame_rate.pop(0)
        frame_rate.append(1000 / dt)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False

            if event.type == pygame.MOUSEBUTTONUP:
                using_quadtree = not using_quadtree

        if using_quadtree:
            text_render, _ = font.render('Using QuadTree', (255, 255, 255))
        else:
            text_render, _ = font.render('Not Using QuadTree', (255, 255, 255))
        rect = text_render.get_rect(center=(400 + 100 / 2, 300 + 20 / 2))
        window.blit(text_render, rect)
        text_render, _ = font.render(
            '{} FPS'.format(round(average(frame_rate), 1)), (255, 255, 255))
        rect = text_render.get_rect(center=(400 + 100 / 2, 350 + 20 / 2))
        window.blit(text_render, rect)

        pygame.display.update()
Example #27
 def improve(guess):
     return average(guess, (x / guess))
Example #28
def list_resources(resource_type=None, query=None, tag=None):
    """return all resources"""

    reliability_values = []
    first_run = None
    last_run = None

    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'first_run': None,
        'last_run': None,
        'reliability': 0
    }

    if resource_type is not None:
        response['resources'] = models.Resource.query.filter_by(
            resource_type=resource_type).all()

    if query is not None:
        field, term = get_query_field_term(query)
        response['resources'] = models.Resource.query.filter(
            field.ilike(term)).all()

    if tag is not None:
        response['resources'] = models.Resource.query.filter(
            models.Resource.tags.any(models.Tag.name.in_([tag]))).all()

    if 'resources' not in response:
        # No query nor resource_type provided: fetch all resources
        response['resources'] = models.Resource.query.all()

    response['total'] = len(response['resources'])
    response['success']['percentage'] = 0
    response['fail']['percentage'] = 0
    response['reliability'] = 0
    for resource in response['resources']:
        if resource.runs.count() > 0:
            # View should work even without Runs
            if first_run is None or resource.first_run < first_run:
                first_run = resource.first_run
            if last_run is None or resource.last_run < last_run:
                last_run = resource.last_run
            response['first_run'] = first_run
            response['last_run'] = last_run
            if resource.last_run.success:
                response['success']['number'] += 1
            else:
                response['fail']['number'] += 1

            reliability_values.append(resource.reliability)

    response['success']['percentage'] = util.percentage(
        response['success']['number'], response['total'])
    response['fail']['percentage'] = util.percentage(
        response['fail']['number'], response['total'])
    response['reliability'] = util.average(reliability_values)

    return response
Example #29
 def test_empty_list(self):
     with self.assertRaises(ValueError):
         avg = average([])
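This test and the single-element tests in example #46 pin down the expected behaviour of the plain list form of `average`: an empty sequence raises ValueError and a one-element list returns its element. A minimal implementation consistent with both tests (a sketch, not necessarily the project's own code):

def average(values):
    values = list(values)
    if not values:
        raise ValueError("average() requires at least one value")
    return sum(values) / float(len(values))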
Example #30
        data[folder][sub['name']] = []
        for data_path in paths:
            json_data = util.open_json_result(os.path.join(path, folder, sub['name'], data_path))
            if type(json_data) is list:
                d = json_data[0]
            else:
                d = json_data
            data[folder][sub['name']].append(d)


#Average and find breakeven points, for final series.
breakeven_points = {}
for folder in folders:
    breakeven_points[folder] = []
    for sub in sub_folders:
        pr_avg = util.average(data[folder][sub['name']], pr_key_y, pr_key_x)
        breakeven = util.find_breakeven(pr_avg, samples=4)
        breakeven_points[folder].append({"x": sub['value'], "y": breakeven[1]})
print breakeven_points
#series.append({"name": folder, "data": pr_avg})
#

#Summary figure for MSE loss
loss_points = {}
for folder in folders:
    loss_points[folder] = []
    for sub in sub_folders:
        loss_avg = util.average(data[folder][sub['name']], 'events', lc_key_x)
        last_loss = loss_avg[-1][lc_key_y]
        loss_points[folder].append({"x": sub['value'], "y": last_loss})
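In this script `util.average` is called with a list of experiment results plus a series key and an x key, and its result is indexed like a list of point dicts, so it presumably averages several runs point-by-point after aligning them on the x key. The sketch below is only a guess at that behaviour; the data structure it assumes is inferred from the call sites, not from the library:

from collections import defaultdict

def average(results, series_key, x_key):
    # Average numeric fields of several runs for every distinct x value.
    # Each element of `results` is assumed to hold a list of point dicts
    # under `series_key` (e.g. {'epoch': 3, 'test_loss': 0.12, ...}).
    buckets = defaultdict(list)
    for run in results:
        for point in run[series_key]:
            buckets[point[x_key]].append(point)
    averaged = []
    for x in sorted(buckets):
        points = buckets[x]
        merged = {x_key: x}
        for key in points[0]:
            if key == x_key:
                continue
            vals = [p[key] for p in points if isinstance(p.get(key), (int, float))]
            if vals:
                merged[key] = sum(vals) / float(len(vals))
        averaged.append(merged)
    return averaged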
Example #31
def list_resources(resource_type=None, query=None, tag=None):
    """return all resources"""

    reliability_values = []
    first_run = None
    last_run = None

    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'first_run': None,
        'last_run': None,
        'reliability': 0
    }

    filters = ()

    if resource_type is not None:
        filters = filters + ("resource_type = '%s'" % resource_type,)

    if query is not None:
        field, term = get_query_field_term(query)
        filters = filters + (field.ilike(term),)

    if tag is not None:
        tag_filter = (models.Resource.tags.any(models.Tag.name.in_([tag])),)
        filters = filters + tag_filter

    response['resources'] = models.Resource.query.filter(*filters).all()

    response['total'] = len(response['resources'])
    response['success']['percentage'] = 0
    response['fail']['percentage'] = 0
    response['reliability'] = 0
    for resource in response['resources']:
        if resource.runs.count() > 0:
            # View should work even without Runs
            if first_run is None or resource.first_run < first_run:
                first_run = resource.first_run
            if last_run is None or resource.last_run < last_run:
                last_run = resource.last_run
            response['first_run'] = first_run
            response['last_run'] = last_run
            if resource.last_run.success:
                response['success']['number'] += 1
            else:
                response['fail']['number'] += 1

            reliability_values.append(resource.reliability)

    response['success']['percentage'] = int(round(util.percentage(
        response['success']['number'], response['total'])))
    response['fail']['percentage'] = 100 - response['success']['percentage']
    response['reliability'] = round(util.average(reliability_values), 1)

    return response
Example #32
 def average_response_time(self):
     result = 0
     if self.runs.count() > 0:
         query = [run.response_time for run in self.runs]
         result = util.average(query)
     return result
Example #33
        else:
            d = json_data
        data[folders[t]].append(d)

series = []

manual_breakeven = [
    [0.513, 0.595, 0.687, 0.712],
    [0.337, 0.411, 0.462, 0.524],
]  # Very uneven curves is hard to approximate by polyfit. (finds breakeven automatically)
compare_series = []
for j, pr in enumerate(folders[1:]):
    pr_per_epoch = []
    for i, curve in enumerate(data[pr]):
        samples = 10
        if i < len(manual_breakeven[j]):
            breakeven_points = [0, manual_breakeven[j][i]]
        else:
            breakeven_points = util.find_breakeven(curve[pr_key_y], samples=samples)
        print(folder, breakeven_points)
        name = (pr_epoch * i) + 5
        series.append({"name": "Epoch " + str(name), "data": curve[pr_key_y], "breakeven": breakeven_points})
        pr_per_epoch.append({"epoch": name, "breakeven": breakeven_points[-1]})
    compare_series.append({"name": pr, "data": pr_per_epoch, "y_key": "breakeven"})
util.display_precision_recall_plot(series)

series = []
loss_avg = util.average(data[folders[0]], "events", lc_key_x)
series.append({"name": folders[0], "data": loss_avg, "y_key": lc_key_y})
util.display_two_axis_plot(series, compare_series)
Example #34
manual_breakeven = [
    [0.513, 0.595, 0.687, 0.712], [0.337, 0.411, 0.462, 0.524]
]  #Very uneven curves is hard to approximate by polyfit. (finds breakeven automatically)
compare_series = []
for j, pr in enumerate(folders[1:]):
    pr_per_epoch = []
    for i, curve in enumerate(data[pr]):
        samples = 10
        if i < len(manual_breakeven[j]):
            breakeven_points = [0, manual_breakeven[j][i]]
        else:
            breakeven_points = util.find_breakeven(curve[pr_key_y],
                                                   samples=samples)
        print(folder, breakeven_points)
        name = (pr_epoch * i) + 5
        series.append({
            "name": "Epoch " + str(name),
            "data": curve[pr_key_y],
            "breakeven": breakeven_points
        })
        pr_per_epoch.append({'epoch': name, 'breakeven': breakeven_points[-1]})
    compare_series.append({
        'name': pr,
        "data": pr_per_epoch,
        'y_key': 'breakeven'
    })
util.display_precision_recall_plot(series)

series = []
loss_avg = util.average(data[folders[0]], 'events', lc_key_x)
series.append({"name": folders[0], "data": loss_avg, "y_key": lc_key_y})
util.display_two_axis_plot(series, compare_series)
Example #35
def main(args):
    # Determine which algorithms to perform
    algorithms = []
    if args.bf:
        algorithms.append(wrp.AlgorithmWrapper(bf.CONTENT))
    if args.nn:
        algorithms.append(wrp.AlgorithmWrapper(nn.CONTENT))
    if args.ni:
        algorithms.append(wrp.AlgorithmWrapper(ni.CONTENT))
    if args.mst:
        algorithms.append(wrp.AlgorithmWrapper(mst.CONTENT))
    if args.ci:
        algorithms.append(wrp.AlgorithmWrapper(ci.CONTENT))

    # Initialize plots
    fig_correct, fig_complex, plot_correct, plot_complex = init_plots(
        algorithms)

    # Execute correct command
    if args.cmd == 'read':
        datasets = dataset.read(args.path)
        for ds in datasets:
            for algorithm in algorithms:
                y1, y2 = analyse_algorithm(ds.adj, ds.order, algorithm,
                                           args.repeat)
                plot_correct.scatter(ds.order,
                                     y2,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)
                plot_complex.scatter(ds.order,
                                     y1,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)

    elif args.cmd == 'random':
        if args.write:
            if not os.path.exists('datasets'):
                os.makedirs('datasets')

        order = args.order  # reset n
        while order <= args.max:
            for i in range(args.trials):
                path = None
                if args.write:
                    path = "datasets/order_{}_trial_{}.dat".format(order, i)
                adj = dataset.generate(order, args.spread, path)
                for algorithm in algorithms:
                    y1, y2 = analyse_algorithm(adj, order, algorithm,
                                               args.repeat)
                    algorithm.x.append(order)
                    algorithm.complex.append(y1)
                    algorithm.working_complex.append(y1)
                    algorithm.correct.append(y2)
                    algorithm.working_correct.append(y2)

            for algorithm in algorithms:
                algorithm.avg_correct.append(
                    util.average(algorithm.working_correct))
                algorithm.avg_complex.append(
                    util.average(algorithm.working_complex))
                algorithm.avg_x.append(order)
                algorithm.working_correct.clear()
                algorithm.working_complex.clear()

            order += 1

        if args.plot:
            for algorithm in algorithms:
                # Plot correctness measure
                plot_correct.scatter(algorithm.x,
                                     algorithm.correct,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)
                plot_correct.plot(algorithm.avg_x,
                                  algorithm.avg_correct,
                                  '-',
                                  color=algorithm.color,
                                  linewidth=0.5)
                fig_correct.savefig('Correctness',
                                    dpi=300,
                                    bbox_inches='tight')

                # Plot complexity measure
                plot_complex.scatter(algorithm.x,
                                     algorithm.complex,
                                     color=algorithm.color,
                                     alpha=0.5,
                                     s=0.5)
                plot_complex.plot(algorithm.avg_x,
                                  algorithm.avg_complex,
                                  '-',
                                  color=algorithm.color,
                                  linewidth=0.5)
                fig_complex.savefig('Complexity', dpi=300, bbox_inches='tight')
Example #36
'''
This tool creates a loss figure containing training, test and validation error. It averages experiment
runs before constructing a figure.
'''

path = '/home/olav/Documents/Results/E7_road_detection_performance'
folder = 'Network M1'
lc_key_x = 'epoch'

print("Creating averaged loss figures")
paths = os.listdir(os.path.join(path, folder))

data = []

for data_path in paths:
    json_data = util.open_json_result(os.path.join(path, folder,data_path))
    if type(json_data) is list:
        d = json_data[0]
    else:
        d = json_data
    data.append(d)


loss_avg = util.average(data, 'events', lc_key_x)
series = [
    {"name": "Training loss", "data": loss_avg, "y_key":  "training_loss"},
    #{"name": "Validation loss", "data": loss_avg, "y_key":  "validation_loss"},
    {"name": "Test loss", "data": loss_avg, "y_key":  "test_loss"}
]
util.display_loss_curve_plot(series)
Example #37
def average_link_overlap():
    return average(link_overlap_on_datetimes())
Example #38
def midpoint_segment(segment):
    return cons(
        average(x_point(start_segment(segment)),
                x_point(end_segment(segment))),
        average(y_point(start_segment(segment)),
                y_point(end_segment(segment))))
Example #39
def sqrt(x):
    return fixed_point(lambda y: average(y, x / y), 1.0)
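This is the classic average-damped fixed-point square root. `fixed_point` and the two-argument `average` are assumed helpers; a minimal sketch with a hypothetical tolerance:

def average(a, b):
    return (a + b) / 2.0

def fixed_point(f, guess, tolerance=1e-6):
    # Iterate f until successive guesses agree within the tolerance.
    while True:
        nxt = f(guess)
        if abs(nxt - guess) < tolerance:
            return nxt
        guess = nxt

# print(fixed_point(lambda y: average(y, 2.0 / y), 1.0))  # ~1.41421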
Example #40
 def average_response_time(self):
     query = [run.response_time for run in self.runs]
     return util.average(query)
Example #41
    def do_learning(self, activating):
        '''
        Update permanences
        On activating cells, increase permanences for each excitatory synapse above a min. contribution
        On non-activating cells, increase permanences for each inhibitory synapse above a min. contribution
        '''
        n_increased_prox = n_decreased_prox = n_increased_dist = n_decreased_dist = n_conn_prox = n_discon_prox = n_conn_dist = n_discon_dist = 0
        for i, is_activating in enumerate(activating):
            cell = self.cells[i]
            # Proximal
            # TODO: Also hold pre-learn segment activation for proximal
            if is_activating:
                for seg in cell.proximal_segments:
                    ni, nd, nc, ndc = self.learn_segment(seg, is_activating=is_activating)
                    n_increased_prox += ni
                    n_decreased_prox += nd
                    n_conn_prox += nc
                    n_discon_prox += ndc
            # Distal
            any_active = any([seg.active() for seg in cell.distal_segments])
            most_active = None
            if not any_active:
                most_active = cell.most_active_distal_segment()
            for seg in cell.distal_segments:
                active = seg.active_before_learning
                do_learn = is_activating and (active or not any_active)
                if do_learn:
                    ni, nd, nc, ndc = self.learn_segment(seg, is_activating=is_activating, distal=True)
                    n_increased_dist += ni
                    n_decreased_dist += nd
                    n_conn_dist += nc
                    n_discon_dist += ndc
                else:
                    # Re-initialize change state
                    seg.decay_permanences()
                    seg.syn_last_change = [0 for x in seg.syn_last_change]


        log("Distal: +%d/-%d (%d connected, %d disconnected)" % (n_increased_dist, n_decreased_dist, n_conn_dist, n_discon_dist))

        n_boosted = 0
        all_field_sizes = []
        for i, cell in enumerate(self.cells):
            neighbors = self._neighbors_of(cell)
            min_duty_cycle = 0.01 * self._max_duty_cycle(neighbors) # Based on active duty
            cell_active = activating[i]
            sufficient_overlap = self.overlap[i] > self.brain.min_overlap
            cell.update_duty_cycles(active=cell_active, overlap=sufficient_overlap)
            if DO_BOOSTING and self.brain.t > T_START_BOOSTING:
                self.boost[i] = self._boost_function(i, min_duty_cycle)  # Updates boost value for cell (higher if below min)

                # Check if overlap duty cycle less than minimum (note: min is calculated from max *active* not overlap)
                if self.overlap_duty_cycle[i] < min_duty_cycle:
                    # log("Increasing permanences for cell %s in region %d due to overlap duty cycle below min: %s" % (i, self.index, min_duty_cycle))
                    self._increase_permanences(i, 0.1 * CONNECTED_PERM, type="proximal")
                    n_boosted += 1

                # TODO: Boost distal here if active_duty_cycle low?

            all_field_sizes.append(self.cells[i].connected_receptive_field_size())

        if n_boosted:
            log("Boosting %d due to low overlap duty cycle" % n_boosted)

        # Update inhibition radius (based on updated active connections in each column)
        self.inhibition_radius = util.average(all_field_sizes) * INHIBITION_RADIUS_DISCOUNT
        min_positive_radius = 1.0
        if self.inhibition_radius and self.inhibition_radius < min_positive_radius:
            self.inhibition_radius = min_positive_radius
Example #42
exact_x = np.array([39.0 / 40, -13.0 / 40, 12.0 / 40])

for i in range(100):
    u = np.random.uniform(-10, 10, 3)
    gs_x_n, gs_iteration, gs_error = iterative_methods.gauss_seidel(A_b_aug, u, e, m)
    j_x_n, j_iteration, j_error = iterative_methods.jacobi(A_b_aug, u, e, m)
    gs_results.append(gs_x_n)
    gs_iterations.append(gs_iteration)

    initial_errors.append(util.norm_inf(u - exact_x))

    jacobi_results.append(j_x_n)
    jacobi_iterations.append(j_iteration)

gs_approx_x = util.average(gs_results)
jacobi_approx_x = util.average(jacobi_results)

gs_error = util.norm_inf(gs_approx_x - exact_x)
jacobi_error = util.norm_inf(jacobi_approx_x - exact_x)

jacobi_gs_ratios = [float(j) / gs for gs, j in zip(gs_iterations, jacobi_iterations)]
average_ratio = sum(jacobi_gs_ratios) / len(jacobi_gs_ratios)

for error, gs_iteration, j_iteration in zip(initial_errors, gs_iterations, jacobi_iterations):
    print('%f, %d, %d' % (error, gs_iteration, j_iteration))

print('')

print('Average Gauss-Seidel Error: %f' % gs_error)
print('Average Jacobi Error: %f' % jacobi_error)
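Here `util.average` is handed a list of NumPy solution vectors and its result is subtracted from `exact_x`, so it is presumably an element-wise mean across runs; `util.norm_inf` is likewise assumed to be the infinity norm. A sketch of what this script seems to expect from those helpers:

import numpy as np

def average(vectors):
    # Element-wise mean of a list of equal-length NumPy arrays.
    return np.mean(np.asarray(vectors), axis=0)

def norm_inf(v):
    # Maximum absolute component (infinity norm).
    return np.max(np.abs(np.asarray(v)))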
Example #43
 def average_response_time(self):
     result = 0
     if self.runs.count() > 0:
         query = [run.response_time for run in self.runs]
         result = util.average(query)
     return result
Example #44
def run(window):
    timer = pygame.time.Clock()
    width = window.get_width()
    height = window.get_height()

    font = pygame.freetype.SysFont('Comic Sans MS', 18)

    boundary = Rectangle((0, 0), (width, height))

    bodies = []

    using_quadtree = False
    for _ in range(0, 10):
        bodies.append(
            Body([random_zero_to_max(width),
                  random_zero_to_max(height)], random(2, 10)))
    _, _, total_energy = calculate_energy(bodies)
    running = True
    frame_rate = []
    while running:
        window.fill((0, 0, 0))
        dt = timer.tick()
        qtree = Quadtree(boundary, 4)

        for body in bodies:
            qtree.insert(Point([body.x[0], body.x[1]]))

        for body in bodies:
            if not body.highlight:
                if using_quadtree:
                    region = Circle([body.x[0], body.x[1]], body.r * 2 - 1)
                    others = qtree.query(region)
                    body.physics(others)
                else:
                    body.physics(bodies)
            body.render(window)
            body.move(dt)
        ajust_speeds(bodies, total_energy, 0.001, 1)

        if len(frame_rate) > 10:
            frame_rate.pop(0)
        frame_rate.append(1000 / dt)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False

            if event.type == pygame.MOUSEBUTTONUP:
                using_quadtree = not using_quadtree

        if using_quadtree:
            text_render, _ = font.render('Using QuadTree', (255, 255, 255))
        else:
            text_render, _ = font.render('Not Using QuadTree', (255, 255, 255))
        rect = text_render.get_rect(center=(400 + 100 / 2, 300 + 20 / 2))
        window.blit(text_render, rect)
        text_render, _ = font.render(
            '{} FPS'.format(round(average(frame_rate), 1)), (255, 255, 255))
        rect = text_render.get_rect(center=(400 + 100 / 2, 350 + 20 / 2))
        window.blit(text_render, rect)

        pygame.display.update()
Example #45
def get_features(interval_data, files, persistent_data, path):
    # Given the telemetry logs in a time window, get features
    data = {'levels': 0, 'me': 0, 'uploaded': 0,'seq':[], 'feature_vectors':[] }
    feature_vector = dict( [(i, 0) for i in get_feature_labels()] )
  
    ir_coord = ( ) # BeginReposition
    fr_coord = ( ) # EndReposition 

    il_coord = ( ) # BeginLink
    fl_coord = ( ) # LinkTo

    id_coord = ( ) # startDrag
    fd_coord = ( ) # endDrag

    #feature_vector['level'] = "slice"
    feature_vector['tutorial_time'] = 0.0

    if len(interval_data) == 0:
        return feature_vector
 
    date_, name_, data_, time_, x_, y_ = interval_data[0].split("\t")
    s_ = datetime.datetime.strptime(date_,'%m-%d-%y-%H-%M-%S')
    s_ = (s_ - epoch).total_seconds() 

    if not persistent_data['start_time']:
        persistent_data['start_time'] = s_

    feature_vector['start_date'] = date_
    feature_vector['start_time'] = float(s_ - persistent_data['start_time'])

    feature_vector['_t_d_component_dragged_me_lst'] = []
    feature_vector['_timestamp_last_dragged_component'] = 0.0

    start_time = None
    missing_files = []

    for int_data in interval_data: 
        date_, name_, data_, time_, x_, y_ = int_data.split("\t")
        s_ = datetime.datetime.strptime(date_,'%m-%d-%y-%H-%M-%S')
        s_ = (s_ - epoch).total_seconds()
        s_ = s_ - persistent_data['start_time']

        if name_ == 'TriggerLoadLevel':
            trackUsed = -1
            if data_:
                if data_.startswith('l'):
                    #print "Level (doesn't have "/data" associated with it): " , d_
                    level_num = data_.replace('level', '').replace('-', '')
                    if level_num: data['seq'].append(level_num)
                    data['levels'] += 1
                else:
                    # Read the data file, get all the components. Reset only when we start a new level (excluding reset).
                    persistent_data["comp_color_map"] = {}
                    persistent_data["comp_link_map"] = {}
                    persistent_data["comp_loc_map"] = {}
                    persistent_data["direction_layout"]  = []
                    persistent_data["color_layout"] = []
                    persistent_data["cur_track"] = -1
                    persistent_data["cur_mouse_comp"] = ""
                    persistent_data["cur_mouse_time"] = 0.0
                    persistent_data['linking'] = False

                    data['me'] += 1
                    if data_ in files:
                        dfile = open(path + '/data' + "/" + files[data_][0],'r')
                        dfiledata = [ d.strip("\n") for d in dfile.readlines() ]
#                        print "File, number of files (should be 1): {0},{1}".format(files[d_][0],len(files[d_]))
                        # Find components section, and get components
                        startParse = False
                        height = 0
                        width = 0
                        incr = 0
                        for line in dfiledata:
                            if line.startswith("board_width"):
                                width = int(line.split("\t")[1])
                            if line.startswith("board_height"):
                                height = int(line.split("\t")[1])
                            if line.startswith('DIRECTIONS'):
                                for i in range(incr+1,incr+height+1):
                                    persistent_data['direction_layout'].append(dfiledata[i])
                            if line.startswith('COLORS'):
                                for i in range(incr+1,incr+height+1):
                                    persistent_data['color_layout'].append(dfiledata[i])
                            if line.startswith('COMPONENTS'): 
                                startParse = True 
                            else:
                                if startParse:
                                    if len(line) == 0: # End of components section
                                        startParse = False 
                                    else:
                                        tmp = line.split("\t")
                                        compData = ast.literal_eval(tmp[6])
                                        if "color" in compData.keys():
                                            # CompID -> color
                                            persistent_data['comp_color_map'][tmp[0]] = int(compData["color"])
                                        if "link" in compData.keys():                             
                                            # CompID -> LinkedID
                                            persistent_data['comp_link_map'][tmp[0]] = str(compData["link"])
                            incr += 1
                        dfile.close()
                        data['uploaded'] += 1
                    else:
                        missing_files.append(data_)
            else:
                data['seq'].append('R')
                feature_vector['me_replays'] += 1
        elif name_ == 'endTutorial':
            feature_vector['tutorial_time'] = (float(s_) - feature_vector['start_time']) / 60.0
        elif name_ == 'SubmitCurrentLevelPlay':
            data['seq'].append('T')
            feature_vector['me_tests']+=1
            if feature_vector['_timestamp_last_dragged_component']:
                feature_vector['_t_d_component_dragged_me_lst'].append(('tests',float(s_)-feature_vector['_timestamp_last_dragged_component']))
        elif name_ =='SubmitCurrentLevelME':
            data['seq'].append('S')
            feature_vector['me_submissions'] += 1
            if feature_vector['_timestamp_last_dragged_component']:
                feature_vector['_t_d_component_dragged_me_lst'].append(('submissions',float(s_)-feature_vector['_timestamp_last_dragged_component']))
        elif name_ == 'tooltip':
            feature_vector['tooltips'] += 1
            feature_vector['tooltip_'+data_] = feature_vector.get('tooltip_'+data_,0)+1
        elif name_ == 'startDrag':
            feature_vector['dragged'] += 1
            feature_vector['dragged_'+data_] = feature_vector.get('dragged_'+data_,0)+1
            feature_vector['_timestamp_last_dragged_component'] = float(s_)
            # Coordinates for Start Drag 
            id_coord = ( float(x_), float(y_) )
        elif name_ == "endDrag":
            # Coordinates for End Drag
            fd_coord = ( float(x_), float(y_) )
            # Euclidean distance 
            dist = 0.0
            if ( len(id_coord) != 0 ): 
                dist = math.sqrt( math.pow((fd_coord[0]-id_coord[0]),2.0) + math.pow((fd_coord[1]-id_coord[1]),2.0) )
            else: 
                dist = 0.0
            id_coord = ()
            fd_coord = ()
            feature_vector['total_dragged_components_dist'] += dist
        elif name_ == 'Destroying':
            feature_vector['trashed'] += 1
            feature_vector['trashed_'+data_] = feature_vector.get('trashed_'+data_,0)+1
        elif name_ == 'OnHoverBehavior':
            # This implies that we are on a track...but we hover on an object though..?
            feature_vector['hover'] += 1
            feature_vector['hover_'+data_] = feature_vector.get('hover_'+data_,0)+1
        elif name_ == "BeginReposition":
            ir_coord = ( float(x_), float(y_) )
        elif name_ == "EndReposition":
            # Check to see if we swap threads. 
            if len(persistent_data['cur_mouse_comp']) == 0:
                continue;
            tmp = persistent_data['cur_mouse_comp'].split("/")
            compID = tmp[1]
            if tmp[1] not in persistent_data['comp_color_map'].keys(): 
                # Component was thrown away. 
                 continue

            feature_vector['num_tracks_used'] += 1
            color = persistent_data['comp_color_map'][tmp[1]]
            for c in persistent_data['comp_color_map'].keys():
                if color == persistent_data['comp_color_map'][c]:
                    feature_vector['total_components'] += 1 

            fr_coord = ( float(x_), float(y_) )
            # Euclidean distance ( don't actually use this, but keeping this in here for future use )
            #dist = 0.0
            #if ( len(ir_coord) != 0 ): 
            #    dist = math.sqrt( math.pow((fr_coord[0]-ir_coord[0]),2.0) + math.pow((fr_coord[1]-ir_coord[1]),2.0) )
            #else: 
            #    dist = 0.0
            ir_coord = ( )
            fr_coord = ( )
        elif name_ == "BeginLink":
            # Before we even link, we need to be over a component
            if len(persistent_data['cur_mouse_comp']) == 0:
                continue;
            tmp = persistent_data['cur_mouse_comp'].split("/")
            compID = tmp[1]
            if tmp[1] not in persistent_data['comp_color_map'].keys(): 
                # Component was thrown away. 
                continue

            feature_vector['num_tracks_used'] += 1
            color = persistent_data['comp_color_map'][tmp[1]]
            for c in persistent_data['comp_color_map'].keys():
                if color == persistent_data['comp_color_map'][c]:
                    feature_vector['total_components'] += 1 

            il_coord = ( float(x_), float(y_) ) 
        elif name_ == "LinkTo":
            persistent_data['linking'] = True
            fl_coord = ( float(x_), float(y_) ) 
            dist = 0.0
            if ( len(il_coord) != 0 ): 
                dist = math.sqrt( math.pow((fl_coord[0]-il_coord[0]),2.0) + math.pow((fl_coord[1]-il_coord[1]),2.0) )
            else: 
                dist = 0.0
            il_coord = ( )
            fl_coord = ( )
        elif name_ == "OnMouseComponent":
            feature_vector['num_mouse_on_comp'] += 1
            persistent_data['cur_mouse_comp'] = data_

            # Get current time..
            persistent_data['cur_mouse_time'] = float(time_)

            # Keep a list of components IDs and their corresponding locations
            y_ = y_.strip("\n")
            tmp = data_.split("/")
            persistent_data['comp_loc_map'][tmp[1]] = (float(x_),float(y_)) 

            if ( len(persistent_data['comp_color_map'].keys()) != 0 and tmp[1] in persistent_data['comp_color_map'] ): 
                if persistent_data['cur_track'] == -1:
                    persistent_data['cur_track'] = persistent_data['comp_color_map'][tmp[1]]
                else:
                    # Check to see if we changed tracks here
                    if persistent_data['cur_track'] != persistent_data['comp_color_map'][tmp[1]]:
                        feature_vector['num_track_changes'] += 1
                        persistent_data['cur_track'] = persistent_data['comp_color_map'][tmp[1]]
                    else:
                        # Staying on the same track
                        feature_vector['num_track_no_changes'] += 1
            if persistent_data['linking']:
                if ( len(persistent_data['comp_color_map'].keys()) != 0 ): 
                    tmp = data_.split("/")  
                    if ( tmp[1] in persistent_data['comp_color_map'] ):
                        feature_vector['num_tracks_used'] += 1
                        color = persistent_data['comp_color_map'][tmp[1]]
                        for c in persistent_data['comp_color_map'].keys():
                            if color == persistent_data['comp_color_map'][c]:
                                feature_vector['total_components'] += 1 
                persistent_data['linking'] = False  
        elif name_ == "OutMouseComponent":
            endTime = float(time_)
            feature_vector['total_time_on_component'] += (endTime - persistent_data['cur_mouse_time'])
        elif name_ == 'ToggleConnectionVisibility' and data_ == 'True':
            feature_vector['connection_visibility'] += 1
        elif name_ == 'ToggleFlowVisibility' and data_ == 'True':
            feature_vector['flow_visibility'] += 1
        elif name_ == 'LockFlowVisibility' and data_ == 'True':
            feature_vector['flow_tooltip'] += 1
        elif name_ == 'TriggerGoalPopUp':
            if 'Successfully' in data_:
                feature_vector['popup_success'] += 1
            elif 'starvation' in data_:
                feature_vector['popup_error_starvation'] += 1
                feature_vector['popup_error'] += 1
            elif 'dead' in data_:
                feature_vector['popup_error_deadend'] += 1
                feature_vector['popup_error'] += 1
            elif 'deliveries' in data_:
                feature_vector['popup_error_delivery'] += 1
                feature_vector['popup_error'] += 1
            else:
                feature_vector['popup_error_badgoals'] += 1
                feature_vector['popup_error'] += 1

    # Once we have searched this, finalize feature_vector
    last_line = interval_data[-1]
    t_, e_, d_, s_, _ = last_line.split('\t', 4)

    s_ = datetime.datetime.strptime(t_, '%m-%d-%y-%H-%M-%S')
    s_ = (s_ - epoch).total_seconds()
    s_ = s_ - persistent_data['start_time']

    feature_vector['end_date'] = t_
    feature_vector['total_time'] = (float(s_) - feature_vector['start_time']) #/ 60.0
    
    # NOTE: This should never occur
    if feature_vector['total_time'] < 0:
        print "start_time, s_: {0},{1}".format(feature_vector['start_time'],s_)
        print interval_data

    me_total = 0.0
    for i in ['tests','submissions','replays']:
        me_total += feature_vector['me_'+i]
    if feature_vector['total_time']:
        feature_vector['r_me_per_minute'] = me_total/(feature_vector['total_time']/60.0)
    for i in ['tests', 'submissions', 'replays']:
        if feature_vector['total_time']:
            feature_vector['r_me_'+i+'_per_minute'] = 1.0*feature_vector['me_'+i] / (feature_vector['total_time'] / 60.0)

            feature_vector['r_num_tracks_used_per_minute'] = 1.0*feature_vector['num_tracks_used'] / (feature_vector['total_time'] / 60.0)
            feature_vector['r_num_track_no_changes_per_minute'] = 1.0*feature_vector['num_track_no_changes'] / (feature_vector['total_time'] / 60.0)
            feature_vector['r_num_track_changes_per_minute'] = 1.0*feature_vector['num_track_changes'] / (feature_vector['total_time'] / 60.0)
            feature_vector['r_mouse_on_comp_per_minute'] = 1.0*feature_vector['num_mouse_on_comp'] / (feature_vector['total_time'] / 60.0)
            feature_vector['r_dragged_components_per_minute'] = 1.0*feature_vector['dragged'] / ( feature_vector['total_time'] / 60.0 )
            feature_vector['r_hover_components_per_minute'] = 1.0*feature_vector['hover'] / ( feature_vector['total_time'] / 60.0 )

        if feature_vector['num_mouse_on_comp'] != 0:
            feature_vector['avg_time_on_component'] = 1.0*feature_vector['total_time_on_component'] / ( 1.0*feature_vector['num_mouse_on_comp'] )
        if feature_vector['dragged'] != 0:
            feature_vector['avg_dragged_components_dist'] = 1.0*feature_vector['total_dragged_components_dist'] / ( 1.0*feature_vector['dragged'] )
        if feature_vector['num_tracks_used'] != 0:
            feature_vector['tracks_used_components_per_track_avg'] = 1.0*feature_vector['total_components'] / ( 1.0*feature_vector["num_tracks_used"] )

        if me_total:
            feature_vector['r_me_' + i] = 1.0*feature_vector['me_' + i] / me_total
    for i in ['_tests', '_submissions','']:
        lst_ = []
        for j in feature_vector['_t_d_component_dragged_me_lst']:
            if j[0]==i or not i:
                lst_.append(j[1])
        if lst_:
            feature_vector['t_d_component_dragged_me' + i + '_avg'] = util.average(lst_)
            feature_vector['t_d_component_dragged_me' + i + '_min'] = min(lst_)

    return feature_vector
Example #46
 def test_single_list(self):
     self.assertEqual(5, average([5]))
     self.assertEqual(0, average([0]))