def __checkPolygoneItersection(self) :
        self._matchPoints = None
        if self.__gridPoints is None : return

        includePolygone,excludePolygone = [],[]
        for item in self.__getIterator() :
            points = item.drawingManager.points()
            if item.includeMode : includePolygone.append(points)
            else: excludePolygone.append(points)

        pointGrid = self.__gridPoints.tolist()
        allInPoints = []
        if includePolygone :
            for points in includePolygone:
                pInPoly = polygone.points_inclusion(pointGrid,points,False)
                if not allInPoints :
                    allInPoints = pInPoly
                else:
                    allInPoints = [x > 0 or y > 0 for x,y in itertools.izip(pInPoly,allInPoints)]

        if not allInPoints :
            allInPoints = [True for x in range(len(self.__gridPoints))]
            
        for points in excludePolygone:
            pInPoly = polygone.points_inclusion(pointGrid,points,False)
            allInPoints = [x > 0 and not y > 0 for x,y in itertools.izip(allInPoints,pInPoly)]
        self._matchPoints = allInPoints
        self.__refreshInfoText()
Example #2
    def ensure_batch(self, keys, computed_list=False):
        """Ensure that the given keys are present in the cache.

        If a key is not present, its entry will be computed.

        Args:
            keys (list): a list of keys
            computed_list (bool): defaults to False. See Returns description.

        Returns:
            if computed_list:
                list(bool): a list of booleans indicating which keys were freshly computed (may include failed computations)
            else:
                int: the number of keys which were freshly computed
        """
        presence = self.cache.contains_batch(keys)
        to_compute = [key for key, present in izip(keys, presence) if not present]
        computed = self.compute_batch(to_compute)

        updates = []
        for key, val in izip(to_compute, computed):
            if not isinstance(val, Failure):
                updates.append((key, val))

        self.cache.set_batch(updates)

        if computed_list:
            return [not p for p in presence]

        return len([p for p in presence if not p])
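
A minimal, self-contained sketch of the same pattern, with a plain dict standing in for the cache and a lambda standing in for compute_batch; every name below is illustrative and not part of the original class.

from itertools import izip

def ensure_batch_sketch(cache, compute, keys):
    # Mirror of ensure_batch: check presence, compute only the misses, store them.
    presence = [k in cache for k in keys]
    to_compute = [k for k, present in izip(keys, presence) if not present]
    cache.update((k, compute(k)) for k in to_compute)
    return len(to_compute)

cache = {'a': 1}
print ensure_batch_sketch(cache, lambda k: len(k), ['a', 'bb', 'ccc'])  # -> 2; 'bb' and 'ccc' are now cached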
Example #3
def combined(c1, exp1, offset1, c2, exp2, offset2, width=500, height=500, real_min=-2.0, real_max=2.0, imag_min=-2.0, imag_max=2.0, pickColor=julia.timeBased, allPerlin = False):
	
	# Generate evenly spaced values over real and imaginary ranges
	real_range = numpy.arange(real_min, real_max, (real_max - real_min) / width)
	imag_range = numpy.arange(imag_max, imag_min, (imag_min - imag_max) / height)
	
	# Obtain image to work with
	image = Image.new('RGB', (width, height), (0, 0, 0))
	drawer = ImageDraw.Draw(image)
	
	# Generate pixel values
	for imag, ipix in itertools.izip(imag_range, range(height)):
		for real, rpix in itertools.izip(real_range, range(width)):
			z = complex(real, imag) + offset1
			n = 255
			while abs(z) < 10 and n >= 5:
				z = z ** exp1 + c1
				n -= 5
			m = 255
			z = (complex(real, imag) + offset2) * 2
			# second escape-time iteration, tracked by m so it can be combined with n below
			while abs(z) < 10 and m >= 5:
				z = z ** exp2 + c2
				m -= 5
			n = n - (m * 5)
			n = n % 255
			drawer.point((ipix, rpix), fill=pickColor(n, 0, real, imag)) # n varies between 255 and 5
	
	#time.increase()
	
	# And return results
	return image
Example #4
def adam(parameters, gradients, learning_rate=0.001, moment1_decay=0.9, moment2_decay=0.999, epsilon=1e-8, P=None):
    shapes = get_shapes(parameters)
    P.t = np.float32(1)

    moment1_acc = [
            create_param(P, "moment1_" + p.name, np.zeros(s))
              for p, s in izip(parameters, shapes)
            ]

    moment2_acc = [
            create_param(P, "moment2_" + p.name, np.zeros(s))
              for p, s in izip(parameters, shapes)
            ]

    deltas = []
    updates = []
    updates.append((P.t,P.t + 1))
    for m1,m2,g in izip(moment1_acc,moment2_acc,gradients):
        new_m1 = moment1_decay * m1 + (1 - moment1_decay) * g
        new_m2 = moment2_decay * m2 + (1 - moment2_decay) * T.sqr(g)
        bc_m1 = new_m1 / (1 - moment1_decay**P.t)
        bc_m2 = new_m2 / (1 - moment2_decay**P.t)
        delta = learning_rate * bc_m1 / (T.sqrt(bc_m2) + epsilon)

        deltas.append(delta)
        updates.append((m1,new_m1))
        updates.append((m2,new_m2))

    return deltas, updates
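
For comparison, a NumPy-only sketch of a single Adam step on one parameter array; the helper below is illustrative and not part of the original Theano code, but it follows the same moment and bias-correction formulas.

import numpy as np

def adam_step_sketch(param, grad, m1, m2, t, learning_rate=0.001,
                     moment1_decay=0.9, moment2_decay=0.999, epsilon=1e-8):
    # Exponential moving averages of the gradient and the squared gradient.
    m1 = moment1_decay * m1 + (1 - moment1_decay) * grad
    m2 = moment2_decay * m2 + (1 - moment2_decay) * grad ** 2
    # Bias-correct both moments, then apply the scaled update.
    bc_m1 = m1 / (1 - moment1_decay ** t)
    bc_m2 = m2 / (1 - moment2_decay ** t)
    new_param = param - learning_rate * bc_m1 / (np.sqrt(bc_m2) + epsilon)
    return new_param, m1, m2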
Example #5
    def resolve_columns(self, row, fields=()):
        """
        This routine is necessary so that distances and geometries returned
        from extra selection SQL get resolved appropriately into Python
        objects.
        """
        values = []
        aliases = self.extra_select.keys()

        # Have to set a starting row number offset that is used for
        # determining the correct starting row index -- needed for
        # doing pagination with Oracle.
        rn_offset = 0
        if SpatialBackend.oracle:
            if self.high_mark is not None or self.low_mark: rn_offset = 1
        index_start = rn_offset + len(aliases)

        # Converting any extra selection values (e.g., geometries and
        # distance objects added by GeoQuerySet methods).
        values = [self.convert_values(v, self.extra_select_fields.get(a, None))
                  for v, a in izip(row[rn_offset:index_start], aliases)]
        if SpatialBackend.oracle:
            # This is what happens normally in OracleQuery's `resolve_columns`.
            for value, field in izip(row[index_start:], fields):
                values.append(self.convert_values(value, field))
        else:
            values.extend(row[index_start:])
        return values
Example #6
    def convert(self, dat, argument=None):
        if argument is None:
            argument = swig_paddle.Arguments.createArguments(0)
        assert isinstance(argument, swig_paddle.Arguments)
        argument.resize(len(self.input_types))

        scanners = [
            DataProviderConverter.create_scanner(i, each_type)
            for i, each_type in enumerate(self.input_types)
        ]

        for each_sample in dat:
            for each_step, scanner in itertools.izip(each_sample, scanners):
                scanner.pre_scan(each_step)

        for scanner in scanners:
            scanner.finish_pre_scan(argument)

        for each_sample in dat:
            for each_step, scanner in itertools.izip(each_sample, scanners):
                scanner.scan(each_step)

        for scanner in scanners:
            scanner.finish_scan(argument)

        return argument
Example #7
    def compute_errors(self, next_unit_errors, desired_output, outputs, next_unit_weights, activation_derivative):
        """
        :Raises NpyTransferFunctionError:
            If ErrorLinear is used in an output `Unit`.
        """
        if next_unit_weights is None:
            raise NpyTransferFunctionError, 'ErrorLinear cannot be used in an output unit.'
        
        # Pre-allocate the error_sum list so that we can loop on it
        error_sum = []
        for i in range(len(next_unit_weights[0])):
            error_sum.append(0)

        # Compute the error_sum values
        for nexterror, weights in itertools.izip(next_unit_errors, next_unit_weights):
            for weight, error_sum_id in itertools.izip(weights, range(len(error_sum))): 
                error_sum[error_sum_id] = error_sum[error_sum_id] + nexterror * weight 

        # Multiply by the derivative of the activation function
        # to compute the final value
        errors = [] 
        for currenterror, computed in itertools.izip(error_sum, outputs):
            errors.append(activation_derivative(computed) * currenterror)

        return errors
Example #8
 def inflect(self, testing_prefix, dp_weight=0.5):
     """Return a list containing inflected versions of the sentences
     described by the files under *testing_prefix*."""
     lr_weight = 1 - dp_weight
     inflected = []
     with utf8open(testing_prefix + ".lemma") as lemma_file, utf8open(testing_prefix + ".tree") as tree_file:
         for lemma_line, tree_line in izip(lemma_file, tree_file):
             l_sentence = lemma_line.split()
             tree = DepTree(tree_line)
             ngrams = dep_ngrams(2, l_sentence, l_sentence, tree)  # not used here
             forms = []
             last_lemma = None
             for lemma, dep_ngram in izip(l_sentence, ngrams):
                 if not self.inflections[lemma]:
                     # We've never seen this lemma before, so just
                     # output it as-is and move on.
                     forms.append(lemma)
                     continue
                 best_form = None
                 best_score = float("-inf")
                 for form in self.inflections[lemma]:
                     if last_lemma is None:
                         context = [""]
                     else:
                         context = ["{}~{}".format(last_lemma, forms[-1])]
                     score = lr_weight * self.lr_model.prob(
                         "{}~{}".format(lemma, form), context
                     ) + dp_weight * self.dp_model.prob(form, dep_ngram[:-1])
                     if score > best_score:
                         best_form = form
                         best_score = score
                 forms.append(best_form)
                 last_lemma = lemma
             inflected.append(" ".join(forms))
     return inflected
Example #9
def check(process_output, judge_output, precision, **kwargs):
    from itertools import izip
    process_lines = filter(None, process_output.split('\n'))
    judge_lines = filter(None, judge_output.split('\n'))
    if len(process_lines) != len(judge_lines):
        return False
    epsilon = 10 ** -int(precision)
    try:
        for process_line, judge_line in izip(process_lines, judge_lines):
            process_floats = process_line.split()
            judge_floats = judge_line.split()
            for process_token, judge_token in izip(process_floats, judge_floats):
                try:
                    judge_float = float(judge_token)
                except:
                    if process_token != judge_token:
                        return False
                else:
                    process_float = float(process_token)
                    if abs(process_float - judge_float) > epsilon and \
                            (abs(judge_float) < epsilon or abs(1.0 - process_float / judge_float) > epsilon):
                        return False
    except:
        return False
    return True
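
A quick illustration of the tolerance logic above (hypothetical values): precision sets epsilon to 10**-precision, so a difference in the fourth decimal place passes at precision 3 but fails at precision 5.

print check('1.2345\n', '1.2346\n', precision=3)  # True:  |diff| <= 1e-3
print check('1.2345\n', '1.2346\n', precision=5)  # False: |diff| >  1e-5, absolutely and relatively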
Example #10
    def test_izip(self):
        import itertools

        it = itertools.izip()
        raises(StopIteration, it.next)

        obj_list = [object(), object(), object()]
        it = itertools.izip(obj_list)
        for x in obj_list:
            assert it.next() == (x, )
        raises(StopIteration, it.next)
        
        it = itertools.izip([1, 2, 3], [4], [5, 6])
        assert it.next() == (1, 4, 5)
        raises(StopIteration, it.next)
        
        it = itertools.izip([], [], [1], [])
        raises(StopIteration, it.next)

        # Up to one additional item may be consumed per iterable, as per python docs
        it1 = iter([1, 2, 3, 4, 5, 6])
        it2 = iter([5, 6])
        it = itertools.izip(it1, it2)
        for x in [(1, 5), (2, 6)]:
            assert it.next() == x
        raises(StopIteration, it.next)
        assert it1.next() in [3, 4]
Example #11
    def define(self, *names, **kwargs):
        """Define variable in the problem

        Variables must be defined before they can be accessed by var() or set().
        This function takes keyword arguments lower and upper to define the
        bounds of the variable (default: -inf to inf). The keyword argument types
        can be used to select the type of the variable (only Continuous is supported).
        """

        names = tuple(names)
        lower = kwargs.get('lower', None)
        upper = kwargs.get('upper', None)
        vartype = kwargs.get('types', None)

        # Repeat values if a scalar is given
        if lower is None or isinstance(lower, numbers.Number):
            lower = repeat(lower, len(names))
        if upper is None or isinstance(upper, numbers.Number):
            upper = repeat(upper, len(names))
        if vartype is None or vartype in (VariableType.Continuous, VariableType.Binary,
                                            VariableType.Integer):
            vartype = repeat(vartype, len(names))

        lp_names = tuple(next(self._var_names) for name in names)

        # Assign default values
        vartype = (VariableType.Continuous if value is None else value for value in vartype)

        self._variables.update(izip(names, lp_names))
        for name, lower, upper, t in izip(lp_names, lower, upper, vartype):
            if t != VariableType.Continuous:
                raise ValueError('Solver does not support non-continuous types')
            self._p.add_variable(0, lower, upper, name)
Example #12
def gen_irregular_axes(axes_prop, spacing=0.05, offset=0.05, axis='vert', share=False):
    if axis not in ['horiz', 'vert']:
        raise Exception("'axis' must be either 'horiz' or 'vert'")

    axes_prop = np.array(axes_prop).ravel()
    axes_prop = axes_prop[::-1]
    axes_prop /= sum(axes_prop) 
    axes_prop *= (1-offset*2)
    axes_starts = np.r_[0, np.cumsum(axes_prop)]

    if axis=='vert':
        stuff = [ (s, e-s-spacing) for s, e in izip(axes_starts[:-1], axes_starts[1:])]
        rectangles = [ [spacing, offset+ystart, 1-spacing*2, height] for ystart, height in stuff]
    else:
        stuff = [ (s, e-s-spacing) for s, e in izip(axes_starts[:-1], axes_starts[1:])]
        rectangles = [ [ystart, spacing, height, 1-spacing] for ystart, height in stuff]

    axes = [None]*len(rectangles)
    for k,r in enumerate(rectangles):
        if share and k > 0 and axis == 'vert':
            axes[k] = plt.axes(r, sharex=axes[0])
        elif share and k > 0 and axis=='horiz':
            axes[k] = plt.axes(r, sharey=axes[0])
        else:
            axes[k] = plt.axes(r)

        if k > 0 and share and axis=='vert':
            #axes[k].set_xticks([])
            #axes[k].set_xticklabels([])
            for label in axes[k].get_xticklabels():
                label.set_visible(False)


    return axes            
Example #13
 def unpack(self, buf):
     """Unpack packet header fields from buf, and set self.data."""
     for k, v in itertools.izip(self.__hdr_fields__, struct.unpack(self.__hdr_fmt__, buf[: self.__hdr_len__])):
         setattr(self, k, v)
     for k, v in itertools.izip(self.__hdr_fields__, self.__hdr_names__):
         setattr(self, k + "_name", v)
     self.data = buf[self.__hdr_len__ :]
Example #14
def spike_find(input_array, t, max_spike_width):
    """
    Find the spikes in the input_array.
    Inputs:
        input_array              : a numpy array (1-dimensional) holding 
                                   floats.
        t                        : threshold for spike detection
        max_spike_width          : crossings further apart than this will 
                                   disqualify the spike
    Returns:
        spikes                   : a numpy array (1-dimensional) holding
                                   integers (spike index values)
    """
    crossings = fast_thresh_detect(input_array, threshold=t)
    spikes = []
    if len(crossings) > 1:
        if t > 0.0:
            # find first positive crossing then pair up crossings
            first_p = numpy.argwhere(input_array[crossings] < t)[0]
            for p, n in itertools.izip(crossings[first_p::2], crossings[first_p + 1 :: 2]):
                if abs(p - n) <= max_spike_width:
                    peak_index = numpy.argsort(input_array[p : n + 1])[-1] + p
                    spikes.append(peak_index)
        else:
            # find first negative crossing then pair up crossings
            first_n = numpy.argwhere(input_array[crossings] > t)[0]
            for n, p in itertools.izip(crossings[first_n::2], crossings[first_n + 1 :: 2]):
                if abs(p - n) <= max_spike_width:
                    peak_index = numpy.argsort(input_array[n : p + 1])[0] + n
                    spikes.append(peak_index)
    return numpy.array(spikes)
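
The crossing-pairing idiom above (stepping through alternating threshold crossings two at a time) in isolation, with made-up crossing indices:

from itertools import izip

crossings = [3, 7, 21, 25, 40, 46]   # hypothetical sample indices of threshold crossings
for up, down in izip(crossings[0::2], crossings[1::2]):
    print up, down                   # pairs each upward crossing with the following downward one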
Example #15
def show_top(model):
    top_prefixes = heapq.nlargest(10, izip(model.base.theta_p.counts, model.prefix_vocabulary))
    n_prefixes = sum(1 for c in model.base.theta_p.counts if c > 0)
    logging.info('Top prefixes (10/%d): %s', n_prefixes, ' '.join(prefix+':'+str(c) for c, prefix in top_prefixes))
    top_suffixes = heapq.nlargest(10, izip(model.base.theta_s.counts, model.suffix_vocabulary))
    n_suffixes = sum(1 for c in model.base.theta_s.counts if c > 0)
    logging.info('Top suffixes (10/%d): %s', n_suffixes, ' '.join(suffix+':'+str(c) for c, suffix in top_suffixes))
Example #16
    def compute_update(self, Alpha, index, unit, outputs, errors, weight_update, user_data_in, user_data_out): 

        vgamma  = 0.001
        vlambda = 0.1
        valpha  = 0.001
    
        # Unpack the per-step payload (assumed here to arrive via user_data_in):
        # previous eligibility traces, the reward, and the next state's output.
        data = user_data_in
        eprevs     = data[0][index]
        reward     = data[1]
        outputnext = data[2]

        output = outputs[-1][0]

        es = []
        for eprev, weight_update in itertools.izip(eprevs, weight_update):
            es.append(vgamma * vlambda * eprev + weight_update * output)
        
        weights = unit.get_weights()
        next_weights = []
        for node_weights, e in itertools.izip(weights, es): 
            #print weight, outputnext, output, e
            next_node_weights = []
            for weight in node_weights:
                next_node_weights.append(weight + valpha *(reward + vgamma * outputnext[0] - output)*e)

            next_weights.append(next_node_weights)

        user_data_out.append(es)

        return next_weights
Example #17
def fill_test_cases_with_values(intermediate_variables, scenarios, simulations, use_label, variables):
    output_test_cases = []
    for scenario, simulation in itertools.izip(scenarios, simulations):
        if intermediate_variables:
            holders = []
            for step in simulation.traceback.itervalues():
                holder = step['holder']
                if holder not in holders:
                    holders.append(holder)
        else:
            holders = [
                simulation.get_holder(variable)
                for variable in variables
                ]
        test_case = scenario.to_json()['test_case']
        for holder in holders:
            variable_value_json = holder.to_value_json(use_label = use_label)
            if variable_value_json is None:
                continue
            variable_name = holder.column.name
            entity_members = test_case[holder.entity.key_plural]
            if isinstance(variable_value_json, dict):
                for entity_member_index, entity_member in enumerate(entity_members):
                    entity_member[variable_name] = {
                        period: array_json[entity_member_index]
                        for period, array_json in variable_value_json.iteritems()
                        }
            else:
                for entity_member, cell_json in itertools.izip(entity_members, variable_value_json):
                    entity_member[variable_name] = cell_json
        output_test_cases.append(test_case)
    return output_test_cases
Example #18
    def collect(self):
        """
        After adding the desired aggregation columns, `collect`
        finalizes the groupby operation by converting the
        GroupbyTable into a DataTable.

        The first columns of the resulting table are the groupfields,
        followed by the aggregation columns specified in preceding
        `agg` calls.
        """
        # The final order of columns is determined by the
        # group keys and the aggregation columns
        final_field_order = list(self.__groupfields) + self.__grouptable.fields

        # Transform the group key rows into columns
        col_values = izip(*self.__grouptable['groupkey'])

        # Assign the columns to the table with the relevant name
        for groupfield, column in izip(self.__groupfields, col_values):
            self.__grouptable[groupfield] = column

        # Reorder the columns as defined above
        self.__grouptable.reorder(final_field_order)

        del self.__grouptable['groupkey']
        return self.__grouptable
Example #19
def nsmallest(n, iterable, key=None):
    """Find the n smallest elements in a dataset.

    Equivalent to:  sorted(iterable, key=key)[:n]
    """
    # Short-cut for n==1 is to use min() when len(iterable)>0
    if n == 1:
        it = iter(iterable)
        head = list(islice(it, 1))
        if not head:
            return []
        if key is None:
            return [min(chain(head, it))]
        return [min(chain(head, it), key=key)]

    # When n>=size, it's faster to use sorted()
    try:
        size = len(iterable)
    except (TypeError, AttributeError):
        pass
    else:
        if n >= size:
            return sorted(iterable, key=key)[:n]

    # When key is none, use simpler decoration
    if key is None:
        it = izip(iterable, count())                        # decorate
        result = _nsmallest(n, it)
        return map(itemgetter(0), result)                   # undecorate

    # General case, slowest method
    in1, in2 = tee(iterable)
    it = izip(imap(key, in1), count(), in2)                 # decorate
    result = _nsmallest(n, it)
    return map(itemgetter(2), result)                       # undecorate
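
This matches the standard-library heapq.nsmallest; typical usage (results shown for illustration):

from heapq import nsmallest

print nsmallest(3, [9, 1, 8, 2, 7, 3])                  # [1, 2, 3]
print nsmallest(2, ['pear', 'fig', 'banana'], key=len)  # ['fig', 'pear']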
Example #20
def _group_arguments(tokens, predicate_positions, boundaries, labels):
    """
    Groups words pertaining to each argument and returns a dictionary for each predicate.
    """
    arg_structs = []
    
    for predicate_position, pred_boundaries, pred_labels in izip(predicate_positions,
                                                                 boundaries,
                                                                 labels):
        structure = {}

        for token, boundary_tag in izip(tokens, pred_boundaries):
            if boundary_tag == 'O':
                continue

            elif boundary_tag == 'B':
                argument_tokens = [token]

            elif boundary_tag == 'I':
                argument_tokens.append(token)

            elif boundary_tag == 'E':
                argument_tokens.append(token)
                tag = pred_labels.pop(0)
                structure[tag] = argument_tokens

            else:
                # boundary_tag == 'S'
                tag = pred_labels.pop(0)
                structure[tag] = [token]

        predicate = tokens[predicate_position]
        arg_structs.append((predicate, structure))

    return arg_structs
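
A small worked call, assuming the function above is in scope; the BIO/ES-style boundary tags and the semantic-role labels are illustrative only.

tokens = ['John', 'lives', 'in', 'New', 'York']
result = _group_arguments(tokens,
                          predicate_positions=[1],          # the predicate is "lives"
                          boundaries=[['S', 'O', 'B', 'I', 'E']],
                          labels=[['A0', 'AM-LOC']])
# result == [('lives', {'A0': ['John'], 'AM-LOC': ['in', 'New', 'York']})]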
Example #21
    def get_builds_for_commits(self, commits, project, all_builds):
        builds_qs = list(Build.query.options(
            joinedload('author'),
            contains_eager('source'),
        ).join(
            Source, Source.id == Build.source_id,
        ).filter(
            Build.source_id == Source.id,
            Build.project_id == project.id,
            Build.status.in_([Status.finished, Status.in_progress, Status.queued]),
            Source.repository_id == project.repository_id,
            Source.revision_sha.in_(c['id'] for c in commits),
            *build_type.get_any_commit_build_filters()
        ).order_by(Build.date_created.asc()))

        if not all_builds:
            # this implicitly only keeps the last build for a revision
            return dict(
                (b.source.revision_sha, d)
                for b, d in itertools.izip(builds_qs, self.serialize(builds_qs))
            )
        else:
            builds_map = defaultdict(list)
            for b, d in itertools.izip(builds_qs, self.serialize(builds_qs)):
                builds_map[b.source.revision_sha].append(d)
            return dict(builds_map)
Example #22
    def _compute_convex_hull(self):
        """Extract the convex hull from the triangulation information.

        The output will be a list of point_id's in counter-clockwise order
        forming the convex hull of the data set.
        """
        border = (self.triangle_neighbors == -1)

        edges = {}
        edges.update(dict(izip(self.triangle_nodes[border[:, 0]][:, 1],
                               self.triangle_nodes[border[:, 0]][:, 2])))
        edges.update(dict(izip(self.triangle_nodes[border[:, 1]][:, 2],
                               self.triangle_nodes[border[:, 1]][:, 0])))
        edges.update(dict(izip(self.triangle_nodes[border[:, 2]][:, 0],
                               self.triangle_nodes[border[:, 2]][:, 1])))

        # Take an arbitrary starting point and its subsequent node
        hull = list(edges.popitem())
        while edges:
            hull.append(edges.pop(hull[-1]))

        # hull[-1] == hull[0], so remove hull[-1]
        hull.pop()

        return hull
Example #23
File: grammar.py  Project: Glank/rdp
 def __remove_left_recursion__(self, head):
     rules = self.rules_by_head(head)
     betas = []
     alphas = []
     for rule in rules:
         if rule.tail[0]==head:
             betas.append(rule)
         else:
             alphas.append(rule)
     if not betas or not alphas:
         return False
     #needs recursion removed
     self.__assert_parent__()
     beta_indexes = []
     for b in xrange(len(betas)):
         rule = betas[b]
         beta_indexes.append(self.index(rule))
         betas[b] = rule.tail[1:]
     alpha_indexes = []
     for a in xrange(len(alphas)):
         rule = alphas[a]
         alpha_indexes.append(self.index(rule))
         alphas[a] = rule.tail
     z = self.__gen_nonterminal__(prefix=head.name)
     for i,alpha in izip(alpha_indexes, alphas):
         self.rules[i] = Rule(head, alpha+[z])
     for i,beta in izip(beta_indexes, betas):
         self.rules[i] = Rule(z, beta+[z])
     added_index = len(self.rules)
     self.rules.append(Rule(z, []))       
     redo = RedoLeftRecursion(added_index, alpha_indexes, beta_indexes)
     self.__commit_transform__(redo)
     return True
Example #24
def populate_foreign_key_caches(model, objects_to_populate, fields=None):
    """
    Populates caches for the given related Model in instances of objects
    which have a ForeignKey relationship to it, specified as a list of
    (object list, related attribute name list) two-tuples.

    If a list of field names is given, only the given fields will be
    looked up and related object caches will be populated with a dict of
    the specified fields. Otherwise, complete model instances will be
    retrieved.
    """
    # Get all related object ids for the appropriate fields
    related_object_ids = []
    for objects, attrs in objects_to_populate:
        related_object_ids.append(tuple(tuple(getattr(obj, '%s_id' % attr)
                                              for attr in attrs)
                                  for obj in objects))
    unique_ids = tuple(set(pk for pk in flatten(related_object_ids) if pk))
    related_objects = fetch_model_dict(model, unique_ids, fields)

    # Fill related object caches
    for (objects, attrs), related_ids in itertools.izip(objects_to_populate,
                                                        related_object_ids):
        for obj, related_ids_for_obj in itertools.izip(objects,
                                                       related_ids):
            for attr, related_object in itertools.izip(attrs, (related_objects.get(pk, None)
                                                               for pk in related_ids_for_obj)):
                setattr(obj, '_%s_cache' % attr, related_object)
Example #25
def computeConvexHull(schema, schema_graph, values_dict, curve_path):
	'''Computes the convex hull by starting with an initial hull, and then expanding
	if there are some extrusions to consider.'''
	initial_hull = computeInnerHull(schema, curve_path)
	final_hull = []
	if len([e for e in schema["Edges"].keys() if schema["Edges"][e]["Type"] == "CircularArc"]) == 0:
		for hull in initial_hull:
			final_hull.append([(schema["Vertices"][str(h)]["Position"]["X"], schema["Vertices"][str(h)]["Position"]["Y"]) 
				for h in hull])
		return final_hull
	else: 
		'''Only goes through this if there exists a circular arc.'''
		for hull in initial_hull:
			new_hull = [(schema["Vertices"][str(h)]["Position"]["X"], schema["Vertices"][str(h)]["Position"]["Y"]) 
				for h in hull]
			points = hull+[hull[0]] #let's not talk about this hack...
			for a,b in izip(points, islice(points, 1, None)):
				path = [p for p in nx.shortest_path(schema_graph, int(a), int(b))]
				arcs_on_path = [arc for arc in [edgeLookup((x,y), schema) 
					for x,y in izip(path, islice(path, 1, None))] 
					if schema["Edges"][arc]["Type"] == "CircularArc"]
				extrusions = [x for x in arcs_on_path #Dirty trick to check if an arc is an extrusion from the hull.
					if path.index(int(values_dict[x]["start_id"])) > path.index(int(values_dict[x]["finish_id"]))]
				for x in extrusions:
					new_hull.extend(values_dict[x]["box_points"])
			final_hull.append(grahams_hull(new_hull))
		return final_hull
Example #26
    def __init__ (self, data=[], columns=[], **kw):
        """
        @param data: the list of objects to be shown.

        @param columns: a list of B{Column}s that describe
            what to show in the grid, how to obtain it from the
            objects, and eventually how to save data back to them.
        """
        self._tv= gtk.TreeView ()

        self._columns= columns
        # build the tv columns and the data types tuple
        (self._tvcolumns, self._dataspec)= izip(*izip(
            [ gtk.TreeViewColumn (c.name) for c in columns ],
            repeat(str)
            ))
        self.data= data

        # put the TreeView in a scrolled window
        self._widget= gtk.ScrolledWindow ()
        self._widget.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
        self._widget.add (self._tv)

        # add the columns and attrs
        for i in xrange (len (columns)):
            c= self._tvcolumns[i]
            crt= gtk.CellRendererText ()
            c.pack_start (crt, True)
            c.add_attribute (crt, 'text', i)
            self._tv.append_column (c)

        self._tv.connect ('key-release-event', self._keyreleased)
        self._tv.connect ('cursor_changed', self._cursor_changed)

        super (SelectionGrid, self).__init__ (**kw)
Example #27
    def str2(self, maxlen=100000):
        include_denom = bool(self.bases_d)

        s = ''
        #numerator
        if include_denom and len(self.coefs_n)>1: s += '('
        numer_s = ['%s' % coefStr(self.coefs_n[0])]
        for (coef, base) in itertools.izip(self.coefs_n[1:], self.bases_n):
            numer_s += ['%s*%s' % (coefStr(coef), base)]
        s += ' + '.join(numer_s)
        if include_denom and len(self.coefs_n)>1: s += ')'

        #denominator
        if self.bases_d:
            s += ' / ('
            denom_s = ['1.0']
            for (coef, base) in itertools.izip(self.coefs_d, self.bases_d):
                denom_s += ['%s*%s' % (coefStr(coef), base)]
            s += ' + '.join(denom_s)
            s += ')'
            
        #change xi to actual variable names
        for var_i in xrange(len(self.varnames)-1, -1, -1):
            s = s.replace('x%d' % var_i, self.varnames[var_i])
        s = s.replace('+ -', '- ')
            
        #truncate long strings
        if len(s) > maxlen:
            s = s[:maxlen] + '...' 

        return s
Example #28
def main():

    script_dir, out_dir = get_paths()

    test_files = []
    inner_html_files = []

    if len(sys.argv) > 2:
        test_iterator = itertools.izip(
            itertools.repeat(False),
            sorted(os.path.abspath(item) for item in
                   glob.glob(os.path.join(sys.argv[2], "*.dat"))))
    else:
        test_iterator = itertools.chain(
            itertools.izip(itertools.repeat(False),
                           sorted(support.get_data_files("tree-construction"))),
            itertools.izip(itertools.repeat(True),
                           sorted(support.get_data_files(
                        os.path.join("tree-construction", "scripted")))))

    for (scripted, test_file) in test_iterator:
        input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
        if scripted:
            input_file_name = "scripted_" + input_file_name
        test_data = support.TestData(test_file)
        test_filename, inner_html_file_name = make_tests(script_dir, out_dir,
                                                         input_file_name, test_data)
        if test_filename is not None:
            test_files.append(test_filename)
        if inner_html_file_name is not None:
            inner_html_files.append(inner_html_file_name)
Example #29
def adadelta(parameters, gradients, rho=0.95, eps=1e-6):
    """
        adadelta : training algorithm
    """
    # create variables to store intermediate updates
    gradients_sq = [theano.shared(numpy.zeros(p.get_value().shape,
    							  dtype=theano.config.floatX))
    			    for p in parameters]
    deltas_sq = [theano.shared(numpy.zeros(p.get_value().shape,
    						   dtype=theano.config.floatX))
    		    for p in parameters]

    # calculates the new "average" delta for the next iteration
    gradients_sq_new = [rho*g_sq + (1-rho)*(g**2)
    				   for g_sq,g in izip(gradients_sq, gradients)]

    # calculates the step in direction. The square root is an approximation to getting the RMS for the average value
    deltas = [(T.sqrt(d_sq+eps)/T.sqrt(g_sq+eps))*grad
    		 for d_sq,g_sq,grad in izip(deltas_sq,gradients_sq_new,gradients)]

    # calculates the new "average" deltas for the next step.
    deltas_sq_new = [rho*d_sq + (1-rho)*(d**2) for d_sq,d in izip(deltas_sq,deltas)]

    # Prepare it as a list f
    gradient_sq_updates = zip(gradients_sq,gradients_sq_new)
    deltas_sq_updates = zip(deltas_sq,deltas_sq_new)
    parameters_updates = [(p,T.clip(p - d, -15,15)) for p,d in izip(parameters,deltas)]

    return gradient_sq_updates + deltas_sq_updates + parameters_updates
Example #30
 def _process_csv(self, file_):
     def unicode_rows(reader):
         for row in reader:
             try:
                 yield [cell.decode('utf-8') for cell in row]
             except UnicodeDecodeError:
                 raise forms.ValidationError(
                     'Problems with character encoding. Use UTF-8'
                 )
     reader = unicode_rows(csv.reader(file_))
     update_per_sheet = {'csv': {}}
     add_per_sheet = {'csv': []}
     name_row = next(reader)
     update = name_row[0] == 'id'
     if update:
         name_row = name_row[1:]
         for row in reader:
             asset_id = int(row[0])
             update_per_sheet['csv'].setdefault(asset_id, {})
             for key, value in it.izip(name_row, row[1:]):
                 update_per_sheet['csv'][asset_id][key] = value
     else:
         for row in reader:
             asset_data = {}
             add_per_sheet['csv'].append(asset_data)
             for key, value in it.izip(name_row, row[:]):
                 asset_data[key] = value
     names_per_sheet = {'csv': name_row}
     return names_per_sheet, update_per_sheet, add_per_sheet
Example #31
    def generate(self):
        """
            generates the positions
        """
        # Generate the initial walker positions, first checking they are within the parameter bounds.

        InputValues = []

        if self.sampler.param_legend['F_STAR10'] is True:
            InputValues.append(self.sampler.FiducialParams['F_STAR10'])

        if self.sampler.param_legend['ALPHA_STAR'] is True:
            InputValues.append(self.sampler.FiducialParams['ALPHA_STAR'])

        if self.sampler.param_legend['F_ESC10'] is True:
            InputValues.append(self.sampler.FiducialParams['F_ESC10'])

        if self.sampler.param_legend['ALPHA_ESC'] is True:
            InputValues.append(self.sampler.FiducialParams['ALPHA_ESC'])

        if self.sampler.param_legend['M_TURN'] is True:
            InputValues.append(self.sampler.FiducialParams['M_TURN'])

        if self.sampler.param_legend['t_STAR'] is True:
            InputValues.append(self.sampler.FiducialParams['t_STAR'])

        if self.sampler.param_legend['ZETA'] is True:
            InputValues.append(self.sampler.FiducialParams['ZETA'])

        if self.sampler.param_legend['MFP'] is True:
            InputValues.append(self.sampler.FiducialParams['MFP'])

        if self.sampler.param_legend['TVIR_MIN'] is True:
            InputValues.append(self.sampler.FiducialParams['TVIR_MIN'])

        if self.sampler.param_legend['L_X'] is True:
            InputValues.append(self.sampler.FiducialParams['L_X'])

        if self.sampler.param_legend['NU_X_THRESH'] is True:
            InputValues.append(self.sampler.FiducialParams['NU_X_THRESH'])

        if self.sampler.param_legend['X_RAY_SPEC_INDEX'] is True:
            InputValues.append(self.sampler.FiducialParams['X_RAY_SPEC_INDEX'])

        if self.sampler.param_legend['SIGMA_8'] is True:
            InputValues.append(self.sampler.FiducialParams['SIGMA_8'])

        if self.sampler.param_legend['littleh'] is True:
            InputValues.append(self.sampler.FiducialParams['littleh'])

        if self.sampler.param_legend['OMEGA_M'] is True:
            InputValues.append(self.sampler.FiducialParams['OMEGA_M'])

        if self.sampler.param_legend['OMEGA_b'] is True:
            InputValues.append(self.sampler.FiducialParams['OMEGA_b'])

        if self.sampler.param_legend['NS'] is True:
            InputValues.append(self.sampler.FiducialParams['NS'])

        ParamWidths = []
        for i in range(len(InputValues)):
            ParamWidths.append(
                (self.sampler.upperbounds[i] - self.sampler.lowerbounds[i]) /
                3.)

        print('Generate Start Positions')
        start_positions = [
            InputValues +
            np.random.normal(size=self.sampler.paramCount) * ParamWidths
            for i in xrange(self.sampler.nwalkers)
        ]

        pool = multiprocessing.Pool(self.sampler.threadCount)

        M = pool.map

        returned_list = list(
            M(
                ICposition_star,
                itertools.izip(start_positions,
                               itertools.repeat(self.sampler.lowerbounds),
                               itertools.repeat(self.sampler.upperbounds),
                               itertools.repeat(InputValues),
                               itertools.repeat(ParamWidths))))

        print('Start Positions Generated')
        return returned_list
Example #32
def epitope_distance(aaA, aaB):
    """Return distance of sequences aaA and aaB by comparing epitope sites"""
    epA = epitope_sites(aaA)
    epB = epitope_sites(aaB)
    distance = sum(a != b for a, b in izip(epA, epB))
    return distance
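
The izip idiom here is just a Hamming-style count over aligned positions; epitope_sites is project-specific, but the counting itself can be shown directly:

from itertools import izip

a = 'ACDEFG'
b = 'ACDQFG'
print sum(x != y for x, y in izip(a, b))   # 1 mismatching position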
Example #33
    rowstartidx_train[1:] = np.cumsum(example_nnz_train)
    new_y_train = np.zeros(n_data_train + n_data_dev, dtype=np.int16)
    new_y_train[:n_data_train] = y_train
    new_y_train[n_data_train:] = y_dev
    y_train = new_y_train

dset = BinaryArrayDataset(all_feature_ids_train, example_nnz_train,
                          rowstartidx_train, y_train)
print y_train[12]

dev_scores = get_dev_scores(W)
accuracy = get_scores_accuracy(dev_scores)
hinge_losses = np.array(
    tuple(1 + s[weights_classes39 != ylabel].max() -
          s[weights_classes39 == ylabel].max()
          for s, ylabel in itertools.izip(dev_scores, y_dev39)))
mid_hinge_value = np.sort(hinge_losses)[int(len(hinge_losses) / 2)]
self_pace_K = 1. / mid_hinge_value
print self_pace_K
# accuracy, cmat = dev_accuracy(W)
print "old accuracy = %g" % accuracy
if args.do_projection:
    print "do_projection = True"
else:
    print "do_projection = False"

start_t = args.start_t
for l in args.l:
    print "Using lambda = %g " % l

    W_trained2 = W.ravel().copy()
Example #34
    def _SetIam(self):
        """Set IAM policy for given wildcards on the command line."""

        self.continue_on_error = False
        self.recursion_requested = False
        self.all_versions = False
        force_etag = False
        etag = ''
        if self.sub_opts:
            for o, arg in self.sub_opts:
                if o in ['-r', '-R']:
                    self.recursion_requested = True
                elif o == '-f':
                    self.continue_on_error = True
                elif o == '-a':
                    self.all_versions = True
                elif o == '-e':
                    etag = str(arg)
                    force_etag = True
                else:
                    self.RaiseInvalidArgumentException()

        file_url = self.args[0]
        patterns = self.args[1:]

        # Load the IAM policy file and raise error if the file is invalid JSON or
        # does not exist.
        try:
            with open(file_url, 'r') as fp:
                policy = json.loads(fp.read())
        except IOError:
            raise ArgumentException(
                'Specified IAM policy file "%s" does not exist.' % file_url)
        except ValueError:
            raise ArgumentException('Invalid IAM policy file "%s".' % file_url)

        bindings = policy.get('bindings', [])
        if not force_etag:
            etag = policy.get('etag', '')

        policy_json = json.dumps({'bindings': bindings, 'etag': etag})
        try:
            policy = protojson.decode_message(apitools_messages.Policy,
                                              policy_json)
        except DecodeError:
            raise ArgumentException(
                'Invalid IAM policy file "%s" or etag "%s".' %
                (file_url, etag))

        self.everything_set_okay = True

        # This list of wildcard strings will be handled by NameExpansionIterator.
        threaded_wildcards = []

        for pattern in patterns:
            surl = StorageUrlFromString(pattern)
            if surl.IsBucket():
                if self.recursion_requested:
                    surl.object_name = '*'
                    threaded_wildcards.append(surl.url_string)
                else:
                    self.SetIamHelper(surl, policy)
            else:
                threaded_wildcards.append(surl.url_string)

        # N.B.: If threaded_wildcards contains a non-existent bucket
        # (e.g. ["gs://non-existent", "gs://existent"]), NameExpansionIterator
        # will raise an exception in iter.next. This halts all iteration, even
        # when -f is set. This behavior is also evident in acl set. This behavior
        # also appears for any exception that will be raised when iterating over
        # wildcard expansions (access denied if bucket cannot be listed, etc.).
        if threaded_wildcards:
            name_expansion_iterator = NameExpansionIterator(
                self.command_name,
                self.debug,
                self.logger,
                self.gsutil_api,
                threaded_wildcards,
                self.recursion_requested,
                all_versions=self.all_versions,
                continue_on_error=self.continue_on_error
                or self.parallel_operations,
                bucket_listing_fields=['name'])

            seek_ahead_iterator = SeekAheadNameExpansionIterator(
                self.command_name,
                self.debug,
                self.GetSeekAheadGsutilApi(),
                threaded_wildcards,
                self.recursion_requested,
                all_versions=self.all_versions)

            policy_it = itertools.repeat(protojson.encode_message(policy))
            self.Apply(_SetIamWrapper,
                       itertools.izip(policy_it, name_expansion_iterator),
                       _SetIamExceptionHandler,
                       fail_on_error=not self.continue_on_error,
                       seek_ahead_iterator=seek_ahead_iterator)

            self.everything_set_okay &= not GetFailureCount() > 0

        # TODO: Add an error counter for files and objects.
        if not self.everything_set_okay:
            raise CommandException('Some IAM policies could not be set.')
Example #35
    def _PatchIam(self):
        self.continue_on_error = False
        self.recursion_requested = False

        patch_bindings_tuples = []

        if self.sub_opts:
            for o, a in self.sub_opts:
                if o in ['-r', '-R']:
                    self.recursion_requested = True
                elif o == '-f':
                    self.continue_on_error = True
                elif o == '-d':
                    patch_bindings_tuples.append(BindingStringToTuple(
                        False, a))

        patterns = []

        # N.B.: self.sub_opts stops taking in options at the first non-flagged
        # token. The rest of the tokens are sent to self.args. Thus, in order to
        # handle input of the form "-d <binding> <binding> <url>", we will have to
        # parse self.args for a mix of both bindings and CloudUrls. We are not
        # expecting to come across the -r, -f flags here.
        it = iter(self.args)
        for token in it:
            if token == '-d':
                patch_bindings_tuples.append(
                    BindingStringToTuple(False, it.next()))
            else:
                try:
                    patch_bindings_tuples.append(
                        BindingStringToTuple(True, token))
                # All following arguments are urls.
                except (ArgumentException, CommandException):
                    patterns.append(token)
                    for token in it:
                        patterns.append(token)

        # We must have some bindings to process, else this is pointless.
        if not patch_bindings_tuples:
            raise CommandException('Must specify at least one binding.')

        self.everything_set_okay = True
        threaded_wildcards = []
        for pattern in patterns:
            surl = StorageUrlFromString(pattern)
            try:
                if surl.IsBucket():
                    if self.recursion_requested:
                        surl.object = '*'
                        threaded_wildcards.append(surl.url_string)
                    else:
                        self.PatchIamHelper(surl, patch_bindings_tuples)
                else:
                    threaded_wildcards.append(surl.url_string)
            except AttributeError:
                error_msg = 'Invalid Cloud URL "%s".' % surl.object_name
                if set(surl.object_name).issubset(set('-Rrf')):
                    error_msg += (
                        ' This resource handle looks like a flag, which must appear '
                        'before all bindings. See "gsutil help iam ch" for more details.'
                    )
                raise CommandException(error_msg)

        if threaded_wildcards:
            name_expansion_iterator = NameExpansionIterator(
                self.command_name,
                self.debug,
                self.logger,
                self.gsutil_api,
                threaded_wildcards,
                self.recursion_requested,
                all_versions=self.all_versions,
                continue_on_error=self.continue_on_error
                or self.parallel_operations,
                bucket_listing_fields=['name'])

            seek_ahead_iterator = SeekAheadNameExpansionIterator(
                self.command_name,
                self.debug,
                self.GetSeekAheadGsutilApi(),
                threaded_wildcards,
                self.recursion_requested,
                all_versions=self.all_versions)

            serialized_bindings_tuples_it = itertools.repeat(
                [SerializeBindingsTuple(t) for t in patch_bindings_tuples])
            self.Apply(_PatchIamWrapper,
                       itertools.izip(serialized_bindings_tuples_it,
                                      name_expansion_iterator),
                       _PatchIamExceptionHandler,
                       fail_on_error=not self.continue_on_error,
                       seek_ahead_iterator=seek_ahead_iterator)

            self.everything_set_okay &= not GetFailureCount() > 0

        # TODO: Add an error counter for files and objects.
        if not self.everything_set_okay:
            raise CommandException('Some IAM policies could not be patched.')
Example #36
def grouped(iterable, n):
    "s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."
    return izip(*[iter(iterable)]*n)
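
grouped chops an iterable into fixed-size tuples; because izip stops at the shortest iterator, any incomplete trailing group is silently dropped:

print list(grouped(range(7), 3))   # [(0, 1, 2), (3, 4, 5)] -- the trailing 6 is dropped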
Example #37
def previousAndNext(some_iterable):
	#http://stackoverflow.com/questions/1011938/python-previous-and-next-values-inside-a-loop
	prevs, items, nexts = tee(some_iterable, 3)
	prevs = chain([None], prevs)
	nexts = chain(islice(nexts, 1, None), [None])
	return izip(prevs, items, nexts)
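
A quick illustration, assuming the helper above is in scope:

for prev, item, nxt in previousAndNext('abc'):
    print prev, item, nxt
# None a b
# a b c
# b c None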
Example #38
 def wrapper():
     for i, data in izip(xrange(samples_per_pass), training_reader):
         yield data
Example #39
def consensus(data, sample, tmpchunk, optim):
    """
    from a clust file handle, reads in all copies at a locus and sorts
    bases at each site, tests for errors at the site according to error
    rate, calls consensus.
    """

    ## temporarily store the mean estimates to Assembly
    data._este = data.stats.error_est.mean()
    data._esth = data.stats.hetero_est.mean()

    ## number relative to tmp file
    tmpnum = int(tmpchunk.split(".")[-1])

    ## prepare data for reading
    clusters = open(tmpchunk, 'rb')
    pairdealer = itertools.izip(*[iter(clusters)]*2)

    ## array to store all the coverage data, including consens reads that are
    ## excluded (for now). The reason we include the low cov data is that this
    ## Assembly might be branched and the new one use a lower depth filter.
    #### dimensions: nreads_in_this_chunk, max_read_length, 4 bases
    maxlen = data._hackersonly["max_fragment_length"]
    #if any(x in data.paramsdict["datatype"] for x in ['pair', 'gbs']):
    #    maxlen *= 2

    ## write to tmp cons to file to be combined later
    consenshandle = os.path.join(data.dirs.consens,
                                 sample.name+"_tmpcons."+str(tmpnum))
    ## h5 for data storage
    io5 = h5py.File(consenshandle.replace("_tmpcons.", "_tmpcats."), 'w')
    catarr = io5.create_dataset("cats", (optim, maxlen, 4), dtype=np.uint32)
    nallel = io5.create_dataset("alls", (optim, ), dtype=np.uint8)
    
    ## Enable storing arbitrary length strings
    dt = h5py.special_dtype(vlen=bytes)
    chrompos = io5.create_dataset("chroms", (optim, ), dtype=dt)
    ## maybe could get away with uint32
    #chrompos = io5.create_dataset("chroms", (optim, ), dtype=np.uint64)

    ## store data for stats counters
    counters = {"name" : tmpnum,
                "heteros": 0,
                "nsites" : 0,
                "nconsens" : 0}
    ## store data for what got filtered
    filters = {"depth" : 0,
               "maxh" : 0,
               "maxn" : 0}

    ## store data for writing
    storeseq = {}

    ## set max limits
    if 'pair' in data.paramsdict["datatype"]:
        maxhet = sum(data.paramsdict["max_Hs_consens"])
        maxn = sum(data.paramsdict["max_Ns_consens"])
    else:
        maxhet = data.paramsdict["max_Hs_consens"][0]
        maxn = data.paramsdict["max_Ns_consens"][0]

    ## iterate over clusters
    done = 0
    while not done:
        try:
            done, chunk = clustdealer(pairdealer, 1)
        except IndexError:
            raise IPyradError("clustfile formatting error in %s", chunk)

        if chunk:
            ## get names and seqs
            piece = chunk[0].strip().split("\n")
            names = piece[0::2]
            seqs = piece[1::2]
            ## pull replicate read info from seqs
            reps = [int(sname.split(";")[-2][5:]) for sname in names]

            ## IF this is a reference mapped read store the chrom and pos info
            ## This is hackish. If the reference scaffolds contain ";" this is f****d.
            ## Just split from the right side using rsplit and negative indexing!
            ref_position = ""
            if "reference" in data.paramsdict["assembly_method"]:
                try:
                    name_string = names[0].rsplit(";")
                    ## Only record the reference position if it's actually a refmapped locus
                    if len(name_string) > 3:
                        ref_position = name_string[-3]
                        LOGGER.debug("Refpos - {}".format(ref_position))
                except:
                    LOGGER.debug("Reference sequence chrom/pos failed for {}".format(names[0]))
                    ref_position = ""

            ## apply read depth filter
            if nfilter1(data, reps):

                ## get stacks of base counts
                sseqs = [list(seq) for seq in seqs]
                arrayed = np.concatenate(
                          [[seq]*rep for seq, rep in zip(sseqs, reps)])
                arrayed = arrayed[:, :maxlen]
                ## get consens call for each site, applies paralog-x-site filter
                consens = np.apply_along_axis(basecall, 0, arrayed, data)

                ## apply a filter to remove low coverage sites/Ns that
                ## are likely sequence repeat errors. This is only applied to
                ## clusters that already passed the read-depth filter (1)
                if "N" in consens:
                    try:
                        consens, arrayed = removerepeats(consens, arrayed)

                    except ValueError as _:
                        LOGGER.info("Caught a bad chunk w/ all Ns. Skip it.")
                        continue

                ## get hetero sites
                hidx = [i for (i, j) in enumerate(consens) \
                            if j in list("RKSYWM")]
                nheteros = len(hidx)

                ## filter for max number of hetero sites
                if nfilter2(nheteros, maxhet):
                    ## filter for maxN, & minlen
                    if nfilter3(consens, maxn):
                        ## get N alleles and get lower case in consens
                        consens, nhaps = nfilter4(consens, hidx, arrayed)

                        ## store the number of alleles observed
                        nallel[counters["nconsens"]] = nhaps

                        ## store a reduced array with only CATG
                        catg = np.array(\
                            [np.sum(arrayed == i, axis=0)  \
                            for i in list("CATG")],
                            dtype='uint32').T
                        catarr[counters["nconsens"], :catg.shape[0], :] = catg
                        chrompos[counters["nconsens"]] = ref_position

                        ## store the seqdata for tmpchunk
                        storeseq[counters["name"]] = "".join(list(consens))
                        counters["name"] += 1
                        counters["nconsens"] += 1
                        counters["heteros"] += nheteros
                    else:
                        #LOGGER.debug("@haplo")
                        filters['maxn'] += 1
                else:
                    #LOGGER.debug("@hetero")
                    filters['maxh'] += 1
            else:
                #LOGGER.debug("@depth")
                filters['depth'] += 1
    ## close file io
    clusters.close()

    #LOGGER.info('writing %s', consenshandle)
    #LOGGER.info('passed in this chunk: %s', len(storeseq))
    #LOGGER.info('caught in this chunk: %s', filters)
    if storeseq:
        with open(consenshandle, 'wb') as outfile:
            outfile.write("\n".join([">"+sample.name+"_"+str(key)+"\n"+\
                                   str(storeseq[key]) for key in storeseq]))

    ## save tmp catg array that will be combined into hdf5 later
    io5.close()

    ## final counts and return
    counters['nsites'] = sum([len(i) for i in storeseq.itervalues()])

    return counters, filters
Example #40
    'Kansas City Defense', 'New York G Defense', 'New York J Defense',
    'Buffalo Defense', 'Carolina Defense', 'Cincinnati Defense',
    'Cleveland Defense', 'Philadelphia Defense', 'Pittsburgh Defense',
    'Tennessee Defense', 'Dallas Defense', 'Houston Defense',
    'Seattle Defense', 'Green Bay Defense'
]

team_name_lst = [
    'Cardinals', 'Rams', 'Raiders', 'Chargers', '49ers', 'Broncos', 'Jaguars',
    'Dolphins', 'Buccaneers', 'Falcons', 'Bear', 'Colts', 'Saints', 'Ravens',
    'Redskins', 'Patriots', 'Lions', 'Vikings', 'Chiefs', 'Giants', 'Jets',
    'Buffalo', 'Panthers', 'Bengals', 'Browns', 'Eagles', 'Steelers', 'Titans',
    'Cowboys', 'Texans', 'Seahawks', 'Packers'
]

defense_dict = dict(itertools.izip(team_name_lst, defense_lst))


class dfAnalysis(object):
    def __init__(self,
                 contest_file,
                 point_file,
                 defense_dict=defense_dict,
                 delim_whitespace=False):

        self.defense_dict = defense_dict
        self.contest_file = contest_file
        self.point_file = point_file
        self.contest_df = self._load_contest_df(self.contest_file)
        self.point_df = self._load_point_df(self.point_file)
        self.lineup_df, self.percent_df = self._arrange_contest_data(
Exemplo n.º 41
0
def map_distribution(f, p, g=None):
    """
    map_distribution(f, p [, g]) -> mapping
    
    Returns a copy of the mapping p, with each key replaced by its
    image under f. Any duplicate image keys are merged, with the value of
    the merged key equal to the sum of the values.
    
    It is expected that f returns tuples or scalars, and behaves in a
    reasonable way when given vector state array arguments.
    
    If g is supplied, it is used instead of addition to reduce the values of
    duplicate image keys. If given, g must have a reduce method of the form
        g.reduce(probabilities) -> reduced_probability
        
    for example, setting g to a numpy ufunc would be fine.
    """
    
    # all this nonsense actually does something fairly straightforward:
    # see 'map_distribution_simple' for a reference implementation that
    # avoids numpy operations
    
    num_items = len(p)
    
    if num_items == 0:
        return {}
    
    if g is None:
        g = numpy.add
    
    s, v = domain.from_mapping(p)
    fs = numpy.asarray(f(s))
    
    # handle the case where f returns scalars rather than tuples;
    # this broadcast might be a touch flaky
    if len(fs.shape) != 2:
        fs = fs*numpy.ones((1, s.shape[1]))
    
    # sort image states using lexical ordering on coords, then
    # apply same ordering to values
    
    order = numpy.lexsort(fs)
    sfs = fs[:, order]
    sv = v[order]
    
    # figure out the indices of the first instance of each state
    not_equal_adj = numpy.logical_or.reduce(sfs[:, :-1] != sfs[:, 1:])
    not_equal_adj = numpy.concatenate(([True], not_equal_adj))
    
    # extract the unique image states under f
    usfs = sfs[:, not_equal_adj]
    
    # convert back from array representation to an iterator of state tuples
    unique_image_states = domain.to_iter(usfs)
    
    # determine start and end indices of each equivalence class of
    # values in the sorted values array, where values are equivalent if
    # they are associated with states that agree under the transform f
    class_begin = numpy.nonzero(not_equal_adj)[0]
    class_end = numpy.concatenate((class_begin[1:], [num_items]))
    
    # construct the resulting mapped probability distribution
    # each image state s maps to the values in its equivalence class,
    # reduced by g
    p_mapped = {}
    for s, i, j in itertools.izip(unique_image_states, class_begin, class_end):
        p_mapped[s] = g.reduce(sv[i:j])
    
    return p_mapped
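
# Hedged sketch approximating the 'map_distribution_simple' reference that the
# comment above mentions (the real one may differ). Unlike map_distribution it
# calls f on one state tuple at a time rather than on vector state arrays, and
# merges duplicate image keys by summing their probabilities.
def map_distribution_simple_sketch(f, p):
    mapped = {}
    for state, prob in p.items():
        image = f(state)
        mapped[image] = mapped.get(image, 0.0) + prob
    return mapped

# e.g. collapsing states onto the parity of their first coordinate:
print map_distribution_simple_sketch(lambda s: (s[0] % 2,),
                                     {(0,): 0.25, (1,): 0.25, (2,): 0.5})
# merged result: {(0,): 0.75, (1,): 0.25}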
Exemplo n.º 42
0
 def _update_all_stop_buttons(self):
     tracks = self._track_provider.controlled_tracks()
     self.stop_track_clips_buttons.control_count = len(tracks)
     for track, button in izip(tracks, self.stop_track_clips_buttons):
         self._update_stop_button(track, button)
Exemplo n.º 43
0
    def backpropagation(self, trainingset, ERROR_LIMIT=1e-3, learning_rate=0.3, momentum_factor=0.9  ):
        def addBias(A):
            # Add 1 as bias.
            return np.hstack(( np.ones((A.shape[0],1)), A ))
        #end addBias
        
        # sanity check: input/output sizes must match the network definition
        assert trainingset[0].features.shape[0] == self.n_inputs, "ERROR: input size varies from the defined input setting"
        assert trainingset[0].targets.shape[0] == self.n_outputs, "ERROR: output size varies from the defined output setting"
        
        training_data = np.array( [instance.features for instance in trainingset ] )
        training_targets = np.array( [instance.targets for instance in trainingset ] )
        
        MSE      = float('inf')  # start above any error limit so the loop runs at least once
        neterror = None
        momentum = collections.defaultdict( int )
        # collections.defaultdict(int) returns 0 for missing keys, so each layer's momentum starts at zero
        epoch = 0
        while MSE > ERROR_LIMIT:
            epoch += 1
            
            input_layers = self.forwordProp( training_data, trace=True )
            out         = input_layers[-1]
                              
            error       = training_targets - out #Error at final layer
            delta       = error
            MSE         = np.mean( np.power(error,2) )
            
            
            loop  = itertools.izip(
                            xrange(len(self.weights)-1, -1, -1),
                            reversed(self.weights),
                            reversed(input_layers[:-1]),
                        )

            
            for i, weight_layer, input_signals in loop:
                # input_signals: the activations feeding into this weight layer.
                # Loop over the weight layers in reversed order to calculate the deltas.

                # Calculate the weight change: np.dot() takes the matrix product of the
                # (bias-augmented, transposed) inputs with the delta, plus the momentum term.
                dW = learning_rate * np.dot( addBias(input_signals).T, delta ) + momentum_factor * momentum[i]
                if i!= 0:
                    """Do not calculate the delta unnecessarily."""
                    # Skipping the bias weight during calculation.
                    weight_delta = np.dot( delta, weight_layer[1:,:].T )
            
                    # Calculate the delta for the subsequent layer
                    delta = np.multiply(  weight_delta, self.activation_functions[i-1]( input_signals, derivative=True) )
                
                # Store the momentum
                momentum[i] = dW
                
                # Update the weights
                self.weights[ i ] += dW
            
            if epoch%50==0:
                # Show the current training status
                print "* Epoch: "+str(epoch)+ "  * current network error (MSE):", MSE
        
        print "* Converged to error bound (%.4g) with MSE(mean square error) = %.4g." % ( ERROR_LIMIT, MSE )
        print "* Trained for %d epochs." % epoch
Exemplo n.º 44
0
    def run(self):

        parser = self.create_arg_parser()
        args = parser.parse_args()

        self.initialize(args)

        dead_walkers_idxs = np.where(self.ncopiess == 0)[0]
        print "number of dead walkers: %i" % dead_walkers_idxs.size
        ndead_walkers_kept = 0

        # look for dead walkers that are kept after all because their first x nearest neighbors are all dead walkers.
        for idx in dead_walkers_idxs:
            nneighbors = self.get_nneighbors(args.nnfile, idx)

            if args.max_dead_neighbors is None:
                nnneighbors = nneighbors.size
            else:
                nnneighbors = min(nneighbors.size, args.max_dead_neighbors + 1)

            if nnneighbors > 1:  #the first element of nneighbors is always the point itself
                jdx = 1
                nneighbor_idx = nneighbors[jdx]
                while self.ncopiess[nneighbor_idx] == 0:
                    jdx += 1
                    if jdx == nnneighbors:  # all the neighbors are dead walkers
                        break
                    else:
                        nneighbor_idx = nneighbors[jdx]
                if jdx == nnneighbors:  # all nneighbors are dead within the range fixed
                    self.ncopiess[idx] = 1  # do not kill this walker
                    ndead_walkers_kept += 1
            else:  # the point has no nneighbor within the range fixed
                self.ncopiess[idx] = 1  # do not kill this walker
                ndead_walkers_kept += 1

        print "number of dead walkers kept: %i" % ndead_walkers_kept
        # update dead_walkers_idxs
        if ndead_walkers_kept > 0:
            dead_walkers_idxs = np.where(self.ncopiess == 0)[0]

        print "add weights of dead walkers to their nneighbor among new_walkers... "

        # redistribute the weights of dead walkers among their first alive nearest neighbors
        for idx in dead_walkers_idxs:  # every idx has at least one nneighbor alive
            nneighbors = self.get_nneighbors(args.nnfile, idx)
            assert len(
                nneighbors
            ) > 1, "the number of nnearest neighbors should be greater than 1."
            nneighbors = nneighbors[
                1:]  # exclude the first neighbor which is the point itself
            nalive_nneighbors = 0
            if args.max_alive_neighbors is None:  # if args.max_alive_neighbors is None, spread the weight of the dead walker over all their nearest neighbors available
                nalive_nneighbors = np.sum(
                    self.ncopiess[nneighbors])  # number of neighbors alive
                self.weights[nneighbors] += self.weights[idx] * self.ncopiess[
                    nneighbors] / nalive_nneighbors
            else:  # if args.max_alive_neighbors > 0, spread the weight of dead walkers over the first "args.max_alive_neighbors" neighbors
                for jdx, nneighbor_idx in enumerate(nneighbors):
                    nalive_nneighbors += self.ncopiess[nneighbor_idx]
                    if nalive_nneighbors >= args.max_alive_neighbors:
                        break
                last_nneighbor_idx = jdx
                self.weights[
                    nneighbors[:last_nneighbor_idx +
                               1]] += self.weights[idx] * self.ncopiess[
                                   nneighbors[:last_nneighbor_idx +
                                              1]] / nalive_nneighbors
            self.weights[idx] = 0.0

        cutoff = 1e-6
        self.ncopiess[self.weights < cutoff] = 0

        # build vector of new coords and new weights
        new_coords = []
        new_weights = []
        for ncopies, weight, coord in it.izip(self.ncopiess, self.weights,
                                              self.coords):
            if ncopies > 0:
                new_weight = weight / ncopies
                for ncopy in range(ncopies):
                    new_coords.append(coord)
                    new_weights.append(new_weight)

        self.new_coords = np.array(new_coords)
        self.new_weights = np.array(new_weights)

        sum_old_weights = int(round(np.sum(self.weights)))
        sum_new_weights = int(round(np.sum(self.new_weights)))
        assert sum_new_weights==sum_old_weights, \
            "sum of the weights differs from old number: sum_new_weights/sum_old_weights: %i/%i" %(sum_new_weights,sum_old_weights)

        self.save(args)
Exemplo n.º 45
0
    def mutate_delta(self,
                     _source,
                     step_size=1,
                     per_indiv_rate=1.0,
                     per_gene_rate=0.1,
                     genes=None,
                     positive_rate=0.5):
        '''Mutates a group of individuals by adding or subtracting `step_size`
        to or from individual genes.
        
        .. include:: epydoc_include.txt
        
        :Parameters:
          _source : iterable(`IntegerIndividual`)
            A sequence of individuals. Individuals are taken one at a time
            from this sequence and either returned unaltered or cloned and
            mutated.
          
          step_size : int
            The amount to adjust mutated genes by. If this value is not an
            integer, it is truncated before use.
          
          per_indiv_rate : |prob|
            The probability of any individual being mutated. If an individual
            is not mutated, it is returned unmodified.
          
          per_gene_rate : |prob|
            The probability of any gene being mutated. If an individual is not
            selected for mutation (under `per_indiv_rate`) then this value is
            unused.
          
          genes : int
            The exact number of genes to mutate. If `None`, `per_gene_rate` is
            used instead.
          
          positive_rate : |prob|
            The probability of `step_size` being added to the gene value.
            Otherwise, `step_size` is subtracted.
        '''
        assert step_size is not True, "step_size has no value"
        assert per_indiv_rate is not True, "per_indiv_rate has no value"
        assert per_gene_rate is not True, "per_gene_rate has no value"
        assert genes is not True, "genes has no value"
        assert positive_rate is not True, "positive_rate has no value"

        frand = rand.random
        shuffle = rand.shuffle

        do_all_gene = (per_gene_rate >= 1.0)
        do_all_indiv = (per_indiv_rate >= 1.0)

        genes = int(genes or 0)

        # Die (if debugging) if step_size is not an integer
        assert step_size == int(
            step_size), "step_size must be an integer for integer species"
        # Force step_size to be an integer
        step_size = int(step_size)

        for indiv in _source:
            assert isinstance(
                indiv, IntegerIndividual
            ), "Want `IntegerIndividual`, not `%s`" % type(indiv)

            if do_all_indiv or frand() < per_indiv_rate:
                step_size_sum = 0
                new_genes = list(indiv.genome)
                source = izip(xrange(len(new_genes)), new_genes,
                              indiv.lower_bounds, indiv.upper_bounds)

                if genes:
                    do_all_gene = True
                    source = list(source)
                    shuffle(source)
                    source = islice(source, genes)

                for i, gene, low, high in source:
                    if do_all_gene or frand() < per_gene_rate:
                        step_size_sum += step_size
                        new_gene = gene + (step_size if frand() < positive_rate
                                           else -step_size)
                        new_genes[i] = (low if new_gene < low else
                                        high if new_gene > high else new_gene)

                yield type(indiv)(new_genes,
                                  indiv,
                                  statistic={
                                      'mutated': 1,
                                      'step_sum': step_size_sum
                                  })
            else:
                yield indiv
Exemplo n.º 46
0
    def build_instances(self,
                        context,
                        instances,
                        image,
                        filter_properties,
                        admin_password,
                        injected_files,
                        requested_networks,
                        security_groups,
                        block_device_mapping=None,
                        legacy_bdm=True):
        # TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
        #                 2.0 of the RPC API.
        request_spec = scheduler_utils.build_request_spec(
            context, image, instances)
        scheduler_utils.setup_instance_group(context, request_spec,
                                             filter_properties)
        # TODO(danms): Remove this in version 2.0 of the RPC API
        if (requested_networks and not isinstance(requested_networks,
                                                  objects.NetworkRequestList)):
            requested_networks = objects.NetworkRequestList(objects=[
                objects.NetworkRequest.from_tuple(t)
                for t in requested_networks
            ])

        try:
            # check retry policy. Rather ugly use of instances[0]...
            # but if we've exceeded max retries... then we really only
            # have a single instance.
            scheduler_utils.populate_retry(filter_properties,
                                           instances[0].uuid)
            hosts = self.scheduler_client.select_destinations(
                context, request_spec, filter_properties)
        except Exception as exc:
            for instance in instances:
                scheduler_driver.handle_schedule_error(context, exc,
                                                       instance.uuid,
                                                       request_spec)
            return

        for (instance, host) in itertools.izip(instances, hosts):
            try:
                instance.refresh()
            except (exception.InstanceNotFound,
                    exception.InstanceInfoCacheNotFound):
                LOG.debug('Instance deleted during build', instance=instance)
                continue
            local_filter_props = copy.deepcopy(filter_properties)
            scheduler_utils.populate_filter_properties(local_filter_props,
                                                       host)
            # The block_device_mapping passed from the api doesn't contain
            # instance specific information
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

            self.compute_rpcapi.build_and_run_instance(
                context,
                instance=instance,
                host=host['host'],
                image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms,
                node=host['nodename'],
                limits=host['limits'])
Exemplo n.º 47
0
    def tupleAdd(self, a, b):
        """ 
			Element wise addition of two tuples 
			Credits to @delnan http://stackoverflow.com/questions/5607284/how-to-add-with-tuples
		"""
        return tuple(x + y for x, y in izip(a, b))
Exemplo n.º 48
0
    def mutate_gaussian(self,
                        _source,
                        step_size=1.0,
                        sigma=None,
                        per_indiv_rate=1.0,
                        per_gene_rate=0.1,
                        genes=None):
        '''Mutates a group of individuals by adding or subtracting a random
        value with Gaussian distribution based on `step_size` or `sigma`.
        
        .. include:: epydoc_include.txt
        
        :Parameters:
          _source : iterable(`IntegerIndividual`)
            A sequence of individuals. Individuals are taken one at a time
            from this sequence and either returned unaltered or cloned and
            mutated.
          
          step_size : float
            Determines the standard deviation of the distribution used to
            determine the adjustment amount. If `sigma` is provided, this
            value is ignored.
          
          sigma : float
            The standard deviation of the distribution used to determine the
            adjustment amount. If omitted, the value of `step_size` is used
            to calculate a value for `sigma`.
          
          per_indiv_rate : |prob|
            The probability of any individual being mutated. If an individual
            is not mutated, it is returned unmodified.
          
          per_gene_rate : |prob|
            The probability of any gene being mutated. If an individual is not
            selected for mutation (under `per_indiv_rate`) then this value is
            unused.
          
          genes : int
            The exact number of genes to mutate. If `None`, `per_gene_rate` is
            used instead.
        '''
        assert step_size is not True, "step_size has no value"
        assert sigma is not True, "sigma has no value"
        assert per_indiv_rate is not True, "per_indiv_rate has no value"
        assert per_gene_rate is not True, "per_gene_rate has no value"
        assert genes is not True, "genes has no value"

        sigma = sigma or (step_size * 1.253)
        frand = rand.random
        shuffle = rand.shuffle
        gauss = rand.gauss

        do_all_gene = (per_gene_rate >= 1.0)
        do_all_indiv = (per_indiv_rate >= 1.0)

        genes = int(genes or 0)

        for indiv in _source:
            assert isinstance(
                indiv, IntegerIndividual
            ), "Want `IntegerIndividual`, not `%s`" % type(indiv)

            if do_all_indiv or frand() < per_indiv_rate:
                step_size_sum = 0
                new_genes = list(indiv.genome)
                source = izip(xrange(len(new_genes)), new_genes,
                              indiv.lower_bounds, indiv.upper_bounds)

                if genes:
                    do_all_gene = True
                    source = list(source)
                    shuffle(source)
                    source = islice(source, genes)

                for i, gene, low, high in source:
                    if do_all_gene or frand() < per_gene_rate:
                        step = int(gauss(0, sigma))
                        step_size_sum += step
                        new_gene = gene + step
                        new_genes[i] = (low if new_gene < low else
                                        high if new_gene > high else new_gene)

                yield type(indiv)(new_genes,
                                  indiv,
                                  statistic={
                                      'mutated': 1,
                                      'step_sum': step_size_sum
                                  })
            else:
                yield indiv
Exemplo n.º 49
0
def game_list_create(game, system_name, rom_path, rom_extensions,
                     launcher_script, artwork_base_path, icon_path,
                     poster_path, poster_fallback_path, fanart_path,
                     fanart_fallback_path, thumb_path, logo_path,
                     clearart_path, banner_path, media_path, trailer_path,
                     context_mode):
    if game.find('enabled') != None and game.find('enabled').text != 'Yes':
        return
    game_name = game.find('description').text
    game_file_name = game.attrib['name']
    li = xbmcgui.ListItem('%s' % game_name)
    input1 = [
        'year', 'dev', 'manufacturer', 'rating', 'genre', 'score', 'story',
        'player'
    ]
    label1 = [
        'Year', 'Studio', 'Director', 'Mpaa', 'Genre', 'Rating', 'Plot',
        'Writer'
    ]
    d1 = {}
    for i1, l1 in izip(input1, label1):
        if game.find(i1) != None and game.find(i1).text:
            d1[l1] = game.find(i1).text
    input2 = [
        icon_path, fanart_fallback_path, fanart_path, thumb_path,
        poster_fallback_path, poster_path, logo_path, clearart_path,
        banner_path, media_path, trailer_path
    ]
    label2 = [
        'icon', 'fanart', 'fanart', 'thumb', 'poster', 'poster', 'clearlogo',
        'clearart', 'banner', 'discart', 'trailer'
    ]
    d2 = {}
    for i2, l2 in izip(input2, label2):
        f2 = 'false'
        if i2 != 'false':
            vlog('Path exists, checking for art')
            if i2 and l2 == 'trailer':
                f2 = get_game_art(game_file_name, i2, 'none', 'video')
            else:
                f2 = get_game_art(game_file_name, i2, 'none', 'image')
        else:
            vlog('Path does not exist for artwork type:')
        if f2 != 'false':
            if i2 and l2 == 'trailer':
                d1[l2] = f2
            else:
                d2[l2] = f2
    d1.update({
        'Title': game_name,
        'OriginalTitle': game_file_name,
        'launcher_script': launcher_script
    })
    li.setArt(d2)
    b_url = build_url({
        'mode': 'artwork',
        'game_name': game_name,
        'game_file_name': game_file_name,
        'artwork_base_path': artwork_base_path
    })
    d1['Album'] = b_url
    li.setInfo('video', d1)
    url = build_url({
        'mode': 'file',
        'foldername': system_name,
        'game_name': game_name,
        'filename': game_file_name,
        'rom_path': rom_path,
        'launcher_script': launcher_script,
        'rom_extensions': rom_extensions
    })
    li.setProperty('IsPlayable', 'false')
    contextMenuItems = []
    if context_mode != 'context_two':
        contextMenuItems.append((
            language(50208),
            'XBMC.Container.Update(%s)' % build_url({
                'mode': 'search_input',
                'foldername': 'none',
                'system_name': system_name
            }),
        ))
    if artwork_base_path:
        contextMenuItems.append((
            language(50209),
            'XBMC.Container.Update(%s)' % b_url,
        ))
    contextMenuItems.append((
        language(50210),
        'XBMC.RunPlugin(%s)' % build_url({'mode': 'random_focus'}),
    ))
    contextMenuItems.append((
        language(50211),
        'XBMC.RunPlugin(%s)' % build_url({
            'mode': 'select_launcher',
            'foldername': system_name,
            'game_name': game_name,
            'filename': game_file_name,
            'rom_path': rom_path,
            'launcher_script': launcher_script,
            'rom_extensions': rom_extensions
        }),
    ))
    li.addContextMenuItems(contextMenuItems)
    listing.append((url, li, True))
Exemplo n.º 50
0
 def tupleSub(self, a, b):
     return tuple(x - y for x, y in izip(a, b))
def each_cons(xs, n):
    return itertools.izip(*(itertools.islice(g, i, None)
                            for i, g in enumerate(itertools.tee(xs, n))))
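
# Hedged usage sketch for each_cons above: it yields overlapping windows of n
# consecutive elements (written against Python 2's itertools.izip; use zip on Python 3).
print list(each_cons([1, 2, 3, 4], 2))    # [(1, 2), (2, 3), (3, 4)]
print list(each_cons('abcde', 3))         # [('a', 'b', 'c'), ('b', 'c', 'd'), ('c', 'd', 'e')]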
Exemplo n.º 52
0
def makeTester(name, op, expected, good=None, bad_build=None, checks=None,
               bad_runtime=None, mode=None, skip=False, eps=1e-10):
    if good is None:
        good = {}
    if bad_build is None:
        bad_build = {}
    if bad_runtime is None:
        bad_runtime = {}
    if checks is None:
        checks = {}

    _op = op
    _expected = expected
    _good = good
    _bad_build = bad_build
    _bad_runtime = bad_runtime
    _skip = skip
    _checks = checks

    class Checker(unittest.TestCase):
        op = staticmethod(_op)
        expected = staticmethod(_expected)
        good = _good
        bad_build = _bad_build
        bad_runtime = _bad_runtime
        skip = _skip
        checks = _checks

        def setUp(self):
            eval(self.__class__.__module__ + '.' + self.__class__.__name__)

        def test_good(self):
            if skip:
                raise SkipTest(skip)

            for testname, inputs in good.items():
                inputs = [copy(input) for input in inputs]
                inputrs = [fake_shared(input) for input in inputs]

                try:
                    node = safe_make_node(self.op, *inputrs)
                except Exception, exc:
                    err_msg = ("Test %s::%s: Error occured while making "
                               "a node with inputs %s") % (self.op, testname,
                                                           inputs)
                    exc.args += (err_msg,)
                    raise

                try:
                    f = inplace_func([], node.outputs, mode=mode,
                                     name='test_good')
                except Exception, exc:
                    err_msg = ("Test %s::%s: Error occured while trying to "
                               "make a Function") % (self.op, testname)
                    exc.args += (err_msg,)
                    raise

                if isinstance(self.expected, dict) and \
                        testname in self.expected:
                    expecteds = self.expected[testname]
                else:
                    expecteds = self.expected(*inputs)

                if not isinstance(expecteds, (list, tuple)):
                    expecteds = (expecteds,)

                try:
                    variables = f()
                except Exception, exc:
                    err_msg = ("Test %s::%s: Error occured while calling "
                               "the Function on the inputs %s") % (self.op,
                                                                   testname,
                                                                   inputs)
                    exc.args += (err_msg,)
                    raise

                for i, (variable, expected) in \
                        enumerate(izip(variables, expecteds)):
                    if variable.dtype != expected.dtype or \
                            variable.shape != expected.shape or \
                            not GpuArrayType.values_eq_approx(variable,
                                                             expected):
                        self.fail(("Test %s::%s: Output %s gave the wrong "
                                   "value. With inputs %s, expected %s "
                                   "(dtype %s), got %s (dtype %s).") % (
                                self.op, testname, i, inputs, expected,
                                expected.dtype, variable, variable.dtype))

                for description, check in self.checks.items():
                    if not check(inputs, variables):
                        self.fail(("Test %s::%s: Failed check: %s "
                                   "(inputs were %s, ouputs were %s)") %
                                  (self.op, testname, description,
                                   inputs, variables))
Exemplo n.º 53
0
 def _pairwise(iterable):
     a, b = itertools.tee(iterable)
     next(b, None)
     return itertools.izip(a, b)
Exemplo n.º 54
0
 system_config = os.path.join(SYSTEMS_CONFIG_PATH,
                              system_name + '-config.xml')
 system_artwork_path = os.path.join(SYSTEMS_ARTWORK_PATH,
                                    system_name)
 if createSystemArtworkFolder == 'true':
     create_system_artwork_folder()
 file_check(system_config, system_config)
 tree = ET.parse(system_config)
 root = tree.getroot()
 input1 = [
     'release_year', 'manufacturer', 'manufacturer', 'description'
 ]
 label1 = ['Year', 'Studio', 'Director', 'Plot']
 d1 = {}
 for item in root.findall('info'):
     for i1, l1 in izip(input1, label1):
         if item.find(i1).text:
             d1[l1] = item.find(i1).text
 d2 = {}
 system_art = ['icon', 'poster', 'logo', 'fanart', 'trailer']
 system_art_out = [
     'icon', 'poster', 'clearlogo', 'fanart', 'trailer'
 ]
 system_art_type = ['image', 'image', 'image', 'image', 'video']
 for sa, sao, sat in izip(system_art, system_art_out,
                          system_art_type):
     game_art = get_game_art(system_name + '-' + sa,
                             system_artwork_path, 'none', sat)
     if sa == 'trailer' and sao == 'trailer':
         d1[sao] = game_art
     else:
Exemplo n.º 55
0
 def merge_row(row):
     return reduce(op.add,
                   (x if t else (x, ) for t, x in izip(istuple, row)))
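
# Hedged, standalone illustration of the merge_row pattern above. Assumptions:
# `op` is the operator module, `izip` comes from itertools (Python 2), and
# `istuple` is a hypothetical list of flags marking which columns already hold
# tuples; scalars get wrapped in 1-tuples so the row concatenates into one flat tuple.
import operator as op
from itertools import izip

istuple = [True, False, True]
row = [(1, 2), 3, (4,)]
print reduce(op.add, (x if t else (x,) for t, x in izip(istuple, row)))    # (1, 2, 3, 4)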
Exemplo n.º 56
0
def pairwise(iterable):  # from the itertools documentation
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    a, b = itertools.tee(iterable)
    next(b, None)
    return itertools.izip(a, b)
Exemplo n.º 57
0
def iter_kwargs(kwargs):
    keys = kwargs.keys()
    values = kwargs.values()
    for args in izip(*values):  # unpack so parallel positions across the value lists are zipped together
        yield dict(izip(keys, args))
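
# Hedged usage sketch: given parallel value lists, iter_kwargs yields one keyword
# dict per position (Python 2 izip; use zip on Python 3). keys() and values()
# come from the same dict here, so their orderings correspond.
for kw in iter_kwargs({'lr': [0.1, 0.01], 'seed': [1, 2]}):
    print kw    # {'lr': 0.1, 'seed': 1}, then {'lr': 0.01, 'seed': 2}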
Exemplo n.º 58
0
def attn_performance_monitor(data,
                             objects,
                             y_dim,
                             t_window=None,
                             t_w_offset=None,
                             t_start=None,
                             t_stop=None,
                             t_pos=0):
    if t_pos == 1:
        data = numpy.fliplr(data)
    data.sort(axis=0)
    if t_window == None: t_window = data[-1, 0] - data[0, 0]
    if t_w_offset == None: t_w_offset = t_window / 2
    if t_start == None: t_start = data[0, 0]
    if t_stop == None: t_stop = data[-1, 0]
    data_ranges = [[
        datum for datum in data
        if datum[0] >= t - t_w_offset and datum[0] < t - t_w_offset + t_window
    ] for t in range(int(t_start + t_w_offset), int(t_stop), int(t_window))]
    data_range_times = [
        set([d_t[0] for d_t in d_range]) for d_range in data_ranges
    ]
    data_groups = itertools.izip(data_ranges, data_range_times)
    if debug:
        performance_measures = []
        debug_file = open('vis_metric_dbg.txt', 'w')
        debug_file.write("objects in visual field: %s\n" % objects)
        for group in range(len(data_ranges)):
            total_metric = 0
            debug_file.write("active spiking times for group %d: %s\n" %
                             (group, data_range_times[group]))
            for spike in data_ranges[group]:
                debug_file.write(
                    "t=%d, coordinate=%s\n" %
                    (spike[0], (int(spike[1]) / y_dim, int(spike[1]) % y_dim)))
            for time in data_range_times[group]:
                instantaneous_metric = [0, []]
                for spike in data_ranges[group]:
                    if spike[0] == time:
                        fixation_point = (int(spike[1]) / y_dim,
                                          int(spike[1]) % y_dim)
                        if fixation_point not in [
                                point[0] for point in instantaneous_metric[1]
                        ]:
                            local_metric = attention_metric(
                                fixation_point=fixation_point,
                                preferred_objs=objects['preferred'],
                                aversive_objs=objects['aversive'],
                                neutral_objs=objects['neutral'])
                            instantaneous_metric[0] += local_metric
                            instantaneous_metric[1].append(
                                (fixation_point, local_metric))
                debug_file.write("active time %d, metrics %s\n" %
                                 (time, instantaneous_metric))
                total_metric += instantaneous_metric[0]
            total_metric /= t_window
            debug_file.write("Total metric for time window %d: %f" %
                             (group, total_metric))
            performance_measures.append(total_metric)
        debug_file.close()
    # complicated data reduction does the following:
    # 1: break up the data into groups belonging to a specific time window
    # 2: break up each group into the separate times where there is at least one fixation point
    # 3: aggregate the attention metrics, for each fixation point within a given time slot
    # 4: aggregate these aggregations, over the whole time window
    # 5: divide by the number of active time slots in the window
    else:
        performance_measures = [
            reduce(lambda x, y: x + y, [
                reduce(
                    lambda x, y: x + attention_metric(
                        fixation_point=(int(y[1]) / y_dim, int(y[1]) % y_dim),
                        preferred_objs=objects['preferred'],
                        aversive_objs=objects['aversive'],
                        neutral_objs=objects['neutral']), frame, 0)
                for frame in [[
                    fixation_point for fixation_point in d_group[0]
                    if fixation_point[0] == f_time
                ] for f_time in d_group[1]]
            ], 0) / t_window for d_group in data_groups
        ]
    return performance_measures
Exemplo n.º 59
0
 def createExonLines(self):
     self.exon_lines = []
     for from_exon, to_exon in it.izip(self.exon_list, self.exon_list[1:]):
         self.exon_lines.append([from_exon.right, to_exon.left])
Exemplo n.º 60
0
def iter_args(args):
    for a in izip(*args):
        yield a