Example #1
    def test_sort_by_date(self):
        # Test data
        entry_first = {
            'title': 'First',
            'year': 2012,
            'month': 'Jan'}
        entry_second = {
            'title': 'Second',
            'year': 2012,
            'month': 'Oct'}
        entry_third = {
            'title': 'Third',
            'year': 2013}
        entry_fourth = {
            'title': 'Fourth',
            'year': 2013,
            'month': 'Feb'}
        entries = [entry_second, entry_first, entry_fourth, entry_third]
        expected_result = [
            entry_first,
            entry_second,
            entry_third,
            entry_fourth]

        # Test sort by date
        result = utils.sort_by_date(entries)
        for actual, expected in zip(result, expected_result):
            self.assertEqual(actual, expected)

        # Test reverse sort by date
        expected_result = expected_result[::-1]
        result = utils.sort_by_date(entries, reverse=True)
        for actual, expected in zip(result, expected_result):
            self.assertEqual(actual, expected)
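utils.sort_by_date is not shown on this page; a minimal sketch consistent with the
test above, assuming entries without a 'month' sort before any named month within
the same year:

import calendar

MONTH_INDEX = {m: i for i, m in enumerate(calendar.month_abbr) if m}

def sort_by_date(entries, reverse=False):
    # Missing months map to 0, so bare-year entries come first.
    return sorted(entries,
                  key=lambda e: (e['year'], MONTH_INDEX.get(e.get('month'), 0)),
                  reverse=reverse)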
Example #2
    def test_limits(self):
        """Check line graphs."""
        # TODO - Fix GD so that the same min/max is used for all three lines?
        points = 1000
        scale = math.pi * 2.0 / points
        data1 = [math.sin(x*scale) for x in range(points)]
        data2 = [math.cos(x*scale) for x in range(points)]
        data3 = [2*math.sin(2*x*scale) for x in range(points)]

        gdd = Diagram('Test Diagram', circular=False,
                      y=0.01, yt=0.01, yb=0.01,
                      x=0.01, xl=0.01, xr=0.01)
        gdt_data = gdd.new_track(1, greytrack=False)
        gds_data = gdt_data.new_set("graph")
        for data_values, name, color in zip([data1, data2, data3],
                                            ["sin", "cos", "2sin2"],
                                            ["red", "green", "blue"]):
            data = list(zip(range(points), data_values))
            gds_data.new_graph(data, "", style="line",
                               color=color, altcolor=color,
                               center=0)

        gdd.draw(format='linear',
                 tracklines=False,
                 pagesize=(15*cm, 15*cm),
                 fragments=1,
                 start=0, end=points)
        gdd.write(os.path.join('Graphics', "line_graph.pdf"), "pdf")
        # Circular diagram
        gdd.draw(tracklines=False,
                 pagesize=(15*cm, 15*cm),
                 circular=True,  # Data designed to be periodic
                 start=0, end=points, circle_core=0.5)
        gdd.write(os.path.join('Graphics', "line_graph_c.pdf"), "pdf")
Example #3
 def read(self):
     super(NamdLogFile, self).read()
     self.titles = self.read_etitle()
     #self.fp.seek(0)
     self.values = self.read_energy()
     self.data = dict(zip(self.titles, zip(*self.values)))
     return self.data
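The dict(zip(titles, zip(*values))) line both transposes the row-oriented values
and labels each resulting column; a standalone illustration with made-up log fields:

titles = ['TS', 'BOND', 'ANGLE']
values = [(0, 1.0, 2.0),  # one tuple per energy line in the log
          (1, 1.1, 2.1)]
data = dict(zip(titles, zip(*values)))
# {'TS': (0, 1), 'BOND': (1.0, 1.1), 'ANGLE': (2.0, 2.1)}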
Example #4
def plot(y_vals, x_vals=None, num_x_chars=70, num_y_chars=15):
    """
    Plots the values given by y_vals against x_vals, which defaults to the
    indexes of y_vals. Pairs (x, y) are matched positionally, so the two
    sequences must have the same length.

    The num_x_chars and num_y_chars inputs are respectively the width and
    height for the output plot to be printed, given in characters.
    """
    y_vals = list(y_vals)
    x_vals = list(x_vals) if x_vals is not None else list(range(len(y_vals)))
    if len(x_vals) != len(y_vals):
        raise ValueError("x_vals and y_vals must have the same length")

    ymin = min(y_vals)
    ymax = max(y_vals)
    xmin = min(x_vals)
    xmax = max(x_vals)

    x_bin_width = (xmax - xmin) / num_x_chars
    y_bin_width = (ymax - ymin) / num_y_chars

    x_bin_ends = [(xmin + (i+1) * x_bin_width, 0) for i in range(num_x_chars)]
    y_bin_ends = [ymin + (i+1) * y_bin_width for i in range(num_y_chars)]

    columns_pairs = bin_generator(zip(x_vals, y_vals), x_bin_ends)
    yloop = lambda *args: [charlookup(len(el)) for el in bin_generator(*args)]
    ygetter = lambda iterable: map(itemgetter(1), iterable)
    columns = (yloop(ygetter(pairs), y_bin_ends) for pairs in columns_pairs)
    rows = list(zip(*columns))

    for idx, row in enumerated_reversed(rows):
        y_bin_mid = y_bin_ends[idx] - y_bin_width * 0.5
        print("{:10.4f} {}".format(y_bin_mid, "".join(row)))
Example #5
    def read_simulation_results(self):
        """
        Reads the results of a BNG simulation and parses them into a numpy
        array
        """
        # Read concentrations data
        cdat_arr = numpy.loadtxt(self.base_filename + '.cdat', skiprows=1)
        # Read groups data
        if self.model and len(self.model.observables):
            # Exclude first column (time)
            gdat_arr = numpy.loadtxt(self.base_filename + '.gdat',
                                     skiprows=1)[:,1:]
        else:
            gdat_arr = numpy.ndarray((len(cdat_arr), 0))

        # -1 for time column
        names = ['time'] + ['__s%d' % i for i in range(cdat_arr.shape[1]-1)]
        yfull_dtype = list(zip(names, itertools.repeat(float)))
        if self.model and len(self.model.observables):
            yfull_dtype += list(zip(self.model.observables.keys(),
                                    itertools.repeat(float)))
        yfull = numpy.ndarray(len(cdat_arr), yfull_dtype)

        yfull_view = yfull.view(float).reshape(len(yfull), -1)
        yfull_view[:, :len(names)] = cdat_arr
        yfull_view[:, len(names):] = gdat_arr

        return yfull
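The view/reshape pair is what lets the code fill a structured (record) array
column-block-wise; a minimal sketch of the trick with hypothetical field names:

import itertools
import numpy

names = ['time', '__s0']
rec = numpy.ndarray(3, dtype=list(zip(names, itertools.repeat(float))))
view = rec.view(float).reshape(len(rec), -1)  # same memory, plain 2-D floats
view[:, 0] = [0.0, 1.0, 2.0]
view[:, 1] = [5.0, 6.0, 7.0]
rec['time']  # -> array([0., 1., 2.])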
Example #6
File: pluck.py Project: nvie/pluck
def ipluck_multiple(iterable, defaults, *keys):
    if len(keys) > 1:
        iters = tee(iterable, len(keys))
    else:
        iters = (iterable,)
    iters = [ipluck_single(it, key, default=defaults.get(key, FAIL))
             for it, key in zip(iters, keys)]
    return zip(*iters)
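tee() is needed because one pass over the iterable has to feed a separate
ipluck_single per key. Assuming ipluck_single yields each item's value for the key
(falling back to the given default), usage might look like:

people = [{'name': 'Ann', 'age': 34}, {'name': 'Bob', 'age': 51}]
list(ipluck_multiple(iter(people), {}, 'name', 'age'))
# -> [('Ann', 34), ('Bob', 51)]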
Example #7
 def test_border_condensation(self):
     vals = "red solid 5px"
     css = "; ".join(
         "border-%s-%s: %s" % (edge, p, v) for edge in EDGES for p, v in zip(BORDER_PROPS, vals.split())
     )
     style = parseStyle(css)
     condense_rule(style)
     for e, p in product(EDGES, BORDER_PROPS):
         self.assertFalse(style.getProperty("border-%s-%s" % (e, p)))
         self.assertFalse(style.getProperty("border-%s" % e))
         self.assertFalse(style.getProperty("border-%s" % p))
     self.assertEqual(style.getProperty("border").value, vals)
     css = "; ".join(
         "border-%s-%s: %s" % (edge, p, v) for edge in ("top",) for p, v in zip(BORDER_PROPS, vals.split())
     )
     style = parseStyle(css)
     condense_rule(style)
     self.assertEqual(style.cssText, "border-top: %s" % vals)
     css += ";" + "; ".join(
         "border-%s-%s: %s" % (edge, p, v)
         for edge in ("right", "left", "bottom")
         for p, v in zip(BORDER_PROPS, vals.replace("red", "green").split())
     )
     style = parseStyle(css)
     condense_rule(style)
     self.assertEqual(len(style.getProperties()), 4)
     self.assertEqual(style.getProperty("border-top").value, vals)
     self.assertEqual(style.getProperty("border-left").value, vals.replace("red", "green"))
Example #8
def normalize_edge(name, cssvalue):
    style = {}
    if isinstance(cssvalue, PropertyValue):
        primitives = [v.cssText for v in cssvalue]
    else:
        primitives = [cssvalue.cssText]
    if len(primitives) == 1:
        value, = primitives
        values = (value, value, value, value)
    elif len(primitives) == 2:
        vert, horiz = primitives
        values = (vert, horiz, vert, horiz)
    elif len(primitives) == 3:
        top, horiz, bottom = primitives
        values = (top, horiz, bottom, horiz)
    else:
        values = primitives[:4]
    if '-' in name:
        l, _, r = name.partition('-')
        for edge, value in zip(EDGES, values):
            style['%s-%s-%s' % (l, edge, r)] = value
    else:
        for edge, value in zip(EDGES, values):
            style['%s-%s' % (name, edge)] = value
    return style
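The fan-out follows the standard CSS shorthand rules (1 value covers all edges,
2 are vertical/horizontal, 3 are top/horizontal/bottom); a standalone
re-implementation for plain strings, assuming EDGES = ('top', 'right', 'bottom', 'left'):

EDGES = ('top', 'right', 'bottom', 'left')

def expand_shorthand(name, primitives):
    if len(primitives) == 1:
        primitives = primitives * 4
    elif len(primitives) == 2:
        primitives = primitives * 2
    elif len(primitives) == 3:
        primitives = primitives + [primitives[1]]
    return {'%s-%s' % (name, edge): value
            for edge, value in zip(EDGES, primitives[:4])}

expand_shorthand('margin', ['1px', '2px'])
# {'margin-top': '1px', 'margin-right': '2px',
#  'margin-bottom': '1px', 'margin-left': '2px'}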
Example #9
    def define_association(self, rel_id, source_kind, source_keys, source_many,
                           source_conditional, source_phrase, target_kind, 
                           target_keys, target_many, target_conditional, 
                           target_phrase):
        '''
        Define and return an association from one kind of class (the source 
        kind) to some other kind of class (the target kind).
        '''
        if isinstance(rel_id, int):
            rel_id = 'R%d' % rel_id
            
        source_metaclass = self.find_metaclass(source_kind)
        target_metaclass = self.find_metaclass(target_kind)

        source_link = target_metaclass.add_link(source_metaclass, rel_id,
                                                many=source_many,
                                                phrase=target_phrase,
                                                conditional=source_conditional)
                
        target_link = source_metaclass.add_link(target_metaclass, rel_id,
                                                many=target_many,
                                                phrase=source_phrase,
                                                conditional=target_conditional)
        
        ass = Association(rel_id,
                          source_keys, source_link,
                          target_keys, target_link)
        
        source_link.key_map = dict(zip(source_keys, target_keys))
        target_link.key_map = dict(zip(target_keys, source_keys))
        
        self.associations.append(ass)

        return ass
Example #10
    def common_ancestor(self, targets, *more_targets):
        """Most recent common ancestor (clade) of all the given targets.

        Edge cases:
        - If no target is given, returns self.root
        - If 1 target is given, returns the target
        - If any target is not found in this tree, raises a ValueError
        """
        all_targets = list(_combine_args(targets, *more_targets))
        paths = [self.get_path(t) for t in all_targets]
        # Validation -- otherwise zip throws a spooky error below;
        # zip against the combined list so every path is checked
        for p, t in zip(paths, all_targets):
            if p is None:
                raise ValueError("target %s is not in this tree" % repr(t))
        mrca = self.root
        for level in zip(*paths):
            ref = level[0]
            for other in level[1:]:
                if ref is not other:
                    break
            else:
                mrca = ref
            if ref is not mrca:
                break
        return mrca
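The zip(*paths) walk is a longest-common-prefix scan over the root-to-target paths;
the same idea on plain lists (using == where the method uses identity, since clades
are unique objects):

paths = [['a', 'b', 'c', 'd'], ['a', 'b', 'x'], ['a', 'b', 'c']]
mrca = 'root'
for level in zip(*paths):
    if all(node == level[0] for node in level[1:]):
        mrca = level[0]
    else:
        break
print(mrca)  # 'b', the deepest element shared by every path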
Example #11
def test():
    f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
    f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
    f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
    ld = (({b'a': 15, b'bb': f1, b'ccc': f2, b'': (f3, (), False, True, b'')}, (b'a', 10**20),
           tuple(range(-100000, 100000)), b'b' * 31, b'b' * 62, b'b' * 64, 2**30, 2**33, 2**62,
           2**64, 2**30, 2**33, 2**62, 2**64, False, False, True, -1, 2, 0),)
    assert loads(dumps(ld)) == ld
    d = dict(zip(range(-100000, 100000), range(-100000, 100000)))
    d.update({b'a': 20, 20: 40, 40: 41, f1: f2, f2: f3, f3: False, False: True, True: False})
    ld = (d, {}, {5: 6}, {7: 7, True: 8}, {9: 10, 22: 39, 49: 50, 44: b''})
    assert loads(dumps(ld)) == ld
    ld = (b'', b'a' * 10, b'a' * 100, b'a' * 1000, b'a' * 10000, b'a' * 100000, b'a' * 1000000, b'a' * 10000000)
    assert loads(dumps(ld)) == ld
    ld = tuple([dict(zip(range(n), range(n))) for n in range(100)]) + (b'b',)
    assert loads(dumps(ld)) == ld
    ld = tuple([dict(zip(range(n), range(-n, 0))) for n in range(100)]) + (b'b',)
    assert loads(dumps(ld)) == ld
    ld = tuple([tuple(range(n)) for n in range(100)]) + (b'b',)
    assert loads(dumps(ld)) == ld
    ld = tuple([b'a' * n for n in range(1000)]) + (b'b',)
    assert loads(dumps(ld)) == ld
    ld = tuple([b'a' * n for n in range(1000)]) + (None, True, None)
    assert loads(dumps(ld)) == ld
    assert loads(dumps(None)) is None
    assert loads(dumps({None: None})) == {None: None}
    assert 1e-10 < abs(loads(dumps(1.1)) - 1.1) < 1e-6
    assert 1e-10 < abs(loads(dumps(1.1, 32)) - 1.1) < 1e-6
    assert abs(loads(dumps(1.1, 64)) - 1.1) < 1e-12
    assert loads(dumps('Hello World!!'), decode_utf8=True)
Example #12
def _get_inter_coords(coords, strand=1):
    """From the given pairs of coordinates, returns a list of pairs
    covering the intervening ranges."""
    # adapted from Python's itertools guide
    # if strand is -1, reverse each pair so the ends and starts are chained in order
    if strand == -1:
        sorted_coords = [(max(a, b), min(a, b)) for a, b in coords]
        inter_coords = list(chain(*sorted_coords))[1:-1]
        return list(zip(inter_coords[1::2], inter_coords[::2]))
    else:
        inter_coords = list(chain(*coords))[1:-1]
        return list(zip(inter_coords[::2], inter_coords[1::2]))
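A quick check of the plus-strand branch: flattening the pairs, dropping the
outermost start and end, and re-pairing the even/odd slices yields the gaps
between blocks:

blocks = [(0, 10), (15, 20), (25, 30)]
_get_inter_coords(blocks)  # -> [(10, 15), (20, 25)]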
Example #13
    def clitest( n ):
        times			= clitimes  # How many I/O per client
        # take apart the sequence of ( ..., ((elm,cnt), "Int[1-2]=1,2"), ...)
        # into two sequences: (..., (elm,cnt), ...) and (..., "Int[1-2]=1,2", ...)
        tag_targets		= [('Int',enip.INT), ('DInt',enip.DINT), ('Real',enip.REAL)]
        name,tag_class		= random.choice( tag_targets )
        regs,tags		= zip( *list( tagtests( total=times, name=name, tag_class=tag_class )))
        connection		= None
        while not connection:
            try:
                connection	= enip.client.connector( *svraddr, timeout=clitimeout )
            except OSError as exc:
                if exc.errno != errno.ECONNREFUSED:
                    raise
                time.sleep( .1 )
         
        results			= []
        failures		= 0
        with connection:
            multiple		= random.randint( 0, 4 ) * climultiple // 4 	# eg. 0, 125, 250, 375, 500
            depth		= random.randint( 0, clidepth )			# eg. 0 .. 5
            for idx,dsc,req,rpy,sts,val in connection.pipeline(
                    operations=enip.client.parse_operations( tags ), timeout=clitimeout,
                    multiple=multiple, depth=depth ):
                log.detail( "Client %3d: %s --> %r ", n, dsc, val )
                if not val:
                    log.warning( "Client %d harvested %d/%d results; failed request: %s",
                                 n, len( results ), len( tags ), rpy )
                    failures       += 1
                results.append( (dsc,val) )
        if len( results ) != len( tags ):
            log.warning( "Client %d harvested %d/%d results", n, len( results ), len( tags ))
            failures	       += 1
        # Now, ensure that any results that reported values reported the correct values -- each
        # value equals its own index or 0.
        for i,(elm,cnt),tag,(dsc,val) in zip( range( times ), regs, tags, results ):
            log.detail( "Running on test %3d: operation %34s (%34s) on %5s[%3d-%-3d] ==> %s",
                i, tag, dsc, name, elm, elm + cnt - 1, val )
            if not val:
                log.warning( "Failure in test %3d: operation %34s (%34s) on %5s[%3d-%-3d]: %s",
                             i, tag, dsc, name, elm, elm + cnt - 1, val )
                failures       += 1
            if isinstance( val, list ): # write returns True; read returns list of data
                #print( "%s testing %10s[%5d-%-5d]: %r" % ( threading.current_thread().name, tag, elm, elm + cnt - 1, val ))
                if not all( v in (e,0) for v,e in zip( val, range( elm, elm + cnt ))):
                    log.warning( "Failure in test %3d: operation %34s (%34s) on %5s[%3d-%-3d] didn't equal indexes: %s",
                                 i, tag, dsc, name, elm, elm + cnt - 1, val )
                    failures       += 1

        return 1 if failures else 0
Example #14
    def drawTextItem(self, point, text_item):
        # return super(PdfEngine, self).drawTextItem(point, text_item)
        self.apply_graphics_state()
        gi = GlyphInfo(*self.qt_hack.get_glyphs(point, text_item))
        if not gi.indices:
            return
        metrics = self.fonts.get(gi.name)
        if metrics is None:
            from calibre.utils.fonts.utils import get_all_font_names
            try:
                names = get_all_font_names(gi.name, True)
                names = ' '.join('%s=%s'%(k, names[k]) for k in sorted(names))
            except Exception:
                names = 'Unknown'
            self.debug('Loading font: %s' % names)
            try:
                self.fonts[gi.name] = metrics = self.create_sfnt(text_item)
            except UnsupportedFont:
                self.debug('Failed to load font: %s, drawing text as outlines...' % names)
                return super(PdfEngine, self).drawTextItem(point, text_item)
        indices, positions = [], []
        ignore_glyphs = metrics.ignore_glyphs
        for glyph_id, gpos in zip(gi.indices, gi.positions):
            if glyph_id not in ignore_glyphs:
                indices.append(glyph_id)
                positions.append(gpos)
        for glyph_id in indices:
            try:
                metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
            except (KeyError, ValueError):
                pass
        glyphs = []
        last_x = last_y = 0
        for glyph_index, (x, y) in zip(indices, positions):
            glyphs.append((x-last_x, last_y - y, glyph_index))
            last_x, last_y = x, y

        if not self.content_written_to_current_page:
            dy = self.graphics.current_state.transform.dy()
            ypositions = [y + dy for x, y in positions]
            miny = min(ypositions or (0,))
            maxy = max(ypositions or (self.pixel_height,))
            page_top = self.header_height if self.has_headers else 0
            page_bottom = self.pixel_height - (self.footer_height if self.has_footers else 0)
            if page_top <= miny <= page_bottom or page_top <= maxy <= page_bottom:
                self.content_written_to_current_page = 'drawTextItem'
            else:
                self.debug('Text in header/footer: miny=%s maxy=%s page_top=%s page_bottom=%s'% (
                    miny, maxy, page_top, page_bottom))
        self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
                                glyphs)
Example #15
    def as_sql(self):
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.model._meta
        result = ['INSERT INTO %s' % qn(opts.db_table)]

        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))

        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(
                        getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
                        connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)

        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            result.append(r_fmt % col)
            params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]
Example #16
def pad_larger(*arrays):
    """Pad the smallest of `n` arrays.

    Parameters
    ----------
    arrays : tuple of array_like

    Raises
    ------
    AssertionError

    Returns
    -------
    ret : tuple
        Tuple of zero padded arrays.
    """
    assert all(map(isinstance, arrays, itools.repeat(np.ndarray))), \
        ("all arguments must be instances of ndarray or implement the ndarray"
         " interface")
    if len(arrays) == 2:
        return pad_larger2(*arrays)

    sizes = np.fromiter(map(operator.attrgetter('size'), arrays), int)
    lsize = sizes.max()

    ret = []

    for array, size in zip(arrays, sizes):
        size_diff = lsize - size
        ret.append(np.pad(array, (0, size_diff), 'constant',
                          constant_values=(0,)))
    ret.append(lsize)

    return ret
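Usage with three arrays (two arrays are delegated to pad_larger2, which is not
shown here), assuming the module imports numpy as np, operator and itertools as
itools:

a, b, c = np.array([1, 2]), np.array([1, 2, 3]), np.array([1, 2, 3, 4])
*padded, size = pad_larger(a, b, c)
# each element of padded now has length size == 4, zero-padded on the right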
Example #17
    def doparse(url, d):
        parm = {}
        path = url.split("://")[1]
        delim = path.find("@");
        if delim != -1:
            (user, pswd, host, port) = path.split('@')[0].split(":")
            path = path.split('@')[1]
        else:
            (host, port) = d.getVar('P4PORT', False).split(':')
            user = ""
            pswd = ""

        if path.find(";") != -1:
            keys = []
            values = []
            plist = path.split(';')
            for item in plist:
                if item.count('='):
                    (key, value) = item.split('=')
                    keys.append(key)
                    values.append(value)

            parm = dict(zip(keys, values))
        path = "//" + path.split(';')[0]
        host += ":%s" % (port)
        parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)

        return host, path, user, pswd, parm
Example #18
 def handle_checks(results):
     from future_builtins import zip
     for balance, (success, result) in zip(balances_to_check, results):
         peer = balance[0]
         if success is True:
             if result is False:
                 if balance[4] <= 1:  # first or second strike, give them another chance
                     new_expected_balance = (balance[0],
                                             balance[1],
                                             balance[2],
                                             datetime.datetime.now() + self.max_expected_payment_time,
                                             balance[4] + 1,
                                             balance[5])
                     self.expected_balance_at_time.append(new_expected_balance)
                     peer.update_score(-5.0)
                 else:
                     peer.update_score(-50.0)
             else:
                 if balance[4] == 0:
                     peer.update_score(balance[5])
                 peer.update_stats('points_received', balance[5])
         else:
             log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
                         "expected balance: %s, expected time: %s, count: %s, error: %s",
                         str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
                         str(balance[4]), str(result.getErrorMessage()))
Example #19
 def generate_designs(self, parameters, nr_samples):
     '''external interface to sampler. Returns the computational experiments
     over the specified parameters, for the given number of samples for each
     parameter.
     
     Parameters
     ----------
     parameters : list 
                     a list of parameters for which to generate the
                     experimental designs
     nr_samples : int
                  the number of samples to draw for each parameter
     
     
     Returns
     -------
     generator
         a generator object that yields the designs resulting from
         combining the parameters
     int
         the number of experimental designs
     
     '''
     parameters = sorted(parameters, key=operator.attrgetter('name'))
     sampled_parameters = self.generate_samples(parameters, nr_samples)
     designs = zip(*[sampled_parameters[u.name] for u in parameters]) 
     designs = DefaultDesigns(designs, parameters, nr_samples)
     
     return designs
Example #20
def design_generator(designs, params, kind):
    '''generator that labels each sampled design with the correct parameter
    names and yields instances of kind built from the resulting dicts.
    
    Parameters
    ----------
    designs : iterable of tuples
    params : iterable of Parameter instances
    kind : callable
        the experiment class instantiated with each design dict
    
    Yields
    ------
    kind
        experimental design, constructed from the design dictionary
    
    '''
    
    for design in designs:

        design_dict = {}
        for param, value in zip(params, design):
            if isinstance(param, IntegerParameter):
                value = int(value)
            if isinstance(param, CategoricalParameter):
                # categorical parameter is an integer parameter, so
                # conversion to int is already done
                value = param.cat_for_index(value).value
            
            design_dict[param.name] = value
        
        yield kind(**design_dict)
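A hypothetical illustration with stand-in parameter objects and a namedtuple as
kind; the real Parameter classes come from the surrounding workbench package:

from collections import namedtuple

class FakeParam:
    def __init__(self, name):
        self.name = name

Experiment = namedtuple('Experiment', ['a', 'b'])
designs = [(0.1, 0.9), (0.4, 0.6)]
params = [FakeParam('a'), FakeParam('b')]
list(design_generator(designs, params, Experiment))
# [Experiment(a=0.1, b=0.9), Experiment(a=0.4, b=0.6)]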
        
Example #21
    def randomize(self, ntax=None, taxon_list=None, branchlength=1.0, branchlength_sd=None, bifurcate=True):
        """Generates a random tree with ntax taxa and/or taxa from taxlabels.

        new_tree = randomize(self,ntax=None,taxon_list=None,branchlength=1.0,branchlength_sd=None,bifurcate=True)
        Trees are bifurcating by default. (Polytomies not yet supported).
        """

        if not ntax and taxon_list:
            ntax = len(taxon_list)
        elif not taxon_list and ntax:
            taxon_list = ["taxon" + str(i + 1) for i in range(ntax)]
        elif not ntax and not taxon_list:
            raise TreeError("Either numer of taxa or list of taxa must be specified.")
        elif ntax != len(taxon_list):
            raise TreeError("Length of taxon list must correspond to ntax.")
        # initiate self with empty root
        self.__init__()
        terminals = self.get_terminals()
        # bifurcate randomly at terminal nodes until ntax is reached
        while len(terminals) < ntax:
            newsplit = random.choice(terminals)
            new_terminals = self.split(parent_id=newsplit, branchlength=branchlength)
            # if desired, give some variation to the branch length
            if branchlength_sd:
                for nt in new_terminals:
                    bl = random.gauss(branchlength, branchlength_sd)
                    if bl < 0:
                        bl = 0
                    self.node(nt).data.branchlength = bl
            terminals.extend(new_terminals)
            terminals.remove(newsplit)
        # distribute taxon labels randomly
        random.shuffle(taxon_list)
        for (node, name) in zip(terminals, taxon_list):
            self.node(node).data.taxon = name
Example #22
    def drawTextItem(self, point, text_item):
        self.content_written_to_current_page = 'drawTextItem'
        # return super(PdfEngine, self).drawTextItem(point, text_item)
        self.apply_graphics_state()
        gi = GlyphInfo(*self.qt_hack.get_glyphs(point, text_item))
        if not gi.indices:
            return
        metrics = self.fonts.get(gi.name)
        if metrics is None:
            from calibre.utils.fonts.utils import get_all_font_names
            try:
                names = get_all_font_names(gi.name, True)
                names = ' '.join('%s=%s'%(k, names[k]) for k in sorted(names))
            except Exception:
                names = 'Unknown'
            self.debug('Loading font: %s' % names)
            try:
                self.fonts[gi.name] = metrics = self.create_sfnt(text_item)
            except UnsupportedFont:
                return super(PdfEngine, self).drawTextItem(point, text_item)
        for glyph_id in gi.indices:
            try:
                metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
            except (KeyError, ValueError):
                pass
        glyphs = []
        last_x = last_y = 0
        for glyph_index, (x, y) in zip(gi.indices, gi.positions):
            glyphs.append((x-last_x, last_y - y, glyph_index))
            last_x, last_y = x, y

        self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
                                glyphs)
Example #23
def mi2df(mi):
    """Return a `pandas <http://pandas.pydata.org>`_
    `MultiIndex <http://pandas.pydata.org/pandas-docs/dev/indexing.html#hierarchical-indexing-multiindex>`_
    as a `DataFrame <http://pandas.pydata.org/pandas-docs/dev/dsintro.html#dataframe>`_.

    Parameters
    ----------
    mi : `MultiIndex <http://pandas.pydata.org/pandas-docs/dev/indexing.html#hierarchical-indexing-multiindex>`_

    Returns
    -------
    df : `DataFrame <http://pandas.pydata.org/pandas-docs/dev/dsintro.html#dataframe>`_
    """
    assert isinstance(mi, MultiIndex), \
        'conversion not implemented for simple indices'

    def _type_converter(x):
        if not isinstance(x, basestring):
            return type(x)

        return 'S%i' % len(x)

    v = mi.values  # raw object array
    n = mi.names
    t = list(map(_type_converter, v[0]))  # strings are empty without this call
    dt = np.dtype(list(zip(n, t)))
    r = v.astype(dt)  # recarray
    return DataFrame(r)  # df handles recarrays very nicely
Example #24
    def parse_text_assertion(self, raw, ans):
        oraw = raw
        if not raw.startswith('['):
            return oraw
        raw = raw[1:]
        ta = {}
        m, raw = self.do_match(self.ta1_pat, raw)
        if m is not None:
            before, after = m.groups()
            ta['before'] = self.unescape(before)
            if after is not None:
                ta['after'] = self.unescape(after)
        else:
            m, raw = self.do_match(self.ta2_pat, raw)
            if m is not None:
                ta['after'] = self.unescape(m.group(1))

        # parse parameters
        m, raw = self.do_match(self.parameters_pat, raw)
        if m is not None:
            params = {}
            for name, value in zip(m.captures(1), m.captures(2)):
                params[name] = tuple(map(self.unescape, self.csv_pat.match(value).captures(1)))
            if params:
                ta['params'] = params

        if not raw.startswith(']'):
            return oraw  # no closing ] or extra content in the assertion

        if ta:
            ans['text_assertion'] = ta
        return raw[1:]
Example #25
def izip_records(seqarrays, fill_value=None, flatten=True):
    """
    Returns an iterator of concatenated items from a sequence of arrays.

    Parameters
    ----------
    seqarrays : sequence of arrays
        Sequence of arrays.
    fill_value : {None, integer}
        Value used to pad shorter iterables.
    flatten : {True, False}
        Whether to flatten the items, collapsing nested fields.
    """
    # OK, that's a complete ripoff from Python2.6 itertools.izip_longest
    def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
        "Yields the fill_value or raises IndexError"
        yield counter()
    #
    fillers = itertools.repeat(fill_value)
    iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
    # Should we flatten the items, or just use a nested approach
    if flatten:
        zipfunc = _izip_fields_flat
    else:
        zipfunc = _izip_fields
    #
    try:
        for tup in zip(*iters):
            yield tuple(zipfunc(tup))
    except IndexError:
        pass
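The sentinel trick in miniature: every exhausted iterable pops one element from a
shared (n-1)-element list, so pop only raises IndexError once all inputs are done.
A self-contained equivalent:

import itertools

def zip_longest_sketch(*iterables, fill_value=None):
    def sentinel(counter=([fill_value] * (len(iterables) - 1)).pop):
        yield counter()  # IndexError here ends the outer zip
    fillers = itertools.repeat(fill_value)
    iters = [itertools.chain(it, sentinel(), fillers) for it in iterables]
    try:
        for tup in zip(*iters):
            yield tup
    except IndexError:
        pass

list(zip_longest_sketch([1, 2, 3], 'ab'))  # [(1, 'a'), (2, 'b'), (3, None)]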
Example #26
 def filesystem_cache(self):
     if self._filesystem_cache is None:
         debug('Loading filesystem metadata...')
         st = time.time()
         from calibre.devices.mtp.filesystem_cache import FilesystemCache
         ts = self.total_space()
         all_storage = []
         items = []
         for storage_id, capacity in zip([self._main_id, self._carda_id,
             self._cardb_id], ts):
             if storage_id is None: continue
             name = _('Unknown')
             for s in self.dev.data['storage']:
                 if s['id'] == storage_id:
                     name = s['name']
                     break
             storage = {'id':storage_id, 'size':capacity, 'name':name,
                     'is_folder':True, 'can_delete':False, 'is_system':True}
             self._currently_getting_sid = unicode(storage_id)
             id_map = self.dev.get_filesystem(storage_id,
                     self._filesystem_callback)
             for x in id_map.itervalues(): x['storage_id'] = storage_id
             all_storage.append(storage)
             items.append(id_map.itervalues())
         self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
         debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
             time.time()-st, len(self._filesystem_cache)))
     return self._filesystem_cache
Example #27
def fetch_plugins(old_index):
    ans = {}
    pool = ThreadPool(processes=10)
    entries = tuple(parse_index())
    result = pool.map(partial(parallel_fetch, old_index), entries)
    for entry, plugin in zip(entries, result):
        if isinstance(plugin, dict):
            ans[entry.name] = plugin
        else:
            if entry.name in old_index:
                ans[entry.name] = old_index[entry.name]
            log('Failed to get plugin', entry.name, 'at', datetime.utcnow().isoformat(), 'with error:')
            log(plugin)
    # Move staged files
    for plugin in ans.itervalues():
        if plugin['file'].startswith('staging_'):
            src = plugin['file']
            plugin['file'] = src.partition('_')[-1]
            os.rename(src, plugin['file'])
    raw = bz2.compress(json.dumps(ans, sort_keys=True, indent=4, separators=(',', ': ')))
    atomic_write(raw, PLUGINS)
    # Cleanup any extra .zip files
    all_plugin_files = {p['file'] for p in ans.itervalues()}
    extra = set(glob.glob('*.zip')) - all_plugin_files
    for x in extra:
        os.unlink(x)
    return ans
Example #28
def shifted_corr(reference, image, displacement):
    """Calculate the correlation between the reference and the image shifted
    by the given displacement.

    Parameters
    ----------
    reference : np.ndarray
    image : np.ndarray
    displacement : np.ndarray

    Returns
    -------
    correlation : float
    """
    ref_cuts = np.maximum(0, displacement)
    ref = reference[ref_cuts[0]:, ref_cuts[1]:, ref_cuts[2]:]
    im_cuts = np.maximum(0, -displacement)
    im = image[im_cuts[0]:, im_cuts[1]:, im_cuts[2]:]
    s = np.minimum(im.shape, ref.shape)
    ref = ref[:s[0], :s[1], :s[2]]
    im = im[:s[0], :s[1], :s[2]]
    ref -= nanmean(ref.reshape(-1, ref.shape[-1]), axis=0)
    ref = np.nan_to_num(ref)
    im -= nanmean(im.reshape(-1, im.shape[-1]), axis=0)
    im = np.nan_to_num(im)
    assert np.all(np.isfinite(ref)) and np.all(np.isfinite(im))
    corr = nanmean(
        [np.sum(i * r) / np.sqrt(np.sum(i * i) * np.sum(r * r)) for
         i, r in zip(np.rollaxis(im, -1), np.rollaxis(ref, -1))])
    return corr
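A usage sketch, assuming nanmean is numpy's; note the function mean-subtracts its
inputs in place, so pass copies if the originals matter:

import numpy as np

ref = np.random.rand(8, 8, 2)
shifted_corr(ref.copy(), ref.copy(), np.array([0, 0, 0]))  # ~1.0 at zero shift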
Example #29
    def randomized(cls, taxa, branch_length=1.0, branch_stdev=None):
        """Create a randomized bifurcating tree given a list of taxa.

        :param taxa: Either an integer specifying the number of taxa to create
            (automatically named taxon#), or an iterable of taxon names, as
            strings.

        :returns: a tree of the same type as this class.
        """
        if isinstance(taxa, int):
            taxa = ['taxon%s' % (i+1) for i in range(taxa)]
        elif hasattr(taxa, '__iter__'):
            taxa = list(taxa)
        else:
            raise TypeError("taxa argument must be integer (# taxa) or "
                            "iterable of taxon names.")
        rtree = cls()
        terminals = [rtree.root]
        while len(terminals) < len(taxa):
            newsplit = random.choice(terminals)
            newsplit.split(branch_length=branch_length)
            newterms = newsplit.clades
            if branch_stdev:
                # Add some noise to the branch lengths
                for nt in newterms:
                    nt.branch_length = max(0,
                            random.gauss(branch_length, branch_stdev))
            terminals.remove(newsplit)
            terminals.extend(newterms)
        # Distribute taxon labels randomly
        random.shuffle(taxa)
        for node, name in zip(terminals, taxa):
            node.name = name
        return rtree
Example #30
def generate_simulation_params():
    """
    Custom function to generate
    file names for gene/gwas simulation study
    """

    simulation_files, simulation_file_roots = get_simulation_files()
    gene_gwas_file_pairs, gene_gwas_file_roots = get_gene_gwas_file_pairs()

    for sim_file, sim_file_root in zip(simulation_files, simulation_file_roots):
        for (gene, gwas), gene_file_root in zip(gene_gwas_file_pairs, gene_gwas_file_roots):

            result_file = "%s.%s.simulation_res" % (gene_file_root, sim_file_root)
            result_file_path = os.path.join(working_dir, "simulation_results", result_file)

            yield [gene, gwas, sim_file], result_file_path, gene_file_root, sim_file_root, result_file
Example #31
    def drawTextItem(self, point, text_item):
        # return super(PdfEngine, self).drawTextItem(point, text_item)
        self.apply_graphics_state()
        gi = GlyphInfo(*self.qt_hack.get_glyphs(point, text_item))
        if not gi.indices:
            return
        metrics = self.fonts.get(gi.name)
        if metrics is None:
            from calibre.utils.fonts.utils import get_all_font_names
            try:
                names = get_all_font_names(gi.name, True)
                names = ' '.join('%s=%s' % (k, names[k])
                                 for k in sorted(names))
            except Exception:
                names = 'Unknown'
            self.debug('Loading font: %s' % names)
            try:
                self.fonts[gi.name] = metrics = self.create_sfnt(text_item)
            except UnsupportedFont:
                self.debug(
                    'Failed to load font: %s, drawing text as outlines...' %
                    names)
                return super(PdfEngine, self).drawTextItem(point, text_item)
        for glyph_id in gi.indices:
            try:
                metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
            except (KeyError, ValueError):
                pass
        glyphs = []
        last_x = last_y = 0
        for glyph_index, (x, y) in zip(gi.indices, gi.positions):
            glyphs.append((x - last_x, last_y - y, glyph_index))
            last_x, last_y = x, y

        if not self.content_written_to_current_page:
            dy = self.graphics.current_state.transform.dy()
            ypositions = [y + dy for x, y in gi.positions]
            miny = min(ypositions or (0, ))
            maxy = max(ypositions or (self.pixel_height, ))
            page_top = self.header_height if self.has_headers else 0
            page_bottom = self.pixel_height - (self.footer_height
                                               if self.has_footers else 0)
            if page_top <= miny <= page_bottom or page_top <= maxy <= page_bottom:
                self.content_written_to_current_page = 'drawTextItem'
            else:
                self.debug(
                    'Text in header/footer: miny=%s maxy=%s page_top=%s page_bottom=%s'
                    % (miny, maxy, page_top, page_bottom))
        self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
                                glyphs)
Example #32
    def parse(self):
        """
        Parse the bitstream and extract each NALU.
        Call the respective callbacks for each NALU type found.
        """

        self._get_nalu_positions()
        nalu_sps = None
        nalu_pps = None
        for current_nalu_pos, next_nalu_pos in zip(self.nal_unit_positions, islice(self.nal_unit_positions, 1, None)):
            current_nalu_bytepos = int(current_nalu_pos / 8)
            next_nalu_bytepos = int(next_nalu_pos / 8)
            nalu_bytes = self.stream[current_nalu_pos: next_nalu_pos]

            self.__call('nalu', nalu_bytes)

            if self.verbose:
                print("")
                print("========================================================================================================")
                print("")
                print("NALU bytepos:\t[" + str(current_nalu_bytepos) + ", " + str(next_nalu_bytepos - 1) + "]")
                print("NALU offset:\t" + str(current_nalu_bytepos) + " Bytes")
                print("NALU length:\t" + str(next_nalu_bytepos - current_nalu_bytepos) + " Bytes (including start code)")

            current_nalu_stream_segment = BitStream(self.stream[current_nalu_pos: next_nalu_pos])

            nal_unit_type, rbsp_payload = self._decode_nalu(current_nalu_stream_segment)

            if self.verbose:
                print("NALU type:\t" + str(nal_unit_type) + " (" + nalutypes.get_description(nal_unit_type) + ")")
                print("NALU bytes:\t" + str(nalu_bytes))
                print("NALU RBSP:\t" + str(rbsp_payload))
                print("")

            if nal_unit_type == nalutypes.NAL_UNIT_TYPE_SPS:
                nalu_sps = nalutypes.SPS(rbsp_payload, self.verbose)
                self.__call('sps', rbsp_payload)
            elif nal_unit_type == nalutypes.NAL_UNIT_TYPE_PPS:
                nalu_pps = nalutypes.PPS(rbsp_payload, self.verbose)
                self.__call('pps', rbsp_payload)
            elif nal_unit_type == nalutypes.NAL_UNIT_TYPE_AUD:
                aud = nalutypes.AUD(rbsp_payload, self.verbose)
                self.__call('aud', rbsp_payload)
            elif nal_unit_type == nalutypes.NAL_UNIT_TYPE_CODED_SLICE_NON_IDR:
                nalu_slice = nalutypes.CodedSliceNonIDR(rbsp_payload, nalu_sps, nalu_pps, self.verbose)
                self.__call('slice', rbsp_payload)
            elif nal_unit_type == nalutypes.NAL_UNIT_TYPE_CODED_SLICE_IDR:
                nalu_slice = nalutypes.CodedSliceIDR(rbsp_payload, nalu_sps, nalu_pps, self.verbose)
                self.__call('slice', rbsp_payload)
Example #33
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
        # a subsequent commit/rollback is executed, so any database locks
        # are released.
        if self.query.select_for_update and transaction.is_managed(self.using):
            transaction.set_dirty(self.using)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_fields isn't populated until
                        # execute_sql() has been called.
                        if self.query.select_fields:
                            fields = self.query.select_fields + self.query.related_select_fields
                        else:
                            fields = self.query.model._meta.fields
                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            db_table = self.query.model._meta.db_table
                            fields = [
                                f for f in fields if db_table in only_load
                                and f.column in only_load[db_table]
                            ]
                    row = self.resolve_columns(row, fields)

                if has_aggregate_select:
                    aggregate_start = len(
                        self.query.extra_select.keys()) + len(
                            self.query.select)
                    aggregate_end = aggregate_start + len(
                        self.query.aggregate_select)
                    row = tuple(row[:aggregate_start]) + tuple([
                        self.query.resolve_aggregate(value, aggregate,
                                                     self.connection)
                        for (alias, aggregate), value in zip(
                            self.query.aggregate_select.items(),
                            row[aggregate_start:aggregate_end])
                    ]) + tuple(row[aggregate_end:])

                yield row
Example #34
def test_stereo_calibrated_mp_dummy():
    from datetime import datetime

    camera = StereoCamera(left_camera, right_camera, R, T, E, F, Q)

    total_single = 0
    left = CalibratedCamera(DummySubclass(), camera.left)
    with left as left_:
        for frame in left_:
            delta = (datetime.now() - frame.timestamp).total_seconds()
            total_single += delta

    total_sum = 0
    left = CalibratedCamera(DummySubclass(), camera.left)
    right = CalibratedCamera(DummySubclass(), camera.right)
    with left as left_:
        with right as right_:
            for framea, frameb in zip(left_, right_):
                delta = (datetime.now() - framea.timestamp).total_seconds()
                total_sum += delta

    total_th = 0
    left = CalibratedCamera(DummySubclass(), camera.left)
    right = CalibratedCamera(DummySubclass(), camera.right)
    with CalibratedStereoCamera(left, right, camera,
                                display_results=False) as vision:
        for frame in vision:
            delta = (datetime.now() - frame.timestamp).total_seconds()
            total_th += delta

    total_mp = 0
    left = MultiProcessing(CalibratedCamera(DummySubclass(), camera.left),
                           freerun=False)
    right = MultiProcessing(CalibratedCamera(DummySubclass(), camera.right),
                            freerun=False)
    with CalibratedStereoCamera(left, right, camera,
                                display_results=False) as vision:
        for frame in vision:
            delta = (datetime.now() - frame.timestamp).total_seconds()
            total_mp += delta

    print('Single total = ', total_single)
    print('Sequential total = ', total_sum)
    print('Threaded total = ', total_th)
    print('Multiprocessing total = ', total_mp)

    assert (total_sum > total_single)
    assert (total_single <= total_th < total_sum)
    assert (total_single <= total_mp < total_sum)
Example #35
def _parse_reaction(model, line):
    """Parse a 'reaction' line from a BNGL net file."""
    (number, reactants, products, rate, rule) = line.strip().split(' ', 4)
    # the -1 is to switch from one-based to zero-based indexing
    reactants = tuple(int(r) - 1 for r in reactants.split(','))
    products = tuple(int(p) - 1 for p in products.split(','))
    rate = rate.rsplit('*')
    (rule_list, unit_conversion) = re.match(
                    r'#([\w,\(\)]+)(?: unit_conversion=(.*))?\s*$', rule).groups()
    rule_list = rule_list.split(',') # BNG lists all rules that generate a rxn
    # Support new (BNG 2.2.6-stable or greater) and old BNG naming convention for reverse rules
    rule_name, is_reverse = zip(*[re.subn(r'^_reverse_|\(reverse\)$', '', r) for r in rule_list])
    is_reverse = tuple(bool(i) for i in is_reverse)
    r_names = ['__s%d' % r for r in reactants]
    combined_rate = sympy.Mul(*[sympy.S(t) for t in r_names + rate])
    reaction = {
        'reactants': reactants,
        'products': products,
        'rate': combined_rate,
        'rule': rule_name,
        'reverse': is_reverse,
        }
    model.reactions.append(reaction)
    # bidirectional reactions
    key = (reactants, products)
    key_reverse = (products, reactants)
    if key in reaction_cache:
        reaction_bd = reaction_cache.get(key)
        reaction_bd['rate'] += combined_rate
        reaction_bd['rule'] += tuple(r for r in rule_name if r not in
                                     reaction_bd['rule'])
    elif key_reverse in reaction_cache:
        reaction_bd = reaction_cache.get(key_reverse)
        reaction_bd['reversible'] = True
        reaction_bd['rate'] -= combined_rate
        reaction_bd['rule'] += tuple(r for r in rule_name if r not in
                                     reaction_bd['rule'])
    else:
        # make a copy of the reaction dict
        reaction_bd = dict(reaction)
        # default to false until we find a matching reverse reaction
        reaction_bd['reversible'] = False
        reaction_cache[key] = reaction_bd
        model.reactions_bidirectional.append(reaction_bd)
    # odes
    for p in products:
        model.odes[p] += combined_rate
    for r in reactants:
        model.odes[r] -= combined_rate
Example #36
def _parse_kasim_outfile(out_filename):
    """
    Parses the KaSim .out file into a Numpy ndarray.

    Parameters
    ----------
    out_filename : string
        String specifying the location of the .out filename produced by KaSim.

    Returns
    -------
    numpy.ndarray
        Returns the KaSim simulation data as a Numpy ndarray. Data is accessed
        using the syntax::

            results[index_name]

        The index 'time' gives the data for the time coordinates of the
        simulation. Data for the observables can be accessed by indexing the
        array with the names of the observables.
    """

    try:
        with open(out_filename, 'r') as out_file:
            line = out_file.readline().strip()  # Get the first line
        line = line[2:]  # strip off opening '# '
        raw_names = re.split(' ', line)
        column_names = []

        # Get rid of the quotes surrounding the observable names
        for raw_name in raw_names:
            mo = re.match("'(.*)'", raw_name)
            if mo:
                column_names.append(mo.group(1))
            else:
                column_names.append(raw_name)

        # Create the dtype argument for the numpy record array
        dt = list(zip(column_names, ('float', ) * len(column_names)))

        # Load the output file as a numpy record array, skip the name row
        arr = np.loadtxt(out_filename, dtype=float, skiprows=1)
        recarr = arr.view(dt)
    except Exception as e:
        raise Exception("problem parsing KaSim outfile: " + str(e))

    return recarr
Example #37
    def npts(self, lon1, lat1, lon2, lat2, npts, radians=False):
        """
        Given a single initial point and terminus point (specified by
        python floats lon1,lat1 and lon2,lat2), returns a list of
        longitude/latitude pairs describing npts equally spaced
        intermediate points along the geodesic between the initial and
        terminus points.

        If radians=True, the lons/lats are in radians instead of degrees.

        Example usage:

        >>> from pyproj import Geod
        >>> g = Geod(ellps='clrk66') # Use Clarke 1966 ellipsoid.
        >>> # specify the lat/lons of Boston and Portland.
        >>> boston_lat = 42.+(15./60.); boston_lon = -71.-(7./60.)
        >>> portland_lat = 45.+(31./60.); portland_lon = -123.-(41./60.)
        >>> # find ten equally spaced points between Boston and Portland.
        >>> lonlats = g.npts(boston_lon,boston_lat,portland_lon,portland_lat,10)
        >>> for lon,lat in lonlats: '%6.3f  %7.3f' % (lat, lon)
        '43.528  -75.414'
        '44.637  -79.883'
        '45.565  -84.512'
        '46.299  -89.279'
        '46.830  -94.156'
        '47.149  -99.112'
        '47.251  -104.106'
        '47.136  -109.100'
        '46.805  -114.051'
        '46.262  -118.924'
        >>> # test with radians=True (inputs/outputs in radians, not degrees)
        >>> import math
        >>> dg2rad = math.radians(1.)
        >>> rad2dg = math.degrees(1.)
        >>> lonlats = g.npts(dg2rad*boston_lon,dg2rad*boston_lat,dg2rad*portland_lon,dg2rad*portland_lat,10,radians=True)
        >>> for lon,lat in lonlats: '%6.3f  %7.3f' % (rad2dg*lat, rad2dg*lon)
        '43.528  -75.414'
        '44.637  -79.883'
        '45.565  -84.512'
        '46.299  -89.279'
        '46.830  -94.156'
        '47.149  -99.112'
        '47.251  -104.106'
        '47.136  -109.100'
        '46.805  -114.051'
        '46.262  -118.924'
        """
        lons, lats = _proj.Geod._npts(self, lon1, lat1, lon2, lat2, npts, radians=radians)
        return list(zip(lons, lats))
Example #38
    def inputChoice(self, question, options, hotkeys, default=None):
        """
        Ask the user a question with a predefined list of acceptable answers.

        DEPRECATED: Use L{input_choice} instead!

        Directly calls L{input_choice} with the options and hotkeys zipped
        into a tuple list. It always returns the hotkeys and throws no
        L{QuitKeyboardInterrupt} if quit was selected.
        """
        return self.input_choice(question=question,
                                 options=zip(options, hotkeys),
                                 default=default,
                                 return_shortcut=True,
                                 automatic_quit=False)
Example #39
 def add_books(self, paths, formats, metadata, add_duplicates=True, return_ids=False):
     books = [(mi, {fmt:path}) for mi, path, fmt in zip(metadata, paths, formats)]
     book_ids, duplicates = self.new_api.add_books(books, add_duplicates=add_duplicates, dbapi=self)
     if duplicates:
         paths, formats, metadata = [], [], []
         for mi, format_map in duplicates:
             metadata.append(mi)
             for fmt, path in format_map.iteritems():
                 formats.append(fmt)
                 paths.append(path)
         duplicates = (paths, formats, metadata)
     ids = book_ids if return_ids else len(book_ids)
     if book_ids:
         self.data.books_added(book_ids)
     return duplicates or None, ids
Example #40
def execute( via, params=None, pass_thru=None ):
    """Perform a single poll via the supplied enip.get_attribute.gateway instance, yielding the
    parameters and their polled values.  Supply params (a sequence of CIP ('<address>', '<type>')),
    as might be produced by the provided via's class' parameter_substitution method...

    By default, we'll look for the parameters in the module's PARAMS list, which must be recognized
    by the supplied via's parameter_substitutions method, if pass_thru is not Truthy.

    Yields tuples of each of the supplied params, with their polled values.

    """
    with contextlib.closing( via.read(
            via.parameter_substitution( params or PARAMS, pass_thru=pass_thru ))) as reader:
        for p,v in zip( params or PARAMS, reader ): # "lazy" zip
            yield p,v
Example #41
def execute( via, params=None, pass_thru=None ):
    """Perform a single poll via the supplied enip.get_attribute 'proxy' instance, yielding the
    parameters and their polled values.

    By default, we'll look for the parameters in the module's PARAMS list, which must be recognized
    by the supplied via's parameter_substitutions method, if pass_thru is not Truthy (default:
    True).

    Yields tuples of each of the supplied params, with their polled values.

    """
    with contextlib.closing( via.read(
            via.parameter_substitution( params or PARAMS, pass_thru=pass_thru ))) as reader:
        for p,v in zip( params or PARAMS, reader ): # "lazy" zip
            yield p,v
Example #42
    def calculate_length(self):
        delta = 0
        line_number_changes = ([], [])
        for v, lmap, changes in zip((self.view.left, self.view.right),
                                    ({}, {}), line_number_changes):
            b = v.document().firstBlock()
            ebl = v.document().documentLayout().ensureBlockLayout
            last_line_count = 0
            while b.isValid():
                ebl(b)
                lmap[b.blockNumber()] = last_line_count
                last_line_count += b.layout().lineCount()
                b = b.next()
            for top, bot, kind in v.changes:
                changes.append((lmap[top], lmap[bot], kind))

        changes = []
        for (l_top, l_bot, kind), (r_top, r_bot,
                                   kind) in zip(*line_number_changes):
            height = max(l_bot - l_top, r_bot - r_top)
            top = delta + l_top
            changes.append((top, top + height, kind))
            delta = top + height - l_bot
        self.changes, self.delta = (changes, ) + line_number_changes, delta
Example #43
0
    def _match_stereo(self, last_features, new_features):
        """Matches Last frame features with new frame features.
        Filters matched features based on triangulated points.

        :return: (last2d, last3d, last_descr, new2d, new3d, new_descr, last_points_right, new_points_right) or None
        """
        matches = self._match_features(last_features[3], new_features[3],
                self._feature_type, self._ratio, self._distance_thresh / 3, self._min_matches)

        if matches is None:
            return None

        last_points_3d = [last_features[2][m.queryIdx] for m in matches]
        new_points_3d = [new_features[2][m.trainIdx] for m in matches]

        dZ = 3 * sum(i[0] ** 2 for i in self._last_pose.translation) ** .5 if self._last_pose else self._dZ
        dZ = min(self._max_dZ, max(self._dZ, dZ))
        self._dZ = dZ
        mask = [abs(a[2] - b[2]) < dZ for a, b in zip(last_points_3d, new_points_3d)]

        if sum(mask) < self._min_matches:
            return None

        new_points_3d = np.float32([p for M, p in zip(mask, new_points_3d) if M])
        last_points_3d = np.float32([p for M, p in zip(mask, last_points_3d) if M])
        new_points_2d = np.float32([new_features[0][m.trainIdx] for M, m in zip(mask, matches) if M])
        new_points_2d_right = np.float32([new_features[1][m.trainIdx] for M, m in zip(mask, matches) if M])
        last_points_2d = np.float32([last_features[0][m.queryIdx] for M, m in zip(mask, matches) if M])
        last_points_2d_right = np.float32([last_features[1][m.queryIdx] for M, m in zip(mask, matches) if M])

        if isinstance(last_features[3], cv2.UMat):
            last_descriptors = last_features[3].get()
            new_descriptors = new_features[3].get()
        else:
            last_descriptors = last_features[3]
            new_descriptors = new_features[3]
        last_descriptors = np.array([last_descriptors[m.queryIdx] for M, m in zip(mask, matches) if M], dtype=last_descriptors.dtype)
        new_descriptors = np.array([new_descriptors[m.trainIdx] for M, m in zip(mask, matches) if M], dtype=new_descriptors.dtype)

        return last_points_2d, last_points_3d, last_descriptors, new_points_2d, new_points_3d, new_descriptors, last_points_2d_right, new_points_2d_right
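The repeated [x for M, x in zip(mask, xs) if M] comprehensions above implement boolean masking over plain Python lists; a self-contained illustration (with NumPy arrays, xs[mask] would do the same in one step):

mask = [True, False, True]
points = [(0.0, 0.0, 1.0), (1.0, 1.0, 9.0), (2.0, 2.0, 1.2)]
kept = [p for keep, p in zip(mask, points) if keep]
assert kept == [(0.0, 0.0, 1.0), (2.0, 2.0, 1.2)]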
Example #44
0
    def importIonChannels(self, doc, vmin=-120e-3, vmax=40e-3, vdivs=3000):
        for chan in doc.ionChannel:
            # print dir(chan)
            if chan.type_ == 'ionChannelHH':
                mchan = moose.HHChannel('%s/%s' % (self.lib.path, chan.id))
                mgates = map(moose.element,
                             (mchan.gateX, mchan.gateY, mchan.gateZ))
                # We handle only up to 3 gates in HHChannel
                assert len(chan.gate) <= 3
                for ngate, mgate in zip(chan.gate, mgates):
                    if mgate.name.endswith('X'):
                        mchan.Xpower = ngate.instances
                    elif mgate.name.endswith('Y'):
                        mchan.Ypower = ngate.instances
                    elif mgate.name.endswith('Z'):
                        mchan.Zpower = ngate.instances  # 'instances', matching the X and Y branches
                    mgate.min = vmin
                    mgate.max = vmax
                    mgate.divs = vdivs

                    # I saw only examples of GateHHRates in
                    # HH-channels; the meaning of forwardRate,
                    # reverseRate and steadyState is not clear in the
                    # classes GateHHRatesInf and GateHHRatesTau, and in
                    # GateHHTauInf the meaning of timeCourse and
                    # steadyState is not obvious. Is the last one
                    # referring to tau_inf and m_inf?
                    fwd = ngate.forwardRate
                    rev = ngate.reverseRate
                    if (fwd is not None) and (rev is not None):
                        beta = calculateRateFn(fwd, vmin, vmax, vdivs)
                        alpha = calculateRateFn(rev, vmin, vmax, vdivs)
                        mgate.tableA = alpha
                        mgate.tableB = alpha + beta
                        break
                    # Assuming the meaning of the elements in GateHHTauInf ...
                    tau = ngate.timeCourse
                    inf = ngate.steadyState
                    if (tau is not None) and (inf is not None):
                        tau = calculateRateFn(tau, vmin, vmax, vdivs)
                        inf = calculateRateFn(inf, vmin, vmax, vdivs)
                        mgate.tableA = inf / tau
                        mgate.tableB = 1 / tau
                        break
                self.id_to_ionChannel[chan.id] = chan
                self.nml_to_moose[chan] = mchan
                self.proto_chans[chan.id] = mchan
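For reference, the two branches fill the gate tables consistently: with rate constants alpha and beta, tau = 1/(alpha + beta) and inf = alpha/(alpha + beta), so tableA = alpha = inf/tau and tableB = alpha + beta = 1/tau either way. A numeric sanity check:

alpha, beta = 40.0, 10.0                 # arbitrary rate constants
tau = 1.0 / (alpha + beta)               # HH time constant
inf = alpha / (alpha + beta)             # HH steady-state activation
assert abs(inf / tau - alpha) < 1e-9              # tableA agrees
assert abs(1.0 / tau - (alpha + beta)) < 1e-9     # tableB agrees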
Example #45
0
    def test_create_from_naught_prestack(self):
        with TestContext("create_from_naught_prestack"):
            fname = "mk-ps.sgy"
            spec = segyio.spec()
            spec.format = 5
            spec.sorting = 2
            spec.samples = range(7)
            spec.ilines = range(1, 4)
            spec.xlines = range(1, 3)
            spec.offsets = range(1, 6)

            with segyio.create(fname, spec) as dst:
                arr = np.arange(start=0.000,
                                stop=0.007,
                                step=0.001,
                                dtype=np.single)

                arr = np.concatenate([[arr + 0.01], [arr + 0.02]], axis=0)
                lines = [arr + i for i in spec.ilines]
                cube = [(off * 100) + line for line in lines
                        for off in spec.offsets]

                dst.iline[:, :] = cube

                for of in spec.offsets:
                    for il in spec.ilines:
                        dst.header.iline[il, of] = {
                            TraceField.INLINE_3D: il,
                            TraceField.offset: of
                        }
                    for xl in spec.xlines:
                        dst.header.xline[xl, of] = {
                            TraceField.CROSSLINE_3D: xl
                        }

            with segyio.open(fname, "r") as f:
                self.assertAlmostEqual(101.010, f.trace[0][0], places=4)
                self.assertAlmostEqual(101.011, f.trace[0][1], places=4)
                self.assertAlmostEqual(101.016, f.trace[0][-1], places=4)
                self.assertAlmostEqual(503.025, f.trace[-1][5], places=4)
                self.assertNotEqual(f.header[0][TraceField.offset],
                                    f.header[1][TraceField.offset])
                self.assertEqual(1, f.header[0][TraceField.offset])
                self.assertEqual(2, f.header[1][TraceField.offset])

                for x, y in zip(f.iline[:, :], cube):
                    self.assertListEqual(list(x.flatten()), list(y.flatten()))
Example #46
0
    def __setitem__(self, i, val):
        """text[i] = val

        Write the ith text header of the file, starting at 0.
        If val is an instance of Text, or an iterable of Text,
        the value written is the first element of each Text.

        Parameters
        ----------
        i : int or slice
        val : str, Text, or an iterable of these if i is a slice

        Examples
        --------
        Write a new textual header:

        >>> f.text[0] = make_new_header()
        >>> f.text[1:3] = ["new_header1", "new_header_2"]

        Copy a textual header:

        >>> f.text[1] = g.text[0]

        Write a textual header based on Text:

        >>> f.text[1] = g.text
        >>> assert f.text[1] == g.text[0]

        >>> f.text[1:3] = [g1.text, g2.text]
        >>> assert f.text[1] == g1.text[0]
        >>> assert f.text[2] == g2.text[0]

        """
        if isinstance(val, Text):
            self[i] = val[0]
            return

        try:
            i = self.wrapindex(i)
            self.filehandle.puttext(i, val)

        except TypeError:
            for i, text in zip(range(*i.indices(len(self))), val):
                if isinstance(text, Text):
                    text = text[0]
                self.filehandle.puttext(i, text)
Example #47
0
    def sort(self, fields):
        """
        Sort the traces in the group, obeying the `fields` order of
        most-to-least significant word.
        """
        # TODO: examples

        headers = [dict(self.parent.header[i]) for i in self.index]
        index = list(zip(headers, self.index))
        # sorting is stable, so sort the whole set by field, applied in the
        # reverse order:
        for field in reversed(fields):
            index.sort(key=lambda x: x[0][field])

        # strip off all the headers
        index = [i for _, i in index]
        self.index = index
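The reversed-field loop works because Python's sort is stable: sorting by the least significant field first and the most significant field last produces a lexicographic order. A standalone demonstration with plain dicts standing in for trace headers:

headers = [{'iline': 2, 'xline': 1},
           {'iline': 1, 'xline': 2},
           {'iline': 1, 'xline': 1}]
index = list(zip(headers, range(len(headers))))
for field in reversed(['iline', 'xline']):  # most-to-least significant
    index.sort(key=lambda x: x[0][field])
assert [i for _, i in index] == [2, 1, 0]   # (1,1) < (1,2) < (2,1)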
Example #48
0
def parameters_equal(a, b):
    """Compares two parameter instances

    Checks full name, data, and ranges. Does not consider the comment.

    :return: True or False

    :raises: ValueError if neither input is a parameter instance

    """
    if not a.v_is_parameter and not b.v_is_parameter:
        raise ValueError('Neither input is a parameter')

    if (not b.v_is_parameter or not a.v_is_parameter):
        return False

    if a.v_full_name != b.v_full_name:
        return False

    if a.f_is_empty() and b.f_is_empty():
        return True

    if a.f_is_empty() != b.f_is_empty():
        return False

    if not a._values_of_same_type(a.f_get(), b.f_get()):
        return False

    if not a._equal_values(a.f_get(), b.f_get()):
        return False

    if a.f_has_range() != b.f_has_range():
        return False

    if a.f_has_range():
        if a.f_get_range_length() != b.f_get_range_length():
            return False

        for myitem, bitem in zip(a.f_get_range(copy=False),
                                 b.f_get_range(copy=False)):
            if not a._values_of_same_type(myitem, bitem):
                return False
            if not a._equal_values(myitem, bitem):
                return False

    return True
Example #49
0
def adjacent_pairs_iterate(array, reverse=False):
    """
    Yields pairs of adjacent items (current, next) from the sequence,
    optionally iterating from the end.
    """
    if len(array) < 2:
        return

    # get iterators to successive positions
    if reverse:
        curr_iter = reversed(array)
        next_iter = reversed(array)
    else:
        curr_iter = iter(array)
        next_iter = iter(array)
    next(next_iter)
    for i, j in zip(curr_iter, next_iter):
        yield i, j
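Usage; for concrete sequences the same pairing can be spelled zip(array, array[1:]), and Python 3.10+ offers itertools.pairwise:

assert list(adjacent_pairs_iterate([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
assert list(adjacent_pairs_iterate([1, 2, 3], reverse=True)) == [(3, 2), (2, 1)]
assert list(adjacent_pairs_iterate([42])) == []   # fewer than 2 items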
Example #50
0
def keyboardPartsToBraille(keyboardScore, **keywords):
    """
    Translates a Score object containing two :class:`~music21.stream.Part` instances
    to braille: an upper part and a lower part.
    Assumes that the two parts are aligned and well constructed. Bar over bar format is used.
    """
    parts = keyboardScore.getElementsByClass(['Part', 'PartStaff'])
    if len(parts) != 2:
        raise BrailleTranslateException("Can only translate two keyboard parts at a time")
    (inPlace, debug) = _translateArgs(**keywords)
    staffUpper = parts[0]
    staffLower = parts[1]
    upperPartToTranscribe = staffUpper
    if not inPlace:
        upperPartToTranscribe = staffUpper.makeNotation(cautionaryNotImmediateRepeat=False)
    lowerPartToTranscribe = staffLower
    if not inPlace:
        lowerPartToTranscribe = staffLower.makeNotation(cautionaryNotImmediateRepeat=False)
    rhSegments = segment.findSegments(upperPartToTranscribe, setHand='right', **keywords)
    lhSegments = segment.findSegments(lowerPartToTranscribe, setHand='left', **keywords)

    allBrailleText = []
    for (rhSegment, lhSegment) in zip(rhSegments, lhSegments):
        bg = segment.BrailleGrandSegment()
        for rhGroupingKey in rhSegment:
            bg[rhGroupingKey] = rhSegment[rhGroupingKey]

        for lhGroupingKey in lhSegment:
            bg[lhGroupingKey] = lhSegment[lhGroupingKey]

        bg.transcribe()
        if not debug:
            allBrailleText.append(bg.brailleText)
        else:
            if six.PY2:
                bsStr = str(bg)
                bsUni = bsStr.decode('utf-8')
                allBrailleText.append(bsUni)
            else:
                allBrailleText.append(str(bg))

    if six.PY2 and debug:
        return u"\n".join(allBrailleText)
    else:
        # six.text_type is unicode on Python 2 and str on Python 3;
        # a bare unicode() would raise NameError on Python 3
        return u"\n".join([six.text_type(bt) for bt in allBrailleText])
Example #51
0
    def save(self):
        """
        Saves all options into the file.
        """

        for options, lfn, sfn, default in self._handlers:
            vals = sfn()

            # map into list if necessary
            if not hasattr(vals, "__iter__"):
                vals = [vals]
            debug("Saving %s with values %s", options, vals)

            for value, (section, option) in zip(vals, options):
                self.set(option, unicode(value), section)

        with open(self._file, "w") as f:
            self._cfg.write(f)
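One caveat worth noting about the hasattr(vals, "__iter__") check: it distinguishes strings from lists only under Python 2 (where str has no __iter__); under Python 3 a single string value would be zipped character by character instead of being wrapped. A minimal illustration of the Python 3 behaviour:

vals = "on"                        # a single option value
if not hasattr(vals, "__iter__"):  # False on Python 3: str is iterable
    vals = [vals]
pairs = list(zip(vals, [("ui", "theme"), ("ui", "lang")]))
# Python 3: [('o', ('ui', 'theme')), ('n', ('ui', 'lang'))] -- each
# character is treated as a separate value, which is rarely intended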
Example #52
0
    def randomize(
        self,
        ntax=None,
        taxon_list=None,
        branchlength=1.0,
        branchlength_sd=None,
        bifurcate=True,
    ):
        """Generates a random tree with ntax taxa and/or taxa from taxlabels.

        new_tree = randomize(self,ntax=None,taxon_list=None,branchlength=1.0,branchlength_sd=None,bifurcate=True)
        Trees are bifurcating by default. (Polytomies not yet supported).
        """

        if not ntax and taxon_list:
            ntax = len(taxon_list)
        elif not taxon_list and ntax:
            taxon_list = ["taxon" + str(i + 1) for i in range(ntax)]
        elif not ntax and not taxon_list:
            raise TreeError(
                "Either number of taxa or list of taxa must be specified.")
        elif ntax != len(taxon_list):
            raise TreeError("Length of taxon list must correspond to ntax.")
        # initiate self with empty root
        self.__init__()
        terminals = self.get_terminals()
        # bifurcate randomly at terminal nodes until ntax is reached
        while len(terminals) < ntax:
            newsplit = random.choice(terminals)
            new_terminals = self.split(parent_id=newsplit,
                                       branchlength=branchlength)
            # if desired, give some variation to the branch length
            if branchlength_sd:
                for nt in new_terminals:
                    bl = random.gauss(branchlength, branchlength_sd)
                    if bl < 0:
                        bl = 0
                    self.node(nt).data.branchlength = bl
            terminals.extend(new_terminals)
            terminals.remove(newsplit)
        # distribute taxon labels randomly
        random.shuffle(taxon_list)
        for (node, name) in zip(terminals, taxon_list):
            self.node(node).data.taxon = name
Example #53
0
def argmatch(formals, actuals):
    if len(formals) != len(actuals):
        return False
    for (f, a) in zip(formals, actuals):
        # Here's the compatibility logic.  First, we catch the situations
        # in which a more restricted actual type matches a more general
        # formal one.  Then we have a fallback rule checking for type
        # equality or wildcarding.
        ftype = formaltype(f)
        atype = actualtype(a)
        if ftype == "any":
            pass
        elif (atype == "numeric" or a == "global") and ftype == "side":
            pass
        elif atype in ("filter", "empty") and ftype == "wml":
            pass
        elif atype in ("numeric", "position") and ftype == "span":
            pass
        elif atype in ("shortname", "name", "empty",
                       "stringliteral") and ftype == "affix":
            pass
        elif atype in ("shortname", "name",
                       "stringliteral") and ftype == "string":
            pass
        elif atype in ("shortname", "name", "string", "stringliteral",
                       "empty") and ftype == "optional_string":
            pass
        elif atype in ("shortname", ) and ftype == "terrain_code":
            pass
        elif atype in ("numeric", "position", "span",
                       "empty") and ftype == "alliance":
            pass
        elif atype in ("terrain_code", "shortname",
                       "name") and ftype == "terrain_pattern":
            pass
        elif atype in ("string", "shortname", "name") and ftype == "types":
            pass
        elif atype in ("numeric", "percentage") and ftype == "percentage":
            pass
        elif atype == "range" and ftype == "name":
            pass
        elif atype != ftype and ftype is not None and atype is not None:
            return False
    return True
Example #54
0
def find_unique_points(explored_parameters):
    """Takes a list of explored parameters and finds unique parameter combinations.

    If parameter ranges are hashable operates in O(N), otherwise O(N**2).

    :param explored_parameters:

        List of **explored** parameters

    :return:

        List of tuples, first entry being the parameter values, second entry a list
        of the run positions where that unique combination occurs.

    """
    ranges = [param.f_get_range(copy=False) for param in explored_parameters]
    zipped_tuples = list(zip(*ranges))
    try:
        unique_elements = OrderedDict()
        for idx, val_tuple in enumerate(zipped_tuples):
            if val_tuple not in unique_elements:
                unique_elements[val_tuple] = []
            unique_elements[val_tuple].append(idx)
        return compat.listitems(unique_elements)
    except TypeError:
        logger = logging.getLogger('pypet.find_unique')
        logger.error('Your parameter entries could not be hashed, '
                     'now I am sorting slowly in O(N**2).')
        unique_elements = []
        for idx, val_tuple in enumerate(zipped_tuples):
            matches = False
            for added_tuple, pos_list in unique_elements:
                matches = True
                for idx2, val in enumerate(added_tuple):
                    if not explored_parameters[idx2]._equal_values(
                            val_tuple[idx2], val):
                        matches = False
                        break
                if matches:
                    pos_list.append(idx)
                    break
            if not matches:
                unique_elements.append((val_tuple, [idx]))
        return unique_elements
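The fast path boils down to grouping run indices by a hashable key while preserving first-seen order; a toy, self-contained run of that pattern:

from collections import OrderedDict

ranges = [(1, 1, 2, 1), ('a', 'b', 'a', 'a')]  # two explored parameters
zipped = list(zip(*ranges))                    # per-run value tuples
unique = OrderedDict()
for idx, val_tuple in enumerate(zipped):
    unique.setdefault(val_tuple, []).append(idx)
assert list(unique.items()) == [((1, 'a'), [0, 3]),
                                ((1, 'b'), [1]),
                                ((2, 'a'), [2])]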
Example #55
0
    def createHHChannel(self, chan, vmin=-120e-3, vmax=40e-3, vdivs=3000):
        # vmin/vmax/vdivs were free variables; defaults assumed to
        # mirror importIonChannels above
        mchan = moose.HHChannel('%s/%s' % (self.lib.path, chan.id))
        mgates = map(moose.element, (mchan.gateX, mchan.gateY, mchan.gateZ))
        # We handle only up to 3 gates in HHChannel
        assert len(chan.gate) <= 3
        for ngate, mgate in zip(chan.gate, mgates):
            if mgate.name.endswith('X'):
                mchan.Xpower = ngate.instances
            elif mgate.name.endswith('Y'):
                mchan.Ypower = ngate.instances
            elif mgate.name.endswith('Z'):
                mchan.Zpower = ngate.instances  # 'instances', matching the X and Y branches
            mgate.min = vmin
            mgate.max = vmax
            mgate.divs = vdivs

            # I saw only examples of GateHHRates in
            # HH-channels; the meaning of forwardRate,
            # reverseRate and steadyState is not clear in the
            # classes GateHHRatesInf and GateHHRatesTau, and in
            # GateHHTauInf the meaning of timeCourse and
            # steadyState is not obvious. Is the last one
            # referring to tau_inf and m_inf?
            fwd = ngate.forwardRate
            rev = ngate.reverseRate
            if (fwd is not None) and (rev is not None):
                beta = calculateRateFn(fwd, vmin, vmax, vdivs)
                alpha = calculateRateFn(rev, vmin, vmax, vdivs)
                mgate.tableA = alpha
                mgate.tableB = alpha + beta
                break
            # Assuming the meaning of the elements in GateHHTauInf ...
            tau = ngate.timeCourse
            inf = ngate.steadyState
            if (tau is not None) and (inf is not None):
                tau = calculateRateFn(tau, vmin, vmax, vdivs)
                inf = calculateRateFn(inf, vmin, vmax, vdivs)
                mgate.tableA = inf / tau
                mgate.tableB = 1 / tau
                break
        if self.verbose:
            print(self.filename, 'Created', mchan.path, 'for', chan.id)
        return mchan
Example #56
0
    def __init__(self, buf):
        """
        Args:
          buf: file-like object containing ico file data
        """
        self.buf = buf
        self.entry = []

        header = struct.unpack('<3H', buf.read(6))
        if (0, 1) != header[:2]:
            raise SyntaxError('not an ico file')

        self.nb_items = header[2]

        dir_fields = (
            'width',
            'height',
            'nb_color',
            'reserved',
            'planes',
            'bpp',
            'size',
            'offset',
        )
        for i in range(self.nb_items):
            directory = list(struct.unpack('<4B2H2I', buf.read(16)))
            for j in range(3):
                if not directory[j]:
                    directory[j] = 256
            icon_header = dict(zip(dir_fields, directory))
            icon_header['color_depth'] = icon_header['bpp'] or (
                icon_header['nb_color'] == 16 and 4
            )
            icon_header['dim'] = (icon_header['width'], icon_header['height'])
            self.entry.append(icon_header)
        # end for (read headers)

        # order by size and color depth, largest and deepest first
        self.entry.sort(key=lambda x: (x['width'], x['color_depth']),
                        reverse=True)
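The dict(zip(dir_fields, ...)) line above is a convenient way to attach names to a struct.unpack result; the same idiom in isolation, with made-up directory values:

import struct

dir_fields = ('width', 'height', 'nb_color', 'reserved',
              'planes', 'bpp', 'size', 'offset')
raw = struct.pack('<4B2H2I', 32, 32, 0, 0, 1, 32, 4096, 22)
icon_header = dict(zip(dir_fields, struct.unpack('<4B2H2I', raw)))
assert icon_header['width'] == 32 and icon_header['bpp'] == 32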
Example #57
0
    def reduce(self, function, init_values=None):
        """
            Independently reduces all streams (folding from left, obviously).

            Returns list of results.
        """
        if init_values:
            if isinstance(init_values, list):
                lefts = init_values
            else:
                lefts = [init_values] * len(self.stream_collection)
        else:
            lefts = self.next()
        rights = self.next()
        # Assumes next() returns a list with None entries for exhausted
        # streams; a finished stream simply keeps its accumulated value.
        while not all(r is None for r in rights):
            lefts = [function(lv, rv) if rv is not None else lv
                     for lv, rv in zip(lefts, rights)]
            rights = self.next()
        return lefts
Example #58
0
    def __setitem__(self, depth, val):
        """depth[i] = val

        Write the ith depth, a horizontal cross-section, of the file, starting
        at 0. It accepts any array_like, but `val` must be at least as big as
        the underlying data slice.

        If `val` is longer than the underlying trace, `val` is essentially truncated.

        Parameters
        ----------
        i   : int or slice
        val : array_like

        Notes
        -----
        .. versionadded:: 1.1

        Behaves like [] for lists.

        Examples
        --------
        Copy a depth:

        >>> depth[4] = other[19]

        Copy consecutive depths, and assign to a sub volume (inject a sub cube
        into the larger volume):

        >>> depth[10:50] = other[:]

        Copy into every other depth from an iterable:

        >>> depth[::2] = other
        """
        if isinstance(depth, slice):
            for i, x in zip(range(*depth.indices(len(self))), val):
                self[i] = x
            return

        val = castarray(val, dtype=self.dtype)
        self.filehandle.putdepth(depth, val.size, self.offsets, val)
Example #59
0
def make_SDP(deltas):
    pvms = []
    gaps = {("even", 0): 3, ("odd+", 0): 3}
    for spin in range(0, lmax):
        if spin % 2 == 0:
            pvms.append(make_F(deltas, "even", spin, gaps))
            pvms.append(make_F(deltas, "odd+", spin, gaps))
        else:
            pvms.append(make_F(deltas, "odd-", spin, gaps))

    epsilon_contribution = make_F(deltas, "even", 0, dict(), Delta=deltas[1])
    sigma_contribution = make_F(deltas, "odd+", 0, dict(), Delta=deltas[0])
    for m, x in zip(epsilon_contribution, sigma_contribution):
        m[0][0] += x
    pvms.append(epsilon_contribution)
    norm = []
    for v in make_F(deltas, "even", 0, dict(), Delta=0):
        norm.append(v[0][0] + v[0][1] + v[1][0] + v[1][1])
    obj = 0
    return context.sumrule_to_SDP(norm, obj, pvms)
Example #60
0
    def write(self, outlist, func=null, args=(), **kwargs):
        '''
        Make FITS files storing the FITS headers and data.

        Parameters
        ----------
        outlist : array-like
            List of output FITS paths.
            Raises ValueError if the length of this list differs from
            the number of images.
        kwargs : keyword arguments
            These are passed to the writeto method of the HDU object.
        '''
        if len(outlist) != len(self):
            msg = 'the length of outlist differs from the number of images'
            raise ValueError(msg)
        for i, (o, head, data) in enumerate(zip(outlist, self.header, self.data)):
            hdu = mkhdu(data, header=head)
            hdu.writeto(o, **kwargs)
            func(i, *args)
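A hedged usage sketch; stack, the output names, and the progress callback are all hypothetical, and any extra keyword (such as astropy's overwrite) is simply forwarded to writeto:

# Hypothetical: `stack` is an instance of the class defining write(),
# holding two images in stack.header / stack.data.
# stack.write(['img0.fits', 'img1.fits'],
#             func=lambda i: print('wrote image', i),
#             overwrite=True)    # forwarded to HDU.writeto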