Example #1
 def __init__(self, address, num_workers, app, **kwargs):
     self.address = address
     self.num_workers = num_workers
     self.worker_age = 0
     self.app = app
     self.conf = kwargs.get("config", {})
     self.timeout = self.conf['timeout']
     self.reexec_pid = 0
     self.debug = kwargs.get("debug", False)
     self.log = logging.getLogger(__name__)
     self.opts = kwargs
     
     self._pidfile = None
     self.master_name = "Master"
     self.proc_name = self.conf['proc_name']
     
     # get current path, try to use PWD env first
     try:
         a = os.stat(os.environ['PWD'])
         b = os.stat(os.getcwd())
         if a.st_ino == b.st_ino and a.st_dev == b.st_dev:
             cwd = os.environ['PWD']
         else:
             cwd = os.getcwd()
     except:
         cwd = os.getcwd()
         
     # init start context
     self.START_CTX = {
         "argv": copy.copy(sys.argv),
         "cwd": cwd,
         0: copy.copy(sys.argv[0])
     }
Example #2
 def initialize(self, optimizer):
     # Initialize the parameters
     self.gamma  = np.ones(self.input_shape)
     self.beta = np.zeros(self.input_shape)
     # parameter optimizers
     self.gamma_opt  = copy.copy(optimizer)
     self.beta_opt = copy.copy(optimizer)
    def neargroups(self, blocknames):
        """Given a list or set of block names, finds groups of 'near' blocks.  Blocks are assigned the same group
        if they are neighbours, or share a neighbour."""
        blocknames = list(set(blocknames))
        groups = []
        for blk in blocknames:
            groups.append(set([blk]))
        from copy import copy

        done = False
        while not done:
            done = True
            for i, g in enumerate(groups):
                ng = copy(g)
                for blk in g:
                    ng = ng | self.block[blk].neighbour_name
                if i < len(groups) - 1:
                    for g2 in groups[i + 1 :]:
                        ng2 = copy(g2)
                        for blk in g2:
                            ng2 = ng2 | self.block[blk].neighbour_name
                        if ng & ng2:
                            g.update(g2)
                            groups.remove(g2)
                            done = False
                            break
                    if not done:
                        break
        return groups
 def new(*args):
     """Method overload."""
     if len(args) == 2:
         new_args = (args[0], copy(args[1]))
     else:
         new_args = (args[0], copy(args[1])) + args[2:]
     if hasattr(args[0], 'baseFilename'):
         return fn(*args)
     levelno = new_args[1].levelno
     if levelno >= 50:
         color = '\x1b[31;5;7m\n '  # blinking red with black
     elif levelno >= 40:
         color = '\x1b[31m'  # red
     elif levelno >= 30:
         color = '\x1b[33m'  # yellow
     elif levelno >= 20:
         color = '\x1b[32m'  # green
     elif levelno >= 10:
         color = '\x1b[35m'  # pink
     else:
         color = '\x1b[0m'  # normal
     try:
         new_args[1].msg = color + str(new_args[1].msg) + ' \x1b[0m'
     except Exception as reason:
         print(reason)  # Do not use log here.
     return fn(*new_args)
Example #5
	def draw_xy_spm(self):
		K4 = xy_SPM(copy.copy(self.u), copy.copy(self.u1), self.L.value())
		real_spm = xy_SPM_to_Real_(K4, G(self.u, len(K4)), G(self.u1, len(K4)))

		# Plot each of the six components of real_spm on its own canvas (sc9..sc14).
		canvases = [self.sc9, self.sc10, self.sc11, self.sc12, self.sc13, self.sc14]
		for canvas, series in zip(canvases, real_spm):
			canvas.axes.plot(range(len(series)), series, '-')
			canvas.axes.relim()
			canvas.axes.margins(0.1, 0.1)
			canvas.draw()
Example #6
 def is_conjunction_predicate(self):
     self.conjResult = []
     conjChildIndList = any_in([c.parent_relation for c in self.children], conjunction_dependencies)
     if not conjChildIndList:
         return False
     childrenCopy = [x for x in self.children]
     self.children = [x for i,x in enumerate(self.children) if i not in conjChildIndList and 
                      x.parent_relation !='conj']
     self.baseElm = copy.copy(self)
     self.children = childrenCopy
     
     cc = []
     conjElements = []
     for child in self.children:
         if child.parent_relation == 'cc':
             if cc and (cc[-1][0] != child.id-1):
                 self.conjResult.append((copy.copy(cc),copy.copy(conjElements)))
                 cc = []
                 conjElements = []
             cc.append((child.id,child.word))
             
                 
         elif child.parent_relation == 'conj':
             conjElements.append(child)              
     
     if cc:
         self.conjResult.append((copy.copy(cc),copy.copy(conjElements)))
     
     return True
Example #7
    def test_add_extra_panel_tab(self):
        """ Tests if a tab can be added to a course tab list. """
        for tab_type in utils.EXTRA_TAB_PANELS.keys():
            tab = utils.EXTRA_TAB_PANELS.get(tab_type)

            # test adding with changed = True
            for tab_setup in ["", "x", "x,y,z"]:
                course = self.get_course_with_tabs(tab_setup)
                expected_tabs = copy.copy(course.tabs)
                expected_tabs.append(tab)
                changed, actual_tabs = utils.add_extra_panel_tab(tab_type, course)
                self.assertTrue(changed)
                self.assertEqual(actual_tabs, expected_tabs)

            # test adding with changed = False
            tab_test_setup = [
                [tab],
                [tab, self.get_tab_type_dicts("x,y,z")],
                [self.get_tab_type_dicts("x,y"), tab, self.get_tab_type_dicts("z")],
                [self.get_tab_type_dicts("x,y,z"), tab],
            ]

            for tab_setup in tab_test_setup:
                course = self.get_course_with_tabs(tab_setup)
                expected_tabs = copy.copy(course.tabs)
                changed, actual_tabs = utils.add_extra_panel_tab(tab_type, course)
                self.assertFalse(changed)
                self.assertEqual(actual_tabs, expected_tabs)
Example #8
def HandleSerie(operation=None, selected_season=None, selected_episode=None, **params):
    movie_documents = service.get_movie_documents(params['id'])

    if len(movie_documents) == 1:
        return HandleSerieVersion(version=1, operation=operation, selected_season=selected_season,
                                  selected_episode=selected_episode, **params)
    else:
        oc = ObjectContainer(title2=unicode(params['title']))

        if 'version' in params:
            version = int(params['version'])

            new_params = copy.copy(params)
            new_params['version'] = version

            return HandleSerieVersion(operation=operation, selected_season=selected_season,
                                      selected_episode=selected_episode, **new_params)
        else:
            for index in range(0, len(movie_documents)):
                version = index + 1

                new_params = copy.copy(params)
                new_params['version'] = version

                oc.add(DirectoryObject(
                    key=Callback(HandleSerieVersion, operation=operation, selected_season=selected_season,
                                      selected_episode=selected_episode, **new_params),
                    title=unicode(movie_documents[index]['release']),
                ))

        return oc
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and all projects update
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
Example #10
	def current_player_value(self):
		'''
		Tells us the value of current player
			Args: None
			Returns: int
		'''	
		number_of_aces = 0
		current_sum = 0
		if self.game_state == 0:
			p = copy.copy(self.player0)
		else:
			p = copy.copy(self.player1)

		p = [ n.split('_')[1] for n in p]
		
		for card in p:
			if card.isdigit():
				current_sum += int(card)
			else:
				if card in ['K','Q','J']:
					current_sum += 10
				else:
					number_of_aces += 1

		while number_of_aces > 0:
			number_of_aces -= 1
			if current_sum+11 <= 21 - number_of_aces:
				current_sum+= 11
			else:
				current_sum += 1

		return current_sum
Example #11
File: cpython.py Project: snim2/eco
    def _profile(self):
        f = tempfile.mkstemp()
        self.tm.export_as_text(f[1])
        # Delete any stale profile info
        self.tm.profile_is_dirty = False
        self.tm.profile_map = dict()
        self.tm.profile_data = dict()
        # python -m cProfile [-o output_file] [-s sort_order] myscript.py
        proc = subprocess.Popen(["python2", "-m", "cProfile", f[1]], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=0)
        stdout_value, stderr_value = proc.communicate()
        # Mock Popen here, so that we can return a Popen-like object.
        # This allows Eco to append the output of the profiler
        # to the console.
        mock = MockPopen(copy.copy(stdout_value), copy.copy(stderr_value))
        # Lex profiler output:
        # ncalls  tottime  percall  cumtime  percall fname:lineno(fn)
        table = False
        temp_cursor = self.tm.cursor.copy()
        ncalls_dict = dict()
        for line in stdout_value.split('\n'):
            tokens = line.strip().split()
            if not tokens:
                continue
            elif len(tokens) < 6:
                continue
            elif not table:
                if tokens[0] == 'ncalls':
                    table = True
            else:
                if not ':' in tokens[5]:
                    continue
                fname, loc = tokens[5].split(':')
                if not (fname == os.path.basename(f[1]) or fname == f[1]):
                    continue
                ncalls = tokens[0]
                lineno = int(loc.split('(')[0])
                func = loc.split('(')[1][:-1]
                # Move cursor to correct line and character
                msg = ('%s: called %s times ran at %ss / call' % (func, ncalls, tokens[2]))
                temp_cursor.line = lineno - 1
                temp_cursor.move_to_x(0, self.tm.lines)
                node = temp_cursor.find_next_visible(temp_cursor.node)
                if node.lookup == "<ws>":
                    node = node.next_term
                node.remove_annotations_by_class(CPythonFuncProfileMsg)
                node.add_annotation(CPythonFuncProfileMsg(msg))
                ncalls_dict[node] = float(ncalls)

        # Normalise profiler information.
        vals = ncalls_dict.values()
        val_min = float(min(vals))
        val_max = float(max(vals))
        val_diff = val_max - val_min
        for node in ncalls_dict:
            ncalls_dict[node] = (ncalls_dict[node] - val_min) / val_diff
        for node in ncalls_dict:
            node.remove_annotations_by_class(CPythonFuncProfileVal)
            node.add_annotation(CPythonFuncProfileVal(ncalls_dict[node]))

        return mock
Example #12
def crossover(genome_pair,settings):

    if genome_pair[1].fitness > genome_pair[0].fitness:
        G1 = genome_pair[1]
        G2 = genome_pair[0]
    else:
        G1 = genome_pair[0]
        G2 = genome_pair[1]

    G1_genes = G1.getAllGenesDict()
    G2_genes = G2.getAllGenesDict()

    new_genes = []
    for key, gene in G1_genes.iteritems():
        if key in G2_genes and bool(random.getrandbits(1)):
            new_genes.append(copy.copy(G2_genes[key]))
        else:
            new_genes.append(copy.copy(gene))

    # NOTE: this cost me a day of debugging.
    # Lesson learned: never assign genes directly; always copy them into a new
    # object, otherwise Python keeps a reference to the parent genome's gene.

    NG = Genome(settings)

    NG.createFromGeneList(new_genes)

    return NG
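The hard-won lesson in the comment above deserves a concrete illustration. A minimal standalone sketch (not part of the original project) showing why plain assignment keeps a reference to the same object while copy.copy creates an independent one:

import copy

class Gene(object):
    def __init__(self, weight):
        self.weight = weight

parent_gene = Gene(0.5)

aliased = parent_gene             # plain assignment: both names refer to the same object
copied = copy.copy(parent_gene)   # shallow copy: a new, independent Gene

aliased.weight = 1.0
print(parent_gene.weight)   # 1.0 -- mutating the alias also changed the "parent" gene
print(copied.weight)        # 0.5 -- the copy is unaffected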
Example #13
def collapse_partition(collapse_nodes, items, category_node, idx, tag, opts, top_level_component,
    cat_len, category_is_hierarchical, category_items, eval_formatter, is_gst,
    last_idx, node_parent):
    # Only partition at the top level. This means that we must not do a break
    # until the outermost component changes.
    if idx >= last_idx + opts.collapse_at and not tag.original_name.startswith(top_level_component+'.'):
        last = idx + opts.collapse_at - 1 if cat_len > idx + opts.collapse_at else cat_len - 1
        if category_is_hierarchical:
            ct = copy(category_items[last])
            components = get_name_components(ct.original_name)
            ct.sort = ct.name = components[0]
            # Do the first node after the last node so that the components
            # array contains the right values to be used later
            ct2 = copy(tag)
            components = get_name_components(ct2.original_name)
            ct2.sort = ct2.name = components[0]
            format_data = {'last': ct, 'first':ct2}
        else:
            format_data = {'first': tag, 'last': category_items[last]}

        name = eval_formatter.safe_format(opts.template, format_data, '##TAG_VIEW##', None)
        if not name.startswith('##TAG_VIEW##'):
            # Formatter succeeded
            node_id = category_as_json(
                items, items[category_node['id']].category, name, 0,
                parent=category_node['id'], is_editable=False, is_gst=is_gst,
                is_hierarchical=category_is_hierarchical, is_searchable=False)
            node_parent = {'id':node_id, 'children':[]}
            collapse_nodes.append(node_parent)
            category_node['children'].append(node_parent)
        last_idx = idx  # remember where we last partitioned
    return last_idx, node_parent
Example #14
def update(item):
    global w,b,history
    w[0] += step*item[1]*item[0][0]
    w[1] += step*item[1]*item[0][1]
    b[0] += step*item[1]
    print w,b
    history.append([copy.copy(w),copy.copy(b)])
Example #15
 def _loadNameReplacementTables():
     """Load the list of atom and residue name replacements."""
     if len(PDBFile._residueNameReplacements) == 0:
         tree = etree.parse(os.path.join(os.path.dirname(__file__), 'data', 'pdbNames.xml'))
         allResidues = {}
         proteinResidues = {}
         nucleicAcidResidues = {}
         for residue in tree.getroot().findall('Residue'):
             name = residue.attrib['name']
             if name == 'All':
                 PDBFile._parseResidueAtoms(residue, allResidues)
             elif name == 'Protein':
                 PDBFile._parseResidueAtoms(residue, proteinResidues)
             elif name == 'Nucleic':
                 PDBFile._parseResidueAtoms(residue, nucleicAcidResidues)
         for atom in allResidues:
             proteinResidues[atom] = allResidues[atom]
             nucleicAcidResidues[atom] = allResidues[atom]
         for residue in tree.getroot().findall('Residue'):
             name = residue.attrib['name']
             for id in residue.attrib:
                 if id == 'name' or id.startswith('alt'):
                     PDBFile._residueNameReplacements[residue.attrib[id]] = name
             if 'type' not in residue.attrib:
                 atoms = copy(allResidues)
             elif residue.attrib['type'] == 'Protein':
                 atoms = copy(proteinResidues)
             elif residue.attrib['type'] == 'Nucleic':
                 atoms = copy(nucleicAcidResidues)
             else:
                 atoms = copy(allResidues)
             PDBFile._parseResidueAtoms(residue, atoms)
             PDBFile._atomNameReplacements[name] = atoms
Example #16
    def abort(action):
        # use newer Proxies as Proxies cannot be shared between threads
        telescope = copy.copy(PointHandler.telescope)
        dome = copy.copy(PointHandler.dome)

        telescope.abortSlew()
        dome.abortSlew()
Example #17
 def env(self):
     env = self.get_user_env()
     sys.path = copy.copy(env)
     try:
         yield
     finally:
         sys.path = copy.copy(self._origin_env)
Example #18
File: pcg.py Project: zhh210/pypdas
def pcg0(H,c,A,b,x0,fA=None,callback=None):
    '''
    Projected CG method to solve the problem: {min 1/2x'Hx + c'x | Ax = b}.
    Initial point x0 must satisfy Ax0 = b. Unstable version, not recommended.
    '''
    # Initialize some variables
    r = H*x0 + c
    r = project(A,r)
    g = project(A,r)
    p = -copy(g)
    x = copy(x0)

    while True:
        alpha = dotu(r,g)/dotu(p,H*p)
        x = x+ alpha*p
        r2 = r + alpha*H*p
        g2 = project(A,r2)
        # Do iterative refinement
        # for i in range(5000):
        #     g2 = project(A,g2)
        beta = dotu(r2,g2)/dotu(r,g)
        p = -g2 + beta*p
        g = copy(g2)
        r = copy(r2)
        if nrm2(r) < 1e-16:
            break
    return x
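The project helper used by pcg0 is not shown in the example. Below is a minimal sketch under the assumption that it is the orthogonal projection onto the null space of A (so the projected residual stays consistent with the constraint Ax = b); the original code works with cvxopt-style vectors, whereas this sketch uses NumPy for clarity.

import numpy as np

def project(A, r):
    # Assumed implementation: r - A^T (A A^T)^{-1} (A r),
    # i.e. the component of r orthogonal to the rows of A.
    A = np.asarray(A, dtype=float)
    r = np.asarray(r, dtype=float)
    lam = np.linalg.solve(A @ A.T, A @ r)
    return r - A.T @ lam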
 def getVarFlag(self, var, flag, expand=False, noweakdefault=False):
     local_var = self._findVar(var)
     value = None
     if local_var is not None:
         if flag in local_var:
             value = copy.copy(local_var[flag])
         elif flag == "_content" and "_defaultval" in local_var and not noweakdefault:
             value = copy.copy(local_var["_defaultval"])
     if expand and value:
         # Only getvar (flag == _content) hits the expand cache
         cachename = None
         if flag == "_content":
             cachename = var
         else:
             cachename = var + "[" + flag + "]"
         value = self.expand(value, cachename)
     if value and flag == "_content" and local_var is not None and "_removeactive" in local_var:
         removes = [self.expand(r).split()  for r in local_var["_removeactive"]]
         removes = reduce(lambda a, b: a+b, removes, [])
         filtered = filter(lambda v: v not in removes,
                           value.split())
         value = " ".join(filtered)
         if expand:
              # We need to ensure the expand cache has the correct value
              # flag == "_content" here
             self.expand_cache[var].value = value
     return value
Example #20
def convert(filename):
    inp = PdfFileReader(open(filename, 'rb'))
    outp = PdfFileWriter()

    for page in inp.pages:
        page1 = copy.copy(page)
        page2 = copy.copy(page)

        UL = page.mediaBox.upperLeft
        UR = page.mediaBox.upperRight
        LL = page.mediaBox.lowerLeft
        LR = page.mediaBox.lowerRight

        # left column
        page1.mediaBox.upperLeft = (UL[0], UL[1])
        page1.mediaBox.upperRight = (UR[0]/2, UR[1])
        page1.mediaBox.lowerLeft = (LL[0], LL[1])
        page1.mediaBox.lowerRight = (LR[0]/2, LR[1])
        outp.addPage(page1)

        # right column
        page2.mediaBox.upperLeft = (UR[0]/2, UL[1])
        page2.mediaBox.upperRight = (UR[0], UR[1])
        page2.mediaBox.lowerLeft = (LR[0]/2, LR[1])
        page2.mediaBox.lowerRight = (LR[0], LR[1])
        outp.addPage(page2)

    outp.write(open(filename+'.2', 'wb'))
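A hypothetical usage sketch: the input file name below is made up, and PdfFileReader/PdfFileWriter come from the legacy pyPdf/PyPDF2 API, so the function above assumes imports along those lines. Each page is emitted twice, cropped to its left and right halves, and the result lands next to the input with a ".2" suffix.

# Assumed imports for the function above (legacy PyPDF2 / pyPdf API):
import copy
from PyPDF2 import PdfFileReader, PdfFileWriter

# Hypothetical input file; writes "slides.pdf.2" with each page split into left/right column-pages.
convert("slides.pdf")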
Example #21
def get_cursor(alias, use=True):
    """Return a cursor for database `alias` and the name of the database.

    If `use` is False, don't pass the db argument.
    """
    if alias in settings.DATABASES:
        s = settings.DATABASES[alias]
        kwargs = {
            'host': s['HOST'],
            'port': int(s['PORT'] or '0'),
            'db': s['NAME'],
            'user': s['USER'],
            'passwd': s['PASSWORD'],
        }
        kwargs.update(s['OPTIONS'])
        if use:
            kwargs['db'] = s['NAME']
    elif alias in settings.OTHER_DATABASES:
        kwargs = copy(settings.OTHER_DATABASES[alias])
    else:
        raise Exception('No such database in DATABASES or OTHER_DATABASES')
    conf = copy(kwargs)
    if not use:
        del kwargs['db']
    return MySQLdb.connect(**kwargs).cursor(), conf
def move_to_nearest_satisfying(c,s,verbose=False):
    '''
    will move individual c to nearest satisfying location
    simulate the move and check whether satisfying
    returns a tuple new state, and boolean satisfied
    note: very inefficient but simple solution
    '''

    move_limit = max(size-c, c)
    move_distance = 0
    new_s = []
    satisfied = False
    while move_distance < move_limit and not(satisfied):
        move_distance += 1
        new_s = copy.copy(s) # used to simulate the move
        if c+move_distance < size:
            new_s = move_to(c, c+move_distance, new_s)
            satisfied = is_happy(c+move_distance, new_s)
        if c-move_distance >= 0 and not satisfied: # trying to move left
            new_s = copy.copy(s)
            new_s = move_to(c, c-move_distance, new_s)
            satisfied = is_happy(c-move_distance, new_s)
        if verbose and satisfied:
            print (c, " moved to:", c-move_distance)
    return new_s, satisfied
  def set_PID(self,wanted_position,sensor_input):
    """send a signal through a PID, based on the wanted command and the sensor_input"""
    self.time= time.time()
    self.out=(wanted_position/self.gain)-self.offset

    self.error=self.out-sensor_input
    self.I_term += self.Ki*self.error*(self.last_time-self.time)
    
    if self.I_term>self.out_max:
      self.I_term=self.out_max
    elif self.I_term<self.out_min:
      self.I_term=self.out_min
    
    self.out_PID=self.last_output+self.K*self.error+self.I_term-self.Kd*(sensor_input-self.last_sensor_input)/(self.last_time-self.time)
    
    if self.out_PID>self.out_max:
      self.out_PID=self.out_max
    elif self.out_PID<self.out_min:
      self.out_PID=self.out_min
      
    self.last_time=copy.copy(self.time)
    self.last_sensor_input=copy.copy(sensor_input)
    self.last_output=copy.copy(self.out_PID)
    out_a=c.comedi_from_phys(self.out_PID,self.range_ds,self.maxdata) # convert the wanted_position 
    c.comedi_data_write(self.device0,self.subdevice,self.channel,self.range_num,c.AREF_GROUND,out_a) # send the signal to the controler
    t=time.time()
    return (t,self.out_PID)
 def compute_delta_time(self):
     l1 = copy.copy(self.spikes_time)
     l2 = copy.copy(self.spikes_time)
     del l1[-1]
     del l2[0]
     self.delta_time = np.array(l2) - np.array(l1)
     return self.delta_time
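The two shallow copies above exist only so that the del statements do not mutate self.spikes_time. An equivalent sketch using numpy.diff (assuming self.spikes_time is a flat sequence of spike times) avoids the copies entirely:

import numpy as np

def compute_delta_time(self):
    # np.diff never touches self.spikes_time, so no defensive copies are needed.
    self.delta_time = np.diff(np.asarray(self.spikes_time))
    return self.delta_time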
Example #25
    def rect(self, x, y, width, height):
        """Returns a new Canvas that contains a copy of the pixels in the given rectangle.

        @param int x: The x coordinate of the rectangle's top left corner.
        @param int y: The y coordinate of the rectangle's top left corner.
        @param int width: The width of the rectangle, extending to the right from the top left corner.
        @param int height: The height of the rectangle, extending downwards from the top left corner.
        @return canvas:
        """
        if x < 0 or x > self.width - 1 \
                or y < 0 or y > self.height - 1:
            raise ValueError("x or y coordinates out of bounds.")

        if width < 0 or height < 0:
            raise ValueError("Width and height must be positive.")

        if x + width > self.width \
                or y + height > self.height:
            raise ValueError("The rectangle does not fit into the image.")

        rect = Canvas(width, height, copy(self.bgcolor))

        for target_y in range(height):
            for target_x in range(width):
                color = self.at(x + target_x, y + target_y)
                target_i = target_y * width + target_x
                rect.canvas[target_i] = copy(color)

        return rect
 def _transpose_pitch_carrier_by_numbered_interval(
     pitch_carrier, numbered_interval):
     mci = pitchtools.NumberedInterval(numbered_interval)
     if isinstance(pitch_carrier, pitchtools.Pitch):
         number = pitch_carrier.pitch_number + mci.semitones
         return type(pitch_carrier)(number)
     elif isinstance(pitch_carrier, numbers.Number):
         pitch_carrier = pitchtools.NumberedPitch(pitch_carrier)
         result = _transpose_pitch_carrier_by_numbered_interval(
             pitch_carrier, mci)
         return result.pitch_number
     elif isinstance(pitch_carrier, scoretools.Note):
         new_note = copy.copy(pitch_carrier)
         number = pitchtools.NumberedPitch(
             pitch_carrier.written_pitch).pitch_number
         number += mci.number
         new_pitch = pitchtools.NamedPitch(number)
         new_note.written_pitch = new_pitch
         return new_note
     elif isinstance(pitch_carrier, scoretools.Chord):
         new_chord = copy.copy(pitch_carrier)
         pairs = zip(new_chord.note_heads, pitch_carrier.note_heads)
         for new_nh, old_nh in pairs:
             number = \
                 pitchtools.NumberedPitch(old_nh.written_pitch).pitch_number
             number += mci.number
             new_pitch = pitchtools.NamedPitch(number)
             new_nh.written_pitch = new_pitch
         return new_chord
     else:
         return pitch_carrier
Example #27
    def compute(self, computations):
        """
        Compute new columns by applying one or more :class:`.Computation` to
        each row.

        :param computations: An iterable of pairs of new column names and
            :class:`.Computation` instances.
        :returns: A new :class:`Table`.
        """
        column_names = list(copy(self._column_names))
        column_types = list(copy(self._column_types))

        for name, computation in computations:
            if not isinstance(computation, Computation):
                raise ValueError('The second element in pair must be a Computation instance.')

            column_names.append(name)
            column_types.append(computation.get_computed_column_type(self))

            computation.prepare(self)

        new_rows = []

        for row in self.rows:
            new_columns = tuple(c.run(row) for n, c in computations)
            new_rows.append(tuple(row) + new_columns)

        return self._fork(new_rows, zip(column_names, column_types))
Example #28
    def __deepcopy__(self, memo):
        r'''Deepcopies q-grid.

        Returns new q-grid.
        '''
        root_node, next_downbeat = self.__getnewargs__()
        return type(self)(copy.copy(root_node), copy.copy(next_downbeat))
Example #29
 def restoreWindowProperties(self):
     if self.resetWindowProperties(self.current_pipe, self.current_properties):
         self.restore_failed = False
     else:
         self.notify.warning("Couldn't restore original display settings!")
         if base.appRunner and base.appRunner.windowProperties:
             fullscreen = 0
             embedded = 1
             tryProps = base.appRunner.windowProperties
             if self.resetWindowProperties(self.current_pipe, tryProps):
                 self.current_properties = copy.copy(tryProps)
                 self.restore_failed = False
                 return None
             
         
         if self.current_properties.getFullscreen():
             fullscreen = 0
             embedded = 0
             tryProps = self.current_properties
             tryProps.setFullscreen(0)
             if self.resetWindowProperties(self.current_pipe, tryProps):
                 self.current_properties = copy.copy(tryProps)
                 self.restore_failed = False
                 return None
             
         
         self.notify.error('Failed opening regular window!')
         base.panda3dRenderError()
         self.restore_failed = True
  def generateSequenceSet(self, numSequenceGroups, sequenceLength, seed):
    sequences = []
    random.seed(seed)
    symbolPool = range(self.symbolPoolSize)

    for i in range(numSequenceGroups):
      shuffledPool = copy.copy(symbolPool)
      random.shuffle(shuffledPool)
      startElement1 = [shuffledPool[0]]
      startElement2 = [shuffledPool[1]]
      endElement1 = [shuffledPool[2]]
      endElement2 = [shuffledPool[3]]
      sequenceElements = shuffledPool[4:(4+sequenceLength-2)]

      sharedSubsequence1 = copy.copy(sequenceElements)
      sharedSubsequence2 = copy.copy(sequenceElements)
      while sharedSubsequence1 == sharedSubsequence2:
        random.shuffle(sharedSubsequence1)
        random.shuffle(sharedSubsequence2)

      sequences.append(startElement1+sharedSubsequence1+endElement1)
      sequences.append(startElement2+sharedSubsequence1+endElement2)
      # sequences.append(startElement1+sharedSubsequence2+endElement2)
      # sequences.append(startElement2+sharedSubsequence2+endElement1)
    return sequences
Example #31
    def __init__(self,
                 arg=None,
                 color=None,
                 opacity=None,
                 size=None,
                 **kwargs):
        """
        Construct a new Marker object
        
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scatterpolar.u
            nselected.Marker`
        color
            Sets the marker color of unselected points, applied
            only when a selection exists.
        opacity
            Sets the marker opacity of unselected points, applied
            only when a selection exists.
        size
            Sets the marker size of unselected points, applied only
            when a selection exists.

        Returns
        -------
        Marker
        """
        super(Marker, self).__init__("marker")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.unselected.Marker 
constructor must be a dict or 
an instance of :class:`plotly.graph_objs.scatterpolar.unselected.Marker`""")

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
Example #32
 def copy(self, **kw):
     """Return a copy of the proxied object. Keyword args will be passed through"""
     cp = copy(self.__target)
     for k, v in kw.items():
         setattr(cp, k, v)
     return cp
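A brief usage sketch with a stand-in proxy class; everything below is hypothetical and only illustrates how the keyword overrides are applied to the shallow copy of the target:

from copy import copy

class _Target(object):
    def __init__(self):
        self.name = "original"

class _Proxy(object):
    def __init__(self, target):
        self.__target = target
    def copy(self, **kw):
        cp = copy(self.__target)
        for k, v in kw.items():
            setattr(cp, k, v)
        return cp

clone = _Proxy(_Target()).copy(name="renamed")
print(clone.name)   # "renamed" -- the copy carries the overridden attribute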
Example #33
 def __copy__(self):
     """
     Return a copy of the proxied object.
     """
     return copy(self.__target)
 def reset(self):
     self.state = copy.copy(self.mu)
def find_resolved_modules(easyconfigs, avail_modules, modtool, retain_all_deps=False):
    """
    Find easyconfigs in 1st argument which can be fully resolved using modules specified in 2nd argument

    :param easyconfigs: list of parsed easyconfigs
    :param avail_modules: list of available modules
    :param retain_all_deps: retain all dependencies, regardless of whether modules are available for them or not
    """
    ordered_ecs = []
    new_easyconfigs = []
    # copy, we don't want to modify the origin list of available modules
    avail_modules = avail_modules[:]
    _log.debug("Finding resolved modules for %s (available modules: %s)", easyconfigs, avail_modules)

    ec_mod_names = [ec['full_mod_name'] for ec in easyconfigs]
    for easyconfig in easyconfigs:
        if isinstance(easyconfig, EasyConfig):
            easyconfig._config = copy.copy(easyconfig._config)
        else:
            easyconfig = easyconfig.copy()
        deps = []
        for dep in easyconfig['dependencies']:
            dep_mod_name = dep.get('full_mod_name', ActiveMNS().det_full_module_name(dep))

            # always treat external modules as resolved,
            # since no corresponding easyconfig can be found for them
            if dep.get('external_module', False):
                _log.debug("Treating dependency marked as external module as resolved: %s", dep_mod_name)

            elif retain_all_deps and dep_mod_name not in avail_modules:
                # if all dependencies should be retained, include dep unless it has been already
                _log.debug("Retaining new dep %s in 'retain all deps' mode", dep_mod_name)
                deps.append(dep)

            # retain dep if it is (still) in the list of easyconfigs
            elif dep_mod_name in ec_mod_names:
                _log.debug("Dep %s is (still) in list of easyconfigs, retaining it", dep_mod_name)
                deps.append(dep)

            # retain dep if corresponding module is not available yet;
            # fallback to checking with modtool.exist is required,
            # for hidden modules and external modules where module name may be partial
            elif dep_mod_name not in avail_modules and not modtool.exist([dep_mod_name], skip_avail=True)[0]:
                # no module available (yet) => retain dependency as one to be resolved
                _log.debug("No module available for dep %s, retaining it", dep)
                deps.append(dep)

        # update list of dependencies with only those unresolved
        easyconfig['dependencies'] = deps

        # if all dependencies have been resolved, add module for this easyconfig in the list of available modules
        if not easyconfig['dependencies']:
            _log.debug("Adding easyconfig %s to final list" % easyconfig['spec'])
            ordered_ecs.append(easyconfig)
            mod_name = easyconfig['full_mod_name']
            avail_modules.append(mod_name)
            # remove module name from list, so dependencies can be marked as resolved
            ec_mod_names.remove(mod_name)

        else:
            new_easyconfigs.append(easyconfig)

    return ordered_ecs, new_easyconfigs, avail_modules
Example #36
    def __init__(self, controller=None):
        """Initialize a PlotData object
        
        """

        # Initialize the data object and read the data files
        super(ClawPlotData,self).__init__()

        # default values of attributes:

        if controller:
            controller.plotdata = self
            # inherit some values from controller
            self.add_attribute('rundir',copy.copy(controller.rundir))
            self.add_attribute('outdir',copy.copy(controller.outdir))
        else:
            self.add_attribute('rundir',os.getcwd())     # uses *.data from rundir
            self.add_attribute('outdir',os.getcwd())     # where to find fort.* files

        self.add_attribute('format','ascii')

        self.add_attribute('plotdir',os.getcwd())      # directory for plots *.png, *.html
        self.add_attribute('overwrite',True)           # ok to overwrite old plotdir?
        self.add_attribute('plotter','matplotlib')     # backend for plots
        self.add_attribute('msgfile','')               # where to write error messages
        self.add_attribute('verbose',True)             # verbose output?

        self.add_attribute('ion',False)                # call ion() or ioff()?

        self.add_attribute('printfigs',True)
        self.add_attribute('print_format','png')     
        self.add_attribute('print_framenos','all')  # which frames to plot
        self.add_attribute('print_gaugenos','all')  # which gauges to plot
        self.add_attribute('print_fignos','all')    # which figures to plot each frame

        self.add_attribute('iplotclaw_fignos','all')    # which figures to plot interactively

        self.add_attribute('latex',True)                # make latex files for figures
        self.add_attribute('latex_fname','plots')       # name of latex file
        self.add_attribute('latex_title','Clawpack Results')       
        self.add_attribute('latex_framesperpage','all') # number of frames on each page
        self.add_attribute('latex_framesperline',2)     # number of frames on each line
        self.add_attribute('latex_figsperline','all')   # number of figures on each line
        self.add_attribute('latex_makepdf',False)       # run pdflatex on latex file

        self.add_attribute('html',True)                # make html files for figures
        self.add_attribute('html_index_fname','_PlotIndex.html')   # name of html index file
        self.add_attribute('html_index_title','Plot Index')   # title at top of index page
        self.add_attribute('html_homelink',None)       # link to here from top of _PlotIndex.html
        self.add_attribute('html_movie',True)          # make html with java script for movie
        self.add_attribute('html_eagle',False)         # use EagleClaw titles on html pages?

        self.add_attribute('gif_movie',False)          # make animated gif movie of frames

        self.add_attribute('setplot',False)            # Execute setplot.py in plot routine

        self.add_attribute('mapc2p',None)              # function to map computational
	                                    # points to physical


        self.add_attribute('beforeframe',None)         # function called before all plots 
                                        # in each frame are done
        self.add_attribute('afterframe',None)          # function called after all plots 
                                        # in each frame are done

        self.add_attribute('plotfigure_dict',{})  
        self.add_attribute('otherfigure_dict',{})  

        self.add_attribute('framesoln_dict',{})        # dictionary for holding framesoln
                                        # objects associated with plots

        self.add_attribute('gaugesoln_dict',{})        # dictionary for holding gaugesoln
                                        # objects associated with plots
                                        
        self.add_attribute('save_frames',True)         # True ==> Keep a copy of any frame
                                        # read in.  False ==> Clear the frame
                                        # solution dictionary before adding
                                        # another solution

        self.add_attribute('save_figures',True)        # True ==> Keep a copy of and figure
                                        # created.  False ==> Clear the 
                                        # figure dictionary before adding
                                        # another solution

        self.add_attribute('refresh_gauges',False)     # False ==> don't re-read gaugesoln if 
                                        # already in gaugesoln_dict

        self.add_attribute('timeframes_framenos',None)
        self.add_attribute('timeframes_frametimes',None)
        self.add_attribute('timeframes_fignos',None)
        self.add_attribute('timeframes_fignames',None)



        self.add_attribute('gauges_gaugenos',None)
        self.add_attribute('gauges_fignos',None)
        self.add_attribute('gauges_fignames',None)

        self._next_FIG = 1000
        self._fignames = []
        self._fignos = []
        self._mode = 'unknown'
        self._figname_from_num = {}
        self._otherfignames = []
Example #37
 def _push_ns(self):
     # Save default namespace
     self._old_ns = copy(self.interp.user_ns)
Example #38
 def xf(x):
     x = copy.copy(x)
     for k in skip:
         x.pop(k, None)
     return self.make(**x)
Example #39
 def __init__(self, fmt):
     self.fmt = copy(long_form_one_format(fmt))
     self.ext = self.fmt["extension"]
     self.implementation = get_format_implementation(
         self.ext, self.fmt.get("format_name"))
Example #40
    def _get_queries(self, query, n):
        # Sorry about this mess

        # Check if we have any year filters (one operator or multiple years)
        year_check = [q for q in query if "year" in q]

        # If there are multiple single-year queries, we can't handle it yet.
        if len(year_check) > 1:
            print("Failure: Query already split by year, try refining your" +
                  "request.")
            return

        # If there isn't a year operator, that means the whole record
        elif len(year_check) == 0:
            year1 = 1900
            year2 = dt.datetime.today().year
            
        # If there is an operator, which is it and what year?
        else:
            ops = self.operator_options['CALL'].values
            yc = year_check[0]
            year = int(isDigits(yc))
            op = [o for o in ops if o in yc][0]

            # Year 1 and 2 depend on the operator
            if op == '__GE':
                year1 = year
                year2 = dt.datetime.today().year
            elif op == '__LE':
                year1 = 1900
                year2 = year
            else:
                print("Haven't worked out the " + op + "case yet. Try a " +
                      " different operator for now.")
                return

        # Remove the original year operator from the query
        query_copy = copy.copy(query)
        for yc in year_check:
            query_copy.remove(yc)

        # Now we can generate a list of single year queries
        year_queries = ["year=" + str(y) for y in range(year1, year2 + 1)]

        # And with that individual full queries
        queries = [query_copy + [yq] for yq in year_queries]

        # And with that, get a list of data frames and more too big queries
        dfs = []
        pool = Pool(int(cpu_count() - 1))
        fun = copy.copy(self._get_one_query)       
        for df in tqdm(pool.imap(fun, queries), total=len(queries),
                       position=0, leave=True):
            dfs.append(df)
        pool.close()
        missed = [df for df in dfs if type(df) is list]
        dfs = [df for df in dfs if type(df) is pd.core.frame.DataFrame]

        # Now, how to further reduce?
        if len(missed) > 0:
            print("\nThere are still sub requests with more than 50,000 " + 
                  "trying to split records using time frequency...")
            new_queries = []
            for query in missed:
                query = np.array(query)

                # Where are there repeats?
                query_ops = [o.split("_")[0] for o in query]
                query_counts = [query_ops.count(q) for q in query_ops]
                repeat_idx = np.where(np.array(query_counts) > 1)[0]
                single_idx = np.where(np.array(query_counts) == 1)[0]

                # Split the query up by repeats
                base_q = query[single_idx]
                repeat_qs = query[repeat_idx]
                new_qs = [list(base_q) + [rp] for rp in repeat_qs] 

                # Add to the new queries list
                new_queries = new_queries + new_qs

            # Okay, now request all these just like before
            new_dfs = []
            pool = Pool(int(cpu_count() - 1))
            for df in tqdm(pool.imap(fun, new_queries), total=len(new_queries),
                           position=0, leave=True):
                new_dfs.append(df)
            pool.close()
            missed2 = [df for df in new_dfs if type(df) is list]
            new_dfs = [df for df in new_dfs if
                       type(df) is pd.core.frame.DataFrame]

            # combine df lists
            dfs = dfs + new_dfs

            # If that didn't work I give up.
            if len(missed2) > 0:
                # Let's try splitting by agg_level_desc
                if "group" not in query_ops:
                    print("\nSplitting by year and time frequency wasn't " +
                          "enough. Some years still had " +
                          "more than 50,000 records, attempting to split  one " +
                          "more time using group description...")
                    group_ops = self.get_parameter_options("group_desc")
                    op_cat = "group_desc="
                elif "agg" not in query_ops:
                    print("\nSplitting by year and time frequency wasn't " +
                          "enough. Some years still had more " +
                          "than 50,000 records, attempting to split one " +
                          "more time using aggregation level description...")
                    group_ops = self.get_parameter_options("agg_level_desc")
                    op_cat = "agg_level_desc="
    
                # Building a new new set of queries
                new_queries = []
                for q in missed2:
                    new_groups = [op_cat + a for a in group_ops]
                    new_qs = [q + [ng] for ng in new_groups]
                    new_queries = new_queries + new_qs
    
                # Okay, now request all these just like before
                new_dfs = []
                pool = Pool(int(cpu_count() - 1))
                for df in pool.imap(fun, new_queries):
                    new_dfs.append(df)
                pool.close()
                new_dfs = [df for df in new_dfs if
                           type(df) is pd.core.frame.DataFrame]

                # Combine our two lists of data frames
                dfs = dfs + new_dfs

        # And if that works, concatenate these into one data frame
        df = pd.concat(dfs, sort=True)

        # Done.
        return df
Example #41
def gen_sample(model, x, params, gpu_flag, k=1, maxlen=30, rpos_beam=3):
    
    sample = []
    sample_score = []
    rpos_sample = []
    # rpos_sample_score = []
    relation_sample = []

    live_k = 1
    dead_k = 0  # except init, live_k = k - dead_k

    # current living paths and corresponding scores(-log)
    hyp_samples = [[]] * live_k
    hyp_scores = np.zeros(live_k).astype(np.float32)
    hyp_rpos_samples = [[]] * live_k
    hyp_relation_samples = [[]] * live_k
    # get init state, (1,n) and encoder output, (1,D,H,W)
    next_state, ctx0 = model.f_init(x)
    next_h1t = next_state
    # -1 -> My_embedding -> 0 tensor(1,m)
    next_lw = -1 * torch.ones(1, dtype=torch.int64).cuda()
    next_calpha_past = torch.zeros(1, ctx0.shape[2], ctx0.shape[3]).cuda()  # (live_k,H,W)
    next_palpha_past = torch.zeros(1, ctx0.shape[2], ctx0.shape[3]).cuda()
    nextemb_memory = torch.zeros(params['maxlen'], live_k, params['m']).cuda()
    nextePmb_memory = torch.zeros(params['maxlen'], live_k, params['m']).cuda()    

    for ii in range(maxlen):
        ctxP = ctx0.repeat(live_k, 1, 1, 1)  # (live_k,D,H,W)
        next_lpos = ii * torch.ones(live_k, dtype=torch.int64).cuda()
        next_h01, next_ma, next_ctP, next_pa, next_palpha_past, nextemb_memory, nextePmb_memory = \
                    model.f_next_parent(params, next_lw, next_lpos, ctxP, next_state, next_h1t, next_palpha_past, nextemb_memory, nextePmb_memory, ii)
        next_ma = next_ma.cpu().numpy()
        # next_ctP = next_ctP.cpu().numpy()
        next_palpha_past = next_palpha_past.cpu().numpy()
        nextemb_memory = nextemb_memory.cpu().numpy()
        nextePmb_memory = nextePmb_memory.cpu().numpy()

        nextemb_memory = np.transpose(nextemb_memory, (1, 0, 2)) # batch * Matt * dim
        nextePmb_memory = np.transpose(nextePmb_memory, (1, 0, 2))
        
        next_rpos = next_ma.argsort(axis=1)[:,-rpos_beam:] # topK parent index; batch * topK
        n_gaps = nextemb_memory.shape[1]
        n_batch = nextemb_memory.shape[0]
        next_rpos_gap = next_rpos + n_gaps * np.arange(n_batch)[:, None]
        next_remb_memory = nextemb_memory.reshape([n_batch*n_gaps, nextemb_memory.shape[-1]])
        next_remb = next_remb_memory[next_rpos_gap.flatten()] # [batch*rpos_beam, emb_dim]
        rpos_scores = next_ma.flatten()[next_rpos_gap.flatten()] # [batch*rpos_beam,]

        # next_ctPC = next_ctP.repeat(1, 1, rpos_beam)
        # next_ctPC = torch.reshape(next_ctPC, (-1, next_ctP.shape[1]))
        ctxC = ctx0.repeat(live_k*rpos_beam, 1, 1, 1)
        next_ctPC = torch.zeros(next_ctP.shape[0]*rpos_beam, next_ctP.shape[1]).cuda()
        next_h01C = torch.zeros(next_h01.shape[0]*rpos_beam, next_h01.shape[1]).cuda()
        next_calpha_pastC = torch.zeros(next_calpha_past.shape[0]*rpos_beam, next_calpha_past.shape[1], next_calpha_past.shape[2]).cuda()
        for bidx in range(next_calpha_past.shape[0]):
            for ridx in range(rpos_beam):
                next_ctPC[bidx*rpos_beam+ridx] = next_ctP[bidx]
                next_h01C[bidx*rpos_beam+ridx] = next_h01[bidx]
                next_calpha_pastC[bidx*rpos_beam+ridx] = next_calpha_past[bidx]
        next_remb = torch.from_numpy(next_remb).cuda()

        next_lp, next_rep, next_state, next_h1t, next_ca, next_calpha_past, next_re = \
                    model.f_next_child(params, next_remb, next_ctPC, ctxC, next_h01C, next_calpha_pastC)

        next_lp = next_lp.cpu().numpy()
        next_state = next_state.cpu().numpy()
        next_h1t = next_h1t.cpu().numpy()
        next_calpha_past = next_calpha_past.cpu().numpy()
        next_re = next_re.cpu().numpy()

        hyp_scores = np.tile(hyp_scores[:, None], [1, rpos_beam]).flatten()
        cand_scores = hyp_scores[:, None] - np.log(next_lp+1e-10)- np.log(rpos_scores+1e-10)[:,None]
        cand_flat = cand_scores.flatten()
        ranks_flat = cand_flat.argsort()[:(k-dead_k)]
        voc_size = next_lp.shape[1]
        trans_indices = ranks_flat // voc_size
        trans_indicesP = ranks_flat // (voc_size*rpos_beam)
        word_indices = ranks_flat % voc_size
        costs = cand_flat[ranks_flat]

        # update paths
        new_hyp_samples = []
        new_hyp_scores = np.zeros(k-dead_k).astype('float32')
        new_hyp_rpos_samples = []
        new_hyp_relation_samples = []
        new_hyp_states = []
        new_hyp_h1ts = []
        new_hyp_calpha_past = []
        new_hyp_palpha_past = []
        new_hyp_emb_memory = []
        new_hyp_ePmb_memory = []
        
        for idx, [ti, wi, tPi] in enumerate(zip(trans_indices, word_indices, trans_indicesP)):
            new_hyp_samples.append(hyp_samples[tPi]+[wi])
            new_hyp_scores[idx] = copy.copy(costs[idx])
            new_hyp_rpos_samples.append(hyp_rpos_samples[tPi]+[next_rpos.flatten()[ti]])
            new_hyp_relation_samples.append(hyp_relation_samples[tPi]+[next_re[ti]])
            new_hyp_states.append(copy.copy(next_state[ti]))
            new_hyp_h1ts.append(copy.copy(next_h1t[ti]))
            new_hyp_calpha_past.append(copy.copy(next_calpha_past[ti]))
            new_hyp_palpha_past.append(copy.copy(next_palpha_past[tPi]))
            new_hyp_emb_memory.append(copy.copy(nextemb_memory[tPi]))
            new_hyp_ePmb_memory.append(copy.copy(nextePmb_memory[tPi]))

        # check the finished samples
        new_live_k = 0
        hyp_samples = []
        hyp_scores = []
        hyp_rpos_samples = []
        hyp_relation_samples = []
        hyp_states = []
        hyp_h1ts = []
        hyp_calpha_past = []
        hyp_palpha_past = []
        hyp_emb_memory = []
        hyp_ePmb_memory = []

        for idx in range(len(new_hyp_samples)):
            if new_hyp_samples[idx][-1] == 0: # <eol>
                sample_score.append(new_hyp_scores[idx])
                sample.append(new_hyp_samples[idx])
                rpos_sample.append(new_hyp_rpos_samples[idx])
                relation_sample.append(new_hyp_relation_samples[idx])
                dead_k += 1
            else:
                new_live_k += 1
                hyp_scores.append(new_hyp_scores[idx])
                hyp_samples.append(new_hyp_samples[idx])
                hyp_rpos_samples.append(new_hyp_rpos_samples[idx])
                hyp_relation_samples.append(new_hyp_relation_samples[idx])
                hyp_states.append(new_hyp_states[idx])
                hyp_h1ts.append(new_hyp_h1ts[idx])
                hyp_calpha_past.append(new_hyp_calpha_past[idx])
                hyp_palpha_past.append(new_hyp_palpha_past[idx])
                hyp_emb_memory.append(new_hyp_emb_memory[idx])
                hyp_ePmb_memory.append(new_hyp_ePmb_memory[idx])   
                    
        hyp_scores = np.array(hyp_scores)
        live_k = new_live_k

        # whether finish beam search
        if new_live_k < 1:
            break
        if dead_k >= k:
            break

        next_lw = np.array([w[-1] for w in hyp_samples])  # each path's final symbol, (live_k,)
        next_state = np.array(hyp_states)  # h2t, (live_k,n)
        next_h1t = np.array(hyp_h1ts)
        next_calpha_past = np.array(hyp_calpha_past)  # (live_k,H,W)
        next_palpha_past = np.array(hyp_palpha_past)
        nextemb_memory = np.array(hyp_emb_memory)
        nextemb_memory = np.transpose(nextemb_memory, (1, 0, 2))
        nextePmb_memory = np.array(hyp_ePmb_memory)
        nextePmb_memory = np.transpose(nextePmb_memory, (1, 0, 2))
        next_lw = torch.from_numpy(next_lw).cuda()
        next_state = torch.from_numpy(next_state).cuda()
        next_h1t = torch.from_numpy(next_h1t).cuda()
        next_calpha_past = torch.from_numpy(next_calpha_past).cuda()
        next_palpha_past = torch.from_numpy(next_palpha_past).cuda()
        nextemb_memory = torch.from_numpy(nextemb_memory).cuda()
        nextePmb_memory = torch.from_numpy(nextePmb_memory).cuda()

    return sample_score, sample, rpos_sample, relation_sample
Example #42
def test_frozen_trial_validate() -> None:

    # Valid.
    valid_trial = _create_frozen_trial()
    valid_trial._validate()

    # Invalid: `datetime_start` is not set.
    invalid_trial = copy.copy(valid_trial)
    invalid_trial.datetime_start = None
    with pytest.raises(ValueError):
        invalid_trial._validate()

    # Invalid: `state` is `RUNNING` and `datetime_complete` is set.
    invalid_trial = copy.copy(valid_trial)
    invalid_trial.state = TrialState.RUNNING
    with pytest.raises(ValueError):
        invalid_trial._validate()

    # Invalid: `state` is not `RUNNING` and `datetime_complete` is not set.
    for state in [TrialState.COMPLETE, TrialState.PRUNED, TrialState.FAIL]:
        invalid_trial = copy.copy(valid_trial)
        invalid_trial.state = state
        invalid_trial.datetime_complete = None
        with pytest.raises(ValueError):
            invalid_trial._validate()

    # Invalid: `state` is `COMPLETE` and `value` is not set.
    invalid_trial = copy.copy(valid_trial)
    invalid_trial.value = None
    with pytest.raises(ValueError):
        invalid_trial._validate()

    # Invalid: Inconsistent `params` and `distributions`
    inconsistent_pairs = [
        # `params` has an extra element.
        ({
            "x": 0.1,
            "y": 0.5
        }, {
            "x": UniformDistribution(0, 1)
        }),
        # `distributions` has an extra element.
        ({
            "x": 0.1
        }, {
            "x": UniformDistribution(0, 1),
            "y": LogUniformDistribution(0.1, 1.0)
        }),
        # The value of `x` isn't contained in the distribution.
        ({
            "x": -0.5
        }, {
            "x": UniformDistribution(0, 1)
        }),
    ]  # type: List[Tuple[Dict[str, Any], Dict[str, BaseDistribution]]]

    for params, distributions in inconsistent_pairs:
        invalid_trial = copy.copy(valid_trial)
        invalid_trial.params = params
        invalid_trial.distributions = distributions
        with pytest.raises(ValueError):
            invalid_trial._validate()
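The test above relies on copy.copy producing a new object whose attributes can be rebound without touching the valid template. A minimal sketch of that shallow-copy behaviour on a plain class (illustrative, not Optuna's FrozenTrial):

import copy

class Record:
    def __init__(self, state, value):
        self.state = state
        self.value = value

template = Record(state="COMPLETE", value=1.0)
broken = copy.copy(template)
broken.value = None           # rebinding an attribute on the copy...
assert template.value == 1.0  # ...leaves the template untouched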
Example #43
0
    def __init__(self,
                 arg=None,
                 align=None,
                 alignsrc=None,
                 bgcolor=None,
                 bgcolorsrc=None,
                 bordercolor=None,
                 bordercolorsrc=None,
                 font=None,
                 namelength=None,
                 namelengthsrc=None,
                 **kwargs):
        """
        Construct a new Hoverlabel object
        
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.surface.Hoverlabel`
        align
            Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the
            hover label text spans two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            namelength .

        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__("hoverlabel")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.surface.Hoverlabel 
constructor must be a dict or 
an instance of :class:`plotly.graph_objs.surface.Hoverlabel`""")

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("align", None)
        _v = align if align is not None else _v
        if _v is not None:
            self["align"] = _v
        _v = arg.pop("alignsrc", None)
        _v = alignsrc if alignsrc is not None else _v
        if _v is not None:
            self["alignsrc"] = _v
        _v = arg.pop("bgcolor", None)
        _v = bgcolor if bgcolor is not None else _v
        if _v is not None:
            self["bgcolor"] = _v
        _v = arg.pop("bgcolorsrc", None)
        _v = bgcolorsrc if bgcolorsrc is not None else _v
        if _v is not None:
            self["bgcolorsrc"] = _v
        _v = arg.pop("bordercolor", None)
        _v = bordercolor if bordercolor is not None else _v
        if _v is not None:
            self["bordercolor"] = _v
        _v = arg.pop("bordercolorsrc", None)
        _v = bordercolorsrc if bordercolorsrc is not None else _v
        if _v is not None:
            self["bordercolorsrc"] = _v
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("namelength", None)
        _v = namelength if namelength is not None else _v
        if _v is not None:
            self["namelength"] = _v
        _v = arg.pop("namelengthsrc", None)
        _v = namelengthsrc if namelengthsrc is not None else _v
        if _v is not None:
            self["namelengthsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
Example #44
0
 def __getstate__(self):
     attrs = copy.copy(self.__dict__)
     if self._backend != self.backend_enum_holder.NCCL:
         # CUDA streams cannot be pickled; drop them from the copied state.
         del attrs['reduction_stream']
     return attrs
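The snippet above removes an unpicklable attribute from a shallow copy of __dict__ before pickling. A minimal sketch of the usual __getstate__/__setstate__ pair for that pattern (class and attribute names are illustrative, not from the library above):

import copy
import pickle
import threading

class Worker:
    def __init__(self):
        self.data = [1, 2, 3]
        self.lock = threading.Lock()   # not picklable

    def __getstate__(self):
        attrs = copy.copy(self.__dict__)
        del attrs['lock']              # drop the unpicklable handle
        return attrs

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.lock = threading.Lock()   # recreate it on load

restored = pickle.loads(pickle.dumps(Worker()))
assert restored.data == [1, 2, 3]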
Example #45
0
def rheobase(pop, td, rt):
    if not hasattr(pop[0], 'rheobase'):
        pop = [
            WSFloatIndividual(ind) for ind in pop
            if not isinstance(ind, list)
        ]
    print(pop)
    dtcpop = update_dtc_pop(pop, td)
    #if isinstance(dtcpop, Iterable):
    dtcpop = iter(dtcpop)
    xargs = iter(zip(dtcpop, repeat(rt), repeat('NEURON')))
    dtcpop = list(map(dtc_to_rheo, xargs))
    for ind, d in zip(pop, dtcpop):
        ind.rheobase = d.rheobase
    # TODO change the score away from Ratio to Z.

    #dtcpop = list(filter(lambda dtc: dtc.rheobase['value'] > 0.0 , dtcpop))
    #pop = list(filter(lambda p: p.rheobase['value'] > 0.0 , pop))
    return pop, dtcpop
    '''
    else:
        xargs = [ dtcpop, repeat(rt), repeat('NEURON') ]
        dtcpop = list(dtc_to_rheo(xargs))
        for ind,d in zip(pop,dtcpop):
            ind.rheobase = d.rheobase
        dtcpop = list(filter(lambda dtc: dtc.rheobase['value'] > 0.0 , dtcpop))
        pop = list(filter(lambda p: p.rheobase['value'] > 0.0 , pop))
        # Move to unit testing
        ###
        #    if type(ind) is not type(list()):
        #        assert ind in d.attrs.values()
        #    else:
        #        for j in ind:
        #            assert j in list(d.attrs.values()) #should be in a unit test.
        #
        ###
    '''
    # Rheobase value obtainment.
    orig_MU = len(pop)
    dtcpop = list(update_dtc_pop(pop, td))
    rheobase_test = tests[0]
    xargs = list(zip(dtcpop, repeat(rheobase_test), repeat('NEURON')))
    dtcpop = list(map(dtc_to_rheo, xargs))
    for i, d in enumerate(dtcpop):
        assert pop[i][0] in list(d.attrs.values())
        pop[i].rheobase = None
        pop[i].rheobase = d.rheobase

    dtcpop = list(filter(lambda dtc: dtc.rheobase['value'] > 0.0, dtcpop))
    pop = list(filter(lambda pop: pop.rheobase['value'] > 0.0, pop))

    delta = orig_MU - len(pop)
    if delta:
        # making new genes here introduces too much code complexity,
        # instead make up differences by extending existing lists with duplicates
        # from itself.
        # This will decrease diversity.
        far_back = -delta - 1
        pop.extend(pop[far_back:-1])
        dtcpop.extend(dtcpop[far_back:-1])
    # NeuronUnit testing
    xargs = zip(dtcpop, repeat(tests))
    dtcpop = list(map(format_test, xargs))
    npart = np.min([multiprocessing.cpu_count(), len(pop)])
    dtcbag = db.from_sequence(list(zip(dtcpop, repeat(tests))),
                              npartitions=npart)
    dtcpop = list(dtcbag.map(nunit_evaluation).compute())
    for i, d in enumerate(dtcpop):
        assert pop[i][0] in list(d.attrs.values())
        pop[i].dtc = None
        pop[i].dtc = copy.copy(dtcpop[i])
        assert hasattr(pop[i], 'dtc')

    invalid_dtc_not = [i for i in pop if not hasattr(i, 'dtc')]
    try:
        assert len(invalid_dtc_not) == 0
    except:
        print(len(invalid_dtc_not) > 0)
        raise ValueError('value error invalid_dtc_not')
    # https://distributed.readthedocs.io/en/latest/memory.html
    return pop
Example #46
0
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
            percentage (int or float): Percentage for assessments to be made against submissions.
        """
        if len(args) < 4:
            raise CommandError(
                'Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>'
            )

        course_id = six.text_type(args[0])
        item_id = six.text_type(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        try:
            percentage = float(args[3])
            assessments_to_create = (percentage / 100) * num_submissions
        except ValueError:
            raise CommandError(
                'Percentage for completed submissions must be an integer or float'
            )

        print(u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id))

        assessments_created = 0

        for sub_num in range(num_submissions):

            print(u"Creating submission {num}".format(num=sub_num))

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print(u"-- Creating peer-workflow {num}".format(num=num))

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(
                    scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid,
                                                   submission_uuid)
                if assessments_created < assessments_to_create:
                    print(u"-- Creating peer-assessment {num}".format(num=num))
                    # Create the peer assessment
                    peer_api.create_assessment(
                        scorer_submission_uuid, scorer_id, options_selected,
                        {}, "  ".join(loremipsum.get_paragraphs(2)), rubric,
                        self.NUM_PEER_ASSESSMENTS)
            assessments_created += 1

            if self.self_assessment_required:
                # Create a self-assessment
                print(u"-- Creating self assessment")
                self_api.create_assessment(
                    submission_uuid, student_item['student_id'],
                    options_selected, {},
                    "  ".join(loremipsum.get_paragraphs(2)), rubric)
        print(u"%s assessments being completed for %s submissions" %
              (assessments_created, num_submissions))
Example #47
0
 def copy(self):  # type: () -> Container
     return copy.copy(self)
Example #48
0
 def getPerformances(self):
     """ Returns the performances calculated so far. They are stored inside
         a dictionary, mapping jobs to performances. A job is a tuple of
         metaparameters.
     """
     return copy.copy(self._performances)
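Returning copy.copy of the internal dictionary keeps callers from adding or removing entries in the object's own state, but the copy is shallow, so mutable values are still shared. A small illustration with generic names (Recorder is hypothetical):

import copy

class Recorder:
    def __init__(self):
        self._performances = {('job', 1): [0.9]}

    def getPerformances(self):
        return copy.copy(self._performances)

r = Recorder()
snapshot = r.getPerformances()
snapshot[('job', 2)] = [0.5]       # does not touch r._performances
snapshot[('job', 1)].append(0.1)   # shared list: this *does* leak back
assert ('job', 2) not in r._performances
assert r._performances[('job', 1)] == [0.9, 0.1]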
Example #49
0
 def _device_function(node_def):
   current_device = from_string(node_def.device or "")
   copy_spec = copy.copy(spec)
   copy_spec.merge_from(current_device)  # current_device takes precedence.
   return copy_spec
Example #50
0
 def copy(self):
     # NOTE: should not use copy.deepcopy because it is very slow
     copied = copy(self)
     return copied
Example #51
0
        'social-people':
        StaticCategory(name=_(u'Social & People')),
        'sports':
        StaticCategory(name=_(u'Sports')),
        'travel':
        StaticCategory(name=_(u'Travel')),
        'video':
        StaticCategory(name=_(u'Video'))
    },
    ADDON_LPAPP: {
        'general': StaticCategory(name=_(u'General'))
    },
}

CATEGORIES_NO_APP[_ADDON_PERSONA] = {
    slug: copy.copy(cat)
    for slug, cat in CATEGORIES_NO_APP[ADDON_STATICTHEME].items()
}

for type_ in CATEGORIES_NO_APP:
    for slug, cat in CATEGORIES_NO_APP[type_].items():
        # Flatten some values and set them, avoiding immutability
        # of `StaticCategory` by calling `object.__setattr__` directly.
        object.__setattr__(cat, 'slug', slug)
        object.__setattr__(cat, 'type', type_)
        object.__setattr__(cat, 'misc', slug in ('miscellaneous', 'other'))

# These numbers are ids for Category model instances in the database.
# For existing categories they MUST match, for the fk in AddonCategory to work.
# To add a category to an app you can use any unused id (needs a migration too)
CATEGORIES = {
Example #52
0
 def write(self, msg):
     self.logging.write('ECHO: write({!r})\n'.format(msg))
     if len(msg) > self.truncate_len:
         msg = msg[:self.truncate_len]
     self.msg = copy.copy(msg.decode())
Example #53
0
def einsum(*operands, **kwargs):
    """einsum(subscripts, *operands, dtype=False)

    Evaluates the Einstein summation convention on the operands.
    Using the Einstein summation convention, many common multi-dimensional
    array operations can be represented in a simple fashion. This function
    provides a way to compute such summations.

    .. note::
       Memory contiguity of calculation result is not always compatible with
       `numpy.einsum`.
       ``out``, ``order``, and ``casting`` options are not supported.

    Args:
        subscripts (str): Specifies the subscripts for summation.
        operands (sequence of arrays): These are the arrays for the operation.

    Returns:
        cupy.ndarray:
            The calculation based on the Einstein summation convention.

    .. seealso:: :func:`numpy.einsum`

    """

    input_subscripts, output_subscript, operands = \
        _parse_einsum_input(operands)
    assert isinstance(input_subscripts, list)
    assert isinstance(operands, list)

    dtype = kwargs.pop('dtype', None)

    # casting = kwargs.pop('casting', 'safe')
    casting_kwargs = {}  # casting is not supported yet in astype

    optimize = kwargs.pop('optimize', False)
    if optimize is True:
        optimize = 'greedy'
    if kwargs:
        raise TypeError('Did not understand the following kwargs: %s' %
                        list(kwargs.keys()))

    result_dtype = cupy.result_type(*operands) if dtype is None else dtype
    operands = [cupy.asanyarray(arr) for arr in operands]

    input_subscripts = [
        _parse_ellipsis_subscript(sub, idx, ndim=arr.ndim)
        for idx, (sub, arr) in enumerate(zip(input_subscripts, operands))
    ]

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    for idx, sub in enumerate(input_subscripts):
        sh = operands[idx].shape
        for axis, label in enumerate(sub):
            dim = sh[axis]
            if label in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[label] == 1:
                    dimension_dict[label] = dim
                elif dim not in (1, dimension_dict[label]):
                    dim_old = dimension_dict[label]
                    raise ValueError(
                        'Size of label \'%s\' for operand %d (%d) '
                        'does not match previous terms (%d).' %
                        (_chr(label), idx, dim, dim_old))
            else:
                dimension_dict[label] = dim

    if output_subscript is None:
        # Build output subscripts
        tmp_subscripts = list(itertools.chain.from_iterable(input_subscripts))
        output_subscript = [
            label for label in sorted(set(tmp_subscripts))
            if label < 0 or tmp_subscripts.count(label) == 1
        ]
    else:
        if not options['sum_ellipsis']:
            if '@' not in output_subscript and -1 in dimension_dict:
                raise ValueError(
                    'output has more dimensions than subscripts '
                    'given in einstein sum, but no \'...\' ellipsis '
                    'provided to broadcast the extra dimensions.')
        output_subscript = _parse_ellipsis_subscript(
            output_subscript,
            None,
            ellipsis_len=sum(label < 0 for label in dimension_dict.keys()))

        # Make sure output subscripts are in the input
        tmp_subscripts = set(itertools.chain.from_iterable(input_subscripts))
        for label in output_subscript:
            if label not in tmp_subscripts:
                raise ValueError(
                    'einstein sum subscripts string included output subscript '
                    '\'%s\' which never appeared in an input' % _chr(label))
        if len(output_subscript) != len(set(output_subscript)):
            for label in output_subscript:
                if output_subscript.count(label) >= 2:
                    raise ValueError(
                        'einstein sum subscripts string includes output '
                        'subscript \'%s\' multiple times' % _chr(label))

    _einsum_diagonals(input_subscripts, operands)

    # no more raises

    if len(operands) >= 2:
        if any(arr.size == 0 for arr in operands):
            return cupy.zeros(tuple(dimension_dict[label]
                                    for label in output_subscript),
                              dtype=result_dtype)

        # Don't squeeze if unary, because this affects later (in trivial sum)
        # whether the return is a writeable view.
        for idx in range(len(operands)):
            arr = operands[idx]
            if 1 in arr.shape:
                squeeze_indices = []
                sub = []
                for axis, label in enumerate(input_subscripts[idx]):
                    if arr.shape[axis] == 1:
                        squeeze_indices.append(axis)
                    else:
                        sub.append(label)
                input_subscripts[idx] = sub
                operands[idx] = cupy.squeeze(arr, axis=tuple(squeeze_indices))
                assert operands[idx].ndim == len(input_subscripts[idx])
            del arr

    # unary einsum without summation should return a (writeable) view
    returns_view = len(operands) == 1

    # unary sum
    for idx, sub in enumerate(input_subscripts):
        other_subscripts = copy.copy(input_subscripts)
        other_subscripts[idx] = output_subscript
        other_subscripts = set(itertools.chain.from_iterable(other_subscripts))
        sum_axes = tuple(axis for axis, label in enumerate(sub)
                         if label not in other_subscripts)
        if sum_axes:
            returns_view = False
            input_subscripts[idx] = [
                label for axis, label in enumerate(sub) if axis not in sum_axes
            ]

            operands[idx] = operands[idx].sum(axis=sum_axes,
                                              dtype=result_dtype)

    if returns_view:
        operands = [a.view() for a in operands]
    else:
        operands = [
            a.astype(result_dtype, copy=False, **casting_kwargs)
            for a in operands
        ]

    # no more casts

    optimize_algorithms = {
        'greedy': _greedy_path,
        'optimal': _optimal_path,
    }
    if optimize is False:
        path = [tuple(range(len(operands)))]
    elif len(optimize) and (optimize[0] == 'einsum_path'):
        path = optimize[1:]
    else:
        try:
            if len(optimize) == 2 and isinstance(optimize[1], (int, float)):
                algo = optimize_algorithms[optimize[0]]
                memory_limit = int(optimize[1])
            else:
                algo = optimize_algorithms[optimize]
                memory_limit = 2**31  # TODO(kataoka): fix?
        except (TypeError, KeyError):  # unhashable type or not found
            raise TypeError('Did not understand the path (optimize): %s' %
                            str(optimize))
        input_sets = [set(sub) for sub in input_subscripts]
        output_set = set(output_subscript)
        path = algo(input_sets, output_set, dimension_dict, memory_limit)
        if any(len(indices) > 2 for indices in path):
            warnings.warn('memory efficient einsum is not supported yet',
                          _util.PerformanceWarning)

    for idx0, idx1 in _iter_path_pairs(path):
        # "reduced" binary einsum
        arr0 = operands.pop(idx0)
        sub0 = input_subscripts.pop(idx0)
        arr1 = operands.pop(idx1)
        sub1 = input_subscripts.pop(idx1)
        sub_others = list(
            itertools.chain(output_subscript,
                            itertools.chain.from_iterable(input_subscripts)))
        arr_out, sub_out = reduced_binary_einsum(arr0, sub0, arr1, sub1,
                                                 sub_others)
        operands.append(arr_out)
        input_subscripts.append(sub_out)
        del arr0, arr1

    # unary einsum at last
    arr0, = operands
    sub0, = input_subscripts

    transpose_axes = []
    for label in output_subscript:
        if label in sub0:
            transpose_axes.append(sub0.index(label))

    arr_out = arr0.transpose(transpose_axes).reshape(
        [dimension_dict[label] for label in output_subscript])
    assert returns_view or arr_out.dtype == result_dtype
    return arr_out
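A short usage sketch of the subscript semantics described in the docstring above, using numpy.einsum as a stand-in (cupy.einsum mirrors the numpy interface, so the same subscripts apply):

import numpy as np  # numpy.einsum used here as a stand-in for cupy.einsum

a = np.arange(6).reshape(2, 3)
b = np.arange(12).reshape(3, 4)

mat = np.einsum('ij,jk->ik', a, b)   # matrix product: sum over the shared 'j'
assert np.array_equal(mat, a @ b)

tr = np.einsum('ii->', np.eye(3))    # repeated label on one operand: trace
assert tr == 3.0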
Example #54
0
    def fetchcats(self, articles, debug=False):
        """ Fetches categories an overcategories for a set of articles """

        # Make a list of the categories of a given article, with one list for each level
        # > cats[article_key][level] = [cat1, cat2, ...]

        cats = {p: [[] for n in range(self.maxdepth + 1)] for p in articles}

        # Also, for each article, keep a list of category parents, so we can build
        # a path along the category tree from any matched category to the article
        # > parents[article_key][category] = parent_category
        #
        # Example:
        #                   /- cat 2
        #             /- cat1 -|
        # no:giraffe -|        \-
        #             \-
        #
        # parents['no:giraffe']['cat2'] = 'cat1'
        # parents['no:giraffe']['cat1'] = 'giraffe'
        #
        # We could also build full category trees for each article from the available
        # information, but they can grow quite big and slow to search

        parents = {p: {} for p in articles}

        #ctree = Tree()
        #for p in pages:
        #    ctree.add_child( name = p.encode('utf-8') )

        for site_key, site in self.sites.iteritems():

            if 'bot' in site.rights:
                requestlimit = 500
                returnlimit = 5000
            else:
                requestlimit = 50
                returnlimit = 500

            # Titles of articles that belong to this site
            titles = [article.name for article in articles.itervalues() if article.site().key == site_key]

            # logger.debug(' ['+site_key+':'+str(len(titles))+']')
            #.flush()
            if len(titles) > 0:

                for level in range(self.maxdepth + 1):

                    titles0 = copy(titles)
                    titles = []  # make a new list of titles to search
                    nc = 0
                    nnc = 0

                    for s0 in range(0, len(titles0), requestlimit):
                        if debug:
                            print
                            print "[%d] > Getting %d to %d of %d" % (level, s0, s0+requestlimit, len(titles0))
                        ids = '|'.join(titles0[s0:s0+requestlimit])

                        cont = True
                        clcont = {'continue': ''}
                        while cont:
                            # print clcont
                            args = {'prop': 'categories', 'titles': ids, 'cllimit': returnlimit}
                            args.update(clcont)
                            q = site.api('query', **args)

                            if 'warnings' in q:
                                raise StandardError(q['warnings']['query']['*'])

                            for pageid, page in q['query']['pages'].iteritems():
                                fulltitle = page['title']
                                shorttitle = fulltitle.split(':', 1)[-1]
                                article_key = site_key + ':' + fulltitle
                                if 'categories' in page:
                                    for cat in page['categories']:
                                        cat_title = cat['title']
                                        cat_short = cat_title.split(':', 1)[1]
                                        site_cat = site_key + ':' + cat_title
                                        follow = True
                                        for d in self.ignore:
                                            if re.search(d, cat_short):
                                                logger.debug(' - Ignore: "%s" matched "%s"', cat_title, d)
                                                follow = False
                                        if follow:
                                            nc += 1
                                            titles.append(cat_title)
                                            if level == 0:
                                                cats[article_key][level].append(site_cat)
                                                parents[article_key][site_cat] = article_key
                                                #print cat_short
                                                # use iter_search_nodes instead?
                                                #ctree.search_nodes( name = fulltitle.encode('utf-8') )[0].add_child( name = cat_short.encode('utf-8') )
                                            else:
                                                for article_key2, ccc in cats.iteritems():
                                                    if article_key in ccc[level-1]:
                                                        ccc[level].append(site_cat)
                                                        parents[article_key2][site_cat] = article_key
                                                        # print '>',article_key2, ':', site_cat,' = ',article_key

                                                        #for node in ctree.search_nodes( name = shorttitle.encode('utf-8') ):
                                                        #    if not cat_short.encode('utf-8') in [i.name for i in node.get_children()]:
                                                        #        node.add_child(name = cat_short.encode('utf-8'))
                                        else:
                                            nnc += 1
                            if 'continue' in q:
                                clcont = q['continue']
                            else:
                                cont = False
                    titles = list(set(titles))  # to remove duplicates (not order preserving)
                    #if level == 0:
                    #    cattree = [p for p in titles]
                    logger.debug(' %d', len(titles))
                    #.stdout.flush()
                    #print "Found %d unique categories (%d total) at level %d (skipped %d categories)" % (len(titles), nc, level, nnc)
        
        return cats, parents
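The comments above describe parents[article_key][category] = parent_category, i.e. one back-link per matched category. A tiny sketch (category_path is a hypothetical helper, not part of the code above) of walking that mapping from a matched category back to the article:

def category_path(parents, article_key, cat):
    """Follow parents[article_key] from a matched category back to the article."""
    path = [cat]
    links = parents[article_key]
    while path[-1] in links:
        path.append(links[path[-1]])
    return list(reversed(path))

parents = {'no:giraffe': {'cat2': 'cat1', 'cat1': 'no:giraffe'}}
assert category_path(parents, 'no:giraffe', 'cat2') == ['no:giraffe', 'cat1', 'cat2']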
Example #55
0
def write_line(line, fileobj):
    """Write out a manifest line"""
    # write out any comments w/o changes
    global opt_unwrap

    comments = "\n".join(line[2])
    act = line[0]
    out = line[1] + act.name

    sattrs = act.attrs
    ahash = None
    try:
        ahash = act.hash
        if ahash and ahash != "NOHASH":
            if "=" not in ahash and " " not in ahash and \
                '"' not in ahash:
                out += " " + ahash
            else:
                sattrs = copy.copy(act.attrs)
                sattrs["hash"] = ahash
                ahash = None
    except AttributeError:
        # No hash to stash.
        pass

    # high order bits in sorting
    def kvord(a):
        # Variants should always be last attribute.
        if a[0].startswith("variant."):
            return 7
        # Facets should always be before variants.
        if a[0].startswith("facet."):
            return 6
        # List attributes should be before facets and variants.
        if isinstance(a[1], list):
            return 5

        # note closure hack...
        if opt_format == FMT_V2:
            if act.name == "depend":
                # For depend actions, type should always come
                # first even though it's not the key attribute,
                # and fmri should always come after type.
                if a[0] == "fmri":
                    return 1
                elif a[0] == "type":
                    return 0
            elif act.name == "driver":
                # For driver actions, attributes should be in
                # this order: name, perms, clone_perms, privs,
                # policy, devlink, alias.
                if a[0] == "alias":
                    return 6
                elif a[0] == "devlink":
                    return 5
                elif a[0] == "policy":
                    return 4
                elif a[0] == "privs":
                    return 3
                elif a[0] == "clone_perms":
                    return 2
                elif a[0] == "perms":
                    return 1
            elif act.name != "user":
                # Place target after path, owner before group,
                # and all immediately after the action's key
                # attribute.
                if a[0] == "mode":
                    return 3
                elif a[0] == "group":
                    return 2
                elif a[0] == "owner" or a[0] == "target":
                    return 1

        # Any other attributes should come just before list, facet,
        # and variant attributes.
        if a[0] != act.key_attr:
            return 4

        # No special order for all other cases.
        return 0

    # actual key function
    def key_func(a):
        return (kvord(a), a[0])

    JOIN_TOK = " \\\n    "

    def grow(a, b, rem_values, force_nl=False):
        if opt_unwrap or not force_nl:
            lastnl = a.rfind("\n")
            if lastnl == -1:
                lastnl = 0

            if opt_format == FMT_V2 and rem_values == 1:
                # If outputting the last attribute value, then
                # use full line length.
                max_len = 80
            else:
                # If V1 format, or there are more attributes to
                # output, then account for line-continuation
                # marker.
                max_len = 78

            # Note this length comparison doesn't include the space
            # used to append the second part of the string.
            if opt_unwrap or (len(a) - lastnl + len(b) < max_len):
                return a + " " + b
        return a + JOIN_TOK + b

    def get_alias_key(v):
        """This function parses an alias attribute value into a list
                of numeric values (e.g. hex -> int) and strings that can be
                sensibly compared for sorting."""

        alias = None
        prefix = None
        for pfx in DRIVER_ALIAS_PREFIXES:
            if v.startswith(pfx):
                # Strip known prefixes before attempting
                # to create list of sort values.
                alias = v.replace(pfx, "")
                prefix = pfx
                break

        if alias is None:
            # alias didn't start with known prefix; use
            # raw value for sorting.
            return [v]

        entry = [prefix]
        for part in alias.split(","):
            for comp in part.split("."):
                try:
                    cval = int(comp, 16)
                except ValueError:
                    cval = comp
                entry.append(cval)
        return entry

    def cmp_aliases(a, b):
        if opt_format == FMT_V1:
            # Simple comparison for V1 format.
            return misc.cmp(a, b)
        # For V2 format, order aliases by interpreted value.
        return misc.cmp(get_alias_key(a), get_alias_key(b))

    def astr(aout):
        # Number of attribute values for first line and remaining.
        first_line = True
        first_attr_count = 0
        rem_attr_count = 0

        # Total number of remaining attribute values to output.
        total_count = sum(len(act.attrlist(k)) for k in sattrs)
        rem_count = total_count

        # Now build the action output string an attribute at a time.
        for k, v in sorted(six.iteritems(sattrs), key=key_func):
            # Newline breaks are only forced when there is more than
            # one value for an attribute.
            if not (isinstance(v, list) or isinstance(v, set)):
                nv = [v]
                use_force_nl = False
            else:
                nv = v
                use_force_nl = True

            cmp_attrs = None
            if k == "alias":
                cmp_attrs = cmp_to_key(cmp_aliases)
            for lmt in sorted(nv, key=cmp_attrs):
                force_nl = use_force_nl and \
                    (k == "alias" or (opt_format == FMT_V2 and
                    k.startswith("pkg.debug")))

                aout = grow(aout,
                            "=".join((k, quote_attr_value(lmt))),
                            rem_count,
                            force_nl=force_nl)

                # Must be done for each value.
                if first_line and JOIN_TOK in aout:
                    first_line = False
                    first_attr_count = \
                        (total_count - rem_count)
                    if ahash and ahash != "NOHASH":
                        first_attr_count += 1
                    rem_attr_count = rem_count

                rem_count -= 1

        return first_attr_count, rem_attr_count, aout

    first_attr_count, rem_attr_count, output = astr(out)
    if opt_format == FMT_V2 and not opt_unwrap:
        outlines = output.split(JOIN_TOK)

        # If wrapping only resulted in two lines, and the second line
        # only has one attribute and the first line had zero attributes,
        # unwrap the action.
        if first_attr_count < 2 and rem_attr_count == 1 and \
            len(outlines) == 2 and first_attr_count == 0:
            opt_unwrap = True
            output = astr(out)[-1]
            opt_unwrap = False

    if comments:
        print(comments, file=fileobj)

    if opt_format == FMT_V2:
        # Force 'dir' actions to use four spaces at beginning of lines
        # so they line up with other filesystem actions such as file,
        # link, etc.
        output = re.sub("^dir ", "dir  ", output)
    print(output, file=fileobj)
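The kvord/key_func pair above orders attributes by a numeric priority first and the attribute name second, so variants print last, facets just before them, and so on. A reduced sketch of that (priority, name) sort key, with a hypothetical priority function covering only the generic cases:

def priority(item):
    name, value = item
    if name.startswith('variant.'):
        return 7
    if name.startswith('facet.'):
        return 6
    if isinstance(value, list):
        return 5
    return 0

attrs = {'variant.arch': 'i386', 'path': 'usr/bin/ls', 'facet.doc': 'true'}
ordered = sorted(attrs.items(), key=lambda kv: (priority(kv), kv[0]))
# -> [('path', ...), ('facet.doc', ...), ('variant.arch', ...)]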
Example #56
0
            for key, second in value.iteritems():
                secondList = []
                for th in second:
                    for value1 in orgUserSegTemp:
                        for x in value1:
                            if x == th:
                                secondList.append(value1)
                result[key] = secondList
        return result


    # Build the query conditions from the business-unit / product-category / product-subcategory combination. TODO: this needs optimization
    labels = getSeg()
    label_analysis_param_list = []
    for key, value in labels.iteritems():
        label_analysis_param_tmp = copy.copy(label_analysis_param)
        label_analysis_param_tmp["orgName"] = key
        label_analysis_param_list.append(label_analysis_param_tmp)

    for key, value in labels.iteritems():
        label_analysis_param_tmp = copy.copy(label_analysis_param)
        label_analysis_param_tmp["orgName"] = key
        for value1 in value:
            for key1, value2 in value1.iteritems():
                label_analysis_param_tmp2 = copy.copy(label_analysis_param_tmp)
                label_analysis_param_tmp2["segments"] = key1
                label_analysis_param_list.append(label_analysis_param_tmp2)

    for key, value in labels.iteritems():
        label_analysis_param_tmp = copy.copy(label_analysis_param)
        label_analysis_param_tmp["orgName"] = key
Example #57
0
def run_episode(env,
                agent,
                state_normalizer,
                memory,
                batch_size,
                discount,
                dropout_probability,
                max_step=10000):
    state = env.reset()
    if state_normalizer is not None:
        state = state_normalizer.transform(state)[0]
    done = False
    total_reward = 0
    step_durations_s = np.zeros(shape=max_step, dtype=float)
    train_duration_s = np.zeros(shape=max_step - batch_size, dtype=float)
    progress_msg = "Step {:5d}/{:5d}. Avg step duration: {:3.1f} ms. Avg train duration: {:3.1f} ms. Loss = {:2.10f}."
    loss_v = 0
    w1_m = 0
    w2_m = 0
    w3_m = 0
    i = 0
    action = 0

    #each step within an episode
    for i in range(max_step):
        t = time.time()
        if i > 0 and i % 200 == 0:
            print(
                progress_msg.format(
                    i, max_step,
                    np.mean(step_durations_s[0:i]) * 1000,
                    np.mean(train_duration_s[0:i - batch_size]) * 1000,
                    loss_v))
        if done:
            break

        action = agent.act_MCDropout_Epsilon_Greedy(state)

        #take a, get s' and reward
        state_next, reward, done, info = env.step(action)
        total_reward += reward

        if state_normalizer is not None:
            state_next = state_normalizer.transform(state_next)[0]

        memory.add((state, action, reward, state_next, done))

        if len(memory.memory) > batch_size:  # DQN Experience Replay
            states_b, actions_b, rewards_b, states_n_b, done_b = zip(
                *memory.sample(batch_size))
            states_b = np.array(states_b)
            actions_b = np.array(actions_b)
            rewards_b = np.array(rewards_b)
            states_n_b = np.array(states_n_b)
            done_b = np.array(done_b).astype(int)

            q_n_b = agent.predict_q_values(
                states_n_b)  # Action values on the arriving state

            #target - Q-learning here - taking max_a over Q(s', a)
            targets_b = rewards_b + (1. - done_b) * discount * np.amax(q_n_b,
                                                                       axis=1)

            #target function for the agent - predict based on the trained Q Network
            targets = agent.predict_q_values(states_b)
            for j, action in enumerate(actions_b):
                targets[j, action] = targets_b[j]

            t_train = time.time()

            #training the agent based on the target function
            loss_v, w1_m, w2_m, w3_m = agent.train(states_b, targets)
            train_duration_s[i - batch_size] = time.time() - t_train

        state = copy.copy(state_next)
        step_durations_s[i] = time.time() - t  # Time elapsed during this step
        step_length = time.time() - t

    return loss_v, w1_m, w2_m, w3_m, total_reward, step_length
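The targets_b line above is the standard Q-learning target, target = r + (1 - done) * gamma * max_a Q(s', a), where terminal transitions keep only the reward. A minimal numpy sketch of that computation on a hand-made batch (values are illustrative):

import numpy as np

rewards_b = np.array([1.0, 0.0])
done_b    = np.array([0, 1])
discount  = 0.99
q_n_b     = np.array([[0.2, 0.8],    # Q-values at the next states
                      [0.5, 0.1]])

targets_b = rewards_b + (1. - done_b) * discount * np.amax(q_n_b, axis=1)
# -> [1.0 + 0.99 * 0.8, 0.0]  (the terminal transition keeps only its reward)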
Example #58
0
def sanity_check(localhost, duthosts, request, fanouthosts, nbrhosts, tbinfo):
    logger.info("Prepare sanity check")

    skip_sanity = False
    allow_recover = False
    recover_method = "adaptive"
    pre_check_items = copy.deepcopy(SUPPORTED_CHECKS)  # Default check items
    post_check = False

    customized_sanity_check = None
    for m in request.node.iter_markers():
        logger.info("Found marker: m.name=%s, m.args=%s, m.kwargs=%s" %
                    (m.name, m.args, m.kwargs))
        if m.name == "sanity_check":
            customized_sanity_check = m
            break

    if customized_sanity_check:
        logger.info(
            "Process marker {} in script. m.args={}, m.kwargs={}".format(
                customized_sanity_check.name, customized_sanity_check.args,
                customized_sanity_check.kwargs))
        skip_sanity = customized_sanity_check.kwargs.get("skip_sanity", False)
        allow_recover = customized_sanity_check.kwargs.get(
            "allow_recover", False)
        recover_method = customized_sanity_check.kwargs.get(
            "recover_method", "adaptive")
        if allow_recover and recover_method not in constants.RECOVER_METHODS:
            pytest.warning("Unsupported recover method")
            logger.info(
                "Fall back to use default recover method 'config_reload'")
            recover_method = "config_reload"

        pre_check_items = _update_check_items(
            pre_check_items,
            customized_sanity_check.kwargs.get("check_items", []),
            SUPPORTED_CHECKS)

        post_check = customized_sanity_check.kwargs.get("post_check", False)

    if request.config.option.skip_sanity:
        skip_sanity = True
    if skip_sanity:
        logger.info(
            "Skip sanity check according to command line argument or configuration of test script."
        )
        yield
        return

    if request.config.option.allow_recover:
        allow_recover = True

    if request.config.option.recover_method:
        recover_method = request.config.getoption("--recover_method")

    if request.config.option.post_check:
        post_check = True

    cli_check_items = request.config.getoption("--check_items")
    cli_post_check_items = request.config.getoption("--post_check_items")

    if cli_check_items:
        logger.info(
            'Fine tune pre-test check items based on CLI option --check_items')
        cli_items_list = str(cli_check_items).split(',')
        pre_check_items = _update_check_items(pre_check_items, cli_items_list,
                                              SUPPORTED_CHECKS)

    pre_check_items = filter_check_items(
        tbinfo, pre_check_items)  # Filter out un-supported checks.

    if post_check:
        # Prepare post test check items based on the collected pre test check items.
        post_check_items = copy.copy(pre_check_items)
        if customized_sanity_check:
            post_check_items = _update_check_items(
                post_check_items,
                customized_sanity_check.kwargs.get("post_check_items", []),
                SUPPORTED_CHECKS)

        if cli_post_check_items:
            logger.info(
                'Fine tune post-test check items based on CLI option --post_check_items'
            )
            cli_post_items_list = str(cli_post_check_items).split(',')
            post_check_items = _update_check_items(post_check_items,
                                                   cli_post_items_list,
                                                   SUPPORTED_CHECKS)

        post_check_items = filter_check_items(
            tbinfo, post_check_items)  # Filter out un-supported checks.
    else:
        post_check_items = set()

    logger.info("Sanity check settings: skip_sanity=%s, pre_check_items=%s, allow_recover=%s, recover_method=%s, post_check=%s, post_check_items=%s" % \
        (skip_sanity, pre_check_items, allow_recover, recover_method, post_check, post_check_items))

    pre_post_check_items = pre_check_items + [
        item for item in post_check_items if item not in pre_check_items
    ]
    for item in pre_post_check_items:
        request.fixturenames.append(item)

        # Workaround for pytest requirement.
        # Each possibly used check fixture must be executed in setup phase. Otherwise there could be teardown error.
        request.getfixturevalue(item)

    if pre_check_items:
        logger.info("Start pre-test sanity checks")

        # Dynamically attach selected check fixtures to node
        for item in set(pre_check_items):
            request.fixturenames.append(item)
        dual_tor = 'dualtor' in tbinfo['topo']['name']
        print_logs(duthosts, print_dual_tor_logs=dual_tor)

        check_results = do_checks(request,
                                  pre_check_items,
                                  stage=STAGE_PRE_TEST)
        logger.debug(
            "Pre-test sanity check results:\n%s" %
            json.dumps(check_results, indent=4, default=fallback_serializer))

        failed_results = [
            result for result in check_results if result['failed']
        ]
        if failed_results:
            if not allow_recover:
                pt_assert(False, "!!!!!!!!!!!!!!!!Pre-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
                    .format(json.dumps(failed_results, indent=4, default=fallback_serializer)))
            else:
                dut_failed_results = defaultdict(list)
                infra_recovery_actions = []
                for failed_result in failed_results:
                    if 'host' in failed_result:
                        dut_failed_results[failed_result['host']].append(
                            failed_result)
                    if failed_result[
                            'check_item'] in constants.INFRA_CHECK_ITEMS:
                        if 'action' in failed_result and failed_result['action'] is not None \
                            and callable(failed_result['action']):
                            infra_recovery_actions.append(
                                failed_result['action'])
                for dut_name, dut_results in dut_failed_results.items():
                    # Attempt to restore DUT state
                    recover(duthosts[dut_name], localhost, fanouthosts,
                            dut_results, recover_method)
                    # Attempt to restore neighbor VM state
                    neighbor_vm_restore(duthosts[dut_name], nbrhosts, tbinfo)
                for action in infra_recovery_actions:
                    action()

                logger.info("Run sanity check again after recovery")
                new_check_results = do_checks(request,
                                              pre_check_items,
                                              stage=STAGE_PRE_TEST,
                                              after_recovery=True)
                logger.debug(
                    "Pre-test sanity check after recovery results:\n%s" %
                    json.dumps(new_check_results,
                               indent=4,
                               default=fallback_serializer))

                new_failed_results = [
                    result for result in new_check_results if result['failed']
                ]
                if new_failed_results:
                    pt_assert(False, "!!!!!!!!!!!!!!!! Pre-test sanity check after recovery failed: !!!!!!!!!!!!!!!!\n{}"\
                        .format(json.dumps(new_failed_results, indent=4, default=fallback_serializer)))

        logger.info("Done pre-test sanity check")
    else:
        logger.info(
            'No pre-test sanity check item, skip pre-test sanity check.')

    yield

    if not post_check:
        logger.info(
            "No post-test check is required. Done post-test sanity check")
        return

    if post_check_items:
        logger.info("Start post-test sanity check")
        post_check_results = do_checks(request,
                                       post_check_items,
                                       stage=STAGE_POST_TEST)
        logger.debug("Post-test sanity check results:\n%s" % json.dumps(
            post_check_results, indent=4, default=fallback_serializer))

        post_failed_results = [
            result for result in post_check_results if result['failed']
        ]
        if post_failed_results:
            pt_assert(False, "!!!!!!!!!!!!!!!! Post-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
                .format(json.dumps(post_failed_results, indent=4, default=fallback_serializer)))

        logger.info("Done post-test sanity check")
    else:
        logger.info(
            'No post-test sanity check item, skip post-test sanity check.')
Example #59
0
def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
                       apk_key_map, key_passwords, platform_api_level,
                       codename_to_api_level_map):

  maxsize = max([len(os.path.basename(i.filename))
                 for i in input_tf_zip.infolist()
                 if i.filename.endswith('.apk')])
  rebuild_recovery = False
  system_root_image = misc_info.get("system_root_image") == "true"

  # tmpdir will only be used to regenerate the recovery-from-boot patch.
  tmpdir = tempfile.mkdtemp()
  # We're not setting the permissions precisely as in attr, because that work
  # will be handled by mkbootfs (using the values from the canned or the
  # compiled-in fs_config).
  def write_to_temp(fn, attr, data):
    fn = os.path.join(tmpdir, fn)
    if fn.endswith("/"):
      fn = os.path.join(tmpdir, fn)
      os.mkdir(fn)
    else:
      d = os.path.dirname(fn)
      if d and not os.path.exists(d):
        os.makedirs(d)

      if stat.S_ISLNK(attr >> 16):
        os.symlink(data, fn)
      else:
        with open(fn, "wb") as f:
          f.write(data)

  for info in input_tf_zip.infolist():
    if info.filename.startswith("IMAGES/"):
      continue

    data = input_tf_zip.read(info.filename)
    out_info = copy.copy(info)

    # Sign APKs.
    if info.filename.endswith(".apk"):
      name = os.path.basename(info.filename)
      key = apk_key_map[name]
      if key not in common.SPECIAL_CERT_STRINGS:
        print "    signing: %-*s (%s)" % (maxsize, name, key)
        signed_data = SignApk(data, key, key_passwords[key], platform_api_level,
            codename_to_api_level_map)
        common.ZipWriteStr(output_tf_zip, out_info, signed_data)
      else:
        # an APK we're not supposed to sign.
        print "NOT signing: %s" % (name,)
        common.ZipWriteStr(output_tf_zip, out_info, data)

    # System properties.
    elif info.filename in ("SYSTEM/build.prop",
                           "VENDOR/build.prop",
                           "SYSTEM/etc/prop.default",
                           "BOOT/RAMDISK/default.prop",  # legacy
                           "ROOT/default.prop",  # legacy
                           "RECOVERY/RAMDISK/prop.default",
                           "RECOVERY/RAMDISK/default.prop"):  # legacy
      print "rewriting %s:" % (info.filename,)
      if stat.S_ISLNK(info.external_attr >> 16):
        new_data = data
      else:
        new_data = RewriteProps(data, misc_info)
      common.ZipWriteStr(output_tf_zip, out_info, new_data)
      if info.filename in ("BOOT/RAMDISK/default.prop",  # legacy
                           "ROOT/default.prop",  # legacy
                           "RECOVERY/RAMDISK/prop.default",
                           "RECOVERY/RAMDISK/default.prop"):  # legacy
        write_to_temp(info.filename, info.external_attr, new_data)

    elif info.filename.endswith("mac_permissions.xml"):
      print "rewriting %s with new keys." % (info.filename,)
      new_data = ReplaceCerts(data)
      common.ZipWriteStr(output_tf_zip, out_info, new_data)

    # Trigger a rebuild of the recovery patch if needed.
    elif info.filename in ("SYSTEM/recovery-from-boot.p",
                           "SYSTEM/etc/recovery.img",
                           "SYSTEM/bin/install-recovery.sh"):
      rebuild_recovery = True

    # Don't copy OTA keys if we're replacing them.
    elif (OPTIONS.replace_ota_keys and
          info.filename in (
              "BOOT/RAMDISK/res/keys",
              "BOOT/RAMDISK/etc/update_engine/update-payload-key.pub.pem",
              "RECOVERY/RAMDISK/res/keys",
              "SYSTEM/etc/security/otacerts.zip",
              "SYSTEM/etc/update_engine/update-payload-key.pub.pem")):
      pass

    # Skip META/misc_info.txt if we will replace the verity private key later.
    elif (OPTIONS.replace_verity_private_key and
          info.filename == "META/misc_info.txt"):
      pass

    # Skip verity public key if we will replace it.
    elif (OPTIONS.replace_verity_public_key and
          info.filename in ("BOOT/RAMDISK/verity_key",
                            "ROOT/verity_key")):
      pass

    # Skip verity keyid (for system_root_image use) if we will replace it.
    elif (OPTIONS.replace_verity_keyid and
          info.filename == "BOOT/cmdline"):
      pass

    # Skip the care_map as we will regenerate the system/vendor images.
    elif info.filename == "META/care_map.txt":
      pass

    # Copy BOOT/, RECOVERY/, META/, ROOT/ to rebuild recovery patch. This case
    # must come AFTER other matching rules.
    elif (info.filename.startswith("BOOT/") or
          info.filename.startswith("RECOVERY/") or
          info.filename.startswith("META/") or
          info.filename.startswith("ROOT/") or
          info.filename == "SYSTEM/etc/recovery-resource.dat"):
      write_to_temp(info.filename, info.external_attr, data)
      common.ZipWriteStr(output_tf_zip, out_info, data)

    # A non-APK file; copy it verbatim.
    else:
      common.ZipWriteStr(output_tf_zip, out_info, data)

  if OPTIONS.replace_ota_keys:
    new_recovery_keys = ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info)
    if new_recovery_keys:
      if system_root_image:
        recovery_keys_location = "BOOT/RAMDISK/res/keys"
      else:
        recovery_keys_location = "RECOVERY/RAMDISK/res/keys"
      # The "new_recovery_keys" has been already written into the output_tf_zip
      # while calling ReplaceOtaKeys(). We're just putting the same copy to
      # tmpdir in case we need to regenerate the recovery-from-boot patch.
      write_to_temp(recovery_keys_location, 0o755 << 16, new_recovery_keys)

  # Replace the keyid string in META/misc_info.txt.
  if OPTIONS.replace_verity_private_key:
    ReplaceVerityPrivateKey(input_tf_zip, output_tf_zip, misc_info,
                            OPTIONS.replace_verity_private_key[1])

  if OPTIONS.replace_verity_public_key:
    if system_root_image:
      dest = "ROOT/verity_key"
    else:
      dest = "BOOT/RAMDISK/verity_key"
    # We are replacing the one in boot image only, since the one under
    # recovery won't ever be needed.
    new_data = ReplaceVerityPublicKey(
        output_tf_zip, dest, OPTIONS.replace_verity_public_key[1])
    write_to_temp(dest, 0o755 << 16, new_data)

  # Replace the keyid string in BOOT/cmdline.
  if OPTIONS.replace_verity_keyid:
    new_cmdline = ReplaceVerityKeyId(input_tf_zip, output_tf_zip,
      OPTIONS.replace_verity_keyid[1])
    # Writing the new cmdline to tmpdir is redundant as the bootimage
    # gets built in add_image_to_target_files and rebuild_recovery
    # is not exercised while building the boot image for the A/B
    # path
    write_to_temp("BOOT/cmdline", 0o755 << 16, new_cmdline)

  if rebuild_recovery:
    recovery_img = common.GetBootableImage(
        "recovery.img", "recovery.img", tmpdir, "RECOVERY", info_dict=misc_info)
    boot_img = common.GetBootableImage(
        "boot.img", "boot.img", tmpdir, "BOOT", info_dict=misc_info)

    def output_sink(fn, data):
      common.ZipWriteStr(output_tf_zip, "SYSTEM/" + fn, data)

    common.MakeRecoveryPatch(tmpdir, output_sink, recovery_img, boot_img,
                             info_dict=misc_info)

  shutil.rmtree(tmpdir)
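The out_info = copy.copy(info) line above clones a ZipInfo so the rewritten entry keeps its original metadata (date_time, external_attr, and so on). A standalone sketch of the same pattern with the standard zipfile module ('in.zip' and 'out.zip' are placeholder file names):

import copy
import zipfile

# Re-write every entry of in.zip into out.zip, preserving per-entry metadata
# by copying each ZipInfo before writing.
with zipfile.ZipFile('in.zip') as src, \
     zipfile.ZipFile('out.zip', 'w') as dst:
    for info in src.infolist():
        data = src.read(info.filename)
        out_info = copy.copy(info)    # shallow copy keeps the metadata
        dst.writestr(out_info, data)  # writestr accepts a ZipInfo object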
Example #60
0
def findroot(ctx, f, x0, solver=Secant, tol=None, verbose=False, verify=True, **kwargs):
    r"""
    Find a solution to `f(x) = 0`, using *x0* as starting point or
    interval for *x*.

    Multidimensional overdetermined systems are supported.
    You can specify them using a function or a list of functions.

    If the found root does not satisfy `|f(x)|^2 < \mathrm{tol}`,
    an exception is raised (this can be disabled with *verify=False*).

    **Arguments**

    *f*
        one dimensional function
    *x0*
        a starting point, several starting points, or an interval (depending on the solver)
    *tol*
        the returned solution has an error smaller than this
    *verbose*
        print additional information for each iteration if true
    *verify*
        verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
    *solver*
        a generator for *f* and *x0* returning an approximate solution and an error estimate
    *maxsteps*
        the maximum number of steps after which the solver stops iterating
    *df*
        first derivative of *f* (used by some solvers)
    *d2f*
        second derivative of *f* (used by some solvers)
    *multidimensional*
        force multidimensional solving
    *J*
        Jacobian matrix of *f* (used by multidimensional solvers)
    *norm*
        used vector norm (used by multidimensional solvers)

    The solver has to be callable with ``(ctx, f, x0, **kwargs)`` and return an
    iterable yielding pairs of an approximate solution and an estimated error
    (which is expected to be positive).
    You can use the following string aliases:
    'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
    'ridder', 'anewton', 'bisect'

    See mpmath.calculus.optimization for their documentation.
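
    For illustration, a custom solver might look roughly like the hypothetical
    sketch below (``PlainNewton`` is not part of mpmath). It relies only on the
    calling convention used in the implementation of this function: the solver
    is instantiated as ``solver(ctx, f, x0, **kwargs)`` with *x0* already
    converted to a list, is iterable over ``(x, error)`` pairs, and exposes a
    ``maxsteps`` attribute::

        class PlainNewton:
            # Plain Newton iteration; the derivative has to be passed as *df*
            # (a requirement of this sketch, not of findroot itself).
            maxsteps = 20

            def __init__(self, ctx, f, x0, **kwargs):
                self.ctx = ctx
                self.f = f
                self.x0 = x0[0]
                self.df = kwargs['df']

            def __iter__(self):
                x = self.x0
                while True:
                    step = self.f(x) / self.df(x)
                    x -= step
                    yield x, abs(step)

        # usage (sketch): findroot(cos, 1, solver=PlainNewton, df=lambda x: -sin(x))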

    **Examples**

    The function :func:`~mpmath.findroot` locates a root of a given function using the
    secant method by default. A simple example use of the secant method is to
    compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> findroot(sin, 3)
        3.14159265358979323846264338328

    The secant method can be used to find complex roots of analytic functions,
    although it must in that case generally be given a nonreal starting value
    (or else it will never leave the real line)::

        >>> mp.dps = 15
        >>> findroot(lambda x: x**3 + 2*x + 1, j)
        (0.226698825758202 + 1.46771150871022j)

    A nice application is to compute nontrivial roots of the Riemann zeta
    function with many digits (good initial values are needed for convergence)::

        >>> mp.dps = 30
        >>> findroot(zeta, 0.5+14j)
        (0.5 + 14.1347251417346937904572519836j)

    The secant method can also be used as an optimization algorithm, by passing
    it a derivative of a function. The following example locates the positive
    minimum of the gamma function::

        >>> mp.dps = 20
        >>> findroot(lambda x: diff(gamma, x), 1)
        1.4616321449683623413

    Finally, a useful application is to compute inverse functions, such as the
    Lambert W function which is the inverse of `w e^w`, given the first
    term of the solution's asymptotic expansion as the initial value. In basic
    cases, this gives identical results to mpmath's built-in ``lambertw``
    function::

        >>> def lambert(x):
        ...     return findroot(lambda w: w*exp(w) - x, log(1+x))
        ...
        >>> mp.dps = 15
        >>> lambert(1); lambertw(1)
        0.567143290409784
        0.567143290409784
        >>> lambert(1000); lambertw(1000)
        5.2496028524016
        5.2496028524016

    Multidimensional functions are also supported::

        >>> f = [lambda x1, x2: x1**2 + x2,
        ...      lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]
        >>> findroot(f, (10, 10))
        [ 1.61803398874989]
        [-2.61803398874989]

    You can verify this by solving the system manually.
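    For instance, the first equation gives `x_2 = -x_1^2`; substituting this into
    the second yields `3 x_1^2 - 3 x_1 - 3 = 0`, i.e. `x_1 = (1 \pm \sqrt{5})/2`,
    which (together with `x_2 = -x_1^2`) reproduces exactly the two solutions
    found above.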

    Please note that the following (more general) syntax also works::

        >>> def f(x1, x2):
        ...     return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
        ...
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]


    **Multiple roots**

    For multiple roots all methods of the Newtonian family (including secant)
    converge slowly. Consider this example::

        >>> f = lambda x: (x - 1)**99
        >>> findroot(f, 0.9, verify=False)
        0.918073542444929

    Even for a very close starting point the secant method converges very
    slowly. Use ``verbose=True`` to illustrate this.

    It is possible to modify Newton's method to make it converge regardless of
    the root's multiplicity::

        >>> findroot(f, -10, solver='mnewton')
        1.0

    This variant uses the first and second derivative of the function, which is
    not very efficient.

    Alternatively you can use an experimental Newtonian solver that keeps track
    of the speed of convergence and accelerates it using Steffensen's method if
    necessary::

        >>> findroot(f, -10, solver='anewton', verbose=True)
        x:     -9.88888888888888888889
        error: 0.111111111111111111111
        converging slowly
        x:     -9.77890011223344556678
        error: 0.10998877665544332211
        converging slowly
        x:     -9.67002233332199662166
        error: 0.108877778911448945119
        converging slowly
        accelerating convergence
        x:     -9.5622443299551077669
        error: 0.107778003366888854764
        converging slowly
        x:     0.99999999999999999214
        error: 10.562244329955107759
        x:     1.0
        error: 7.8598304758094664213e-18
        ZeroDivisionError: canceled with x = 1.0
        1.0

    **Complex roots**

    For complex roots it's recommended to use Muller's method as it converges
    even for real starting points very fast::

        >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
        (0.727136084491197 + 0.934099289460529j)


    **Intersection methods**

    When you need to find a root in a known interval, it's highly recommended to
    use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
    Usually they converge faster and more reliably. However, they have problems
    with multiple roots and usually need a sign change to find a root::

        >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
        0.0

    Be careful with symmetric functions::

        >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
        Traceback (most recent call last):
          ...
        ZeroDivisionError

    It fails even for better starting points, because there is no sign change::

        >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
        Traceback (most recent call last):
          ...
        ValueError: Could not find root within given tolerance. (1 > 2.1684e-19)
        Try another starting point or tweak arguments.

    """
    prec = ctx.prec
    try:
        ctx.prec += 20

        # initialize arguments
        if tol is None:
            tol = ctx.eps * 2**10

        kwargs['verbose'] = kwargs.get('verbose', verbose)

        if 'd1f' in kwargs:
            kwargs['df'] = kwargs['d1f']

        kwargs['tol'] = tol
        if isinstance(x0, (list, tuple)):
            x0 = [ctx.convert(x) for x in x0]
        else:
            x0 = [ctx.convert(x0)]

        if isinstance(solver, str):
            try:
                solver = str2solver[solver]
            except KeyError:
                raise ValueError('could not recognize solver')

        # accept list of functions
        if isinstance(f, (list, tuple)):
            f2 = copy(f)
            def tmp(*args):
                return [fn(*args) for fn in f2]
            f = tmp

        # detect multidimensional functions
        try:
            fx = f(*x0)
            multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
        except TypeError:
            fx = f(x0[0])
            multidimensional = False
        if 'multidimensional' in kwargs:
            multidimensional = kwargs['multidimensional']
        if multidimensional:
            # only one multidimensional solver available at the moment
            solver = MDNewton
            if 'norm' not in kwargs:
                norm = lambda x: ctx.norm(x, 'inf')
                kwargs['norm'] = norm
            else:
                norm = kwargs['norm']
        else:
            norm = abs

        # happily return starting point if it's a root
        if norm(fx) == 0:
            if multidimensional:
                return ctx.matrix(x0)
            else:
                return x0[0]

        # use solver
        iterations = solver(ctx, f, x0, **kwargs)
        if 'maxsteps' in kwargs:
            maxsteps = kwargs['maxsteps']
        else:
            maxsteps = iterations.maxsteps
        i = 0
        for x, error in iterations:
            if verbose:
                print_('x:    ', x)
                print_('error:', error)
            i += 1
            if error < tol * max(1, norm(x)) or i >= maxsteps:
                break
        if not isinstance(x, (list, tuple, ctx.matrix)):
            xl = [x]
        else:
            xl = x
        if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
            raise ValueError('Could not find root within given tolerance. '
                             '(%g > %g)\n'
                             'Try another starting point or tweak arguments.'
                             % (norm(f(*xl))**2, tol))
        return x
    finally:
        ctx.prec = prec