Example #1
	def pointertest(self):
		if not self.isactive: return
		#step to the next even glyph code; the 154 modulus matches
		#XC_num_glyphs, suggesting this cycles through the X cursor
		#font's shape glyphs (0..152)
		self.pointercode=(self.pointercode+2)%154
		misc.debug("Pointer code: "+str(self.pointercode))
		vte.setpointer(self.pointercode)
		#re-arm the timer so the cycling continues
		self.alarms.add(2,self.pointertest)
		return 0
Example #2
	def onkey(self,code):
		for it in self.list:
			if ord(it[0])==code:
				r=it[2]()
				#a callback returning None closes the menu
				if r is None: return self.exit()
				return
		if code==27: return self.exit() #27 == Escape
		misc.debug("Unhandled key: "+str(code))
Example #3
def _add_to_list(ref_list, ref):
    ref_str = ref.format_ref()

    for existing_ref in ref_list:
        if ref_str == existing_ref.format_ref():
            debug("Skipping %s, already added to task" % ref_str)
            return

    ref_list.append(ref)
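
A quick usage sketch for the helper above; FakeRef and the ref string are hypothetical stand-ins for the Flatpak ref objects, which expose format_ref() the same way.

class FakeRef:
    def __init__(self, ref_str):
        self._ref_str = ref_str

    def format_ref(self):
        return self._ref_str

def debug(msg):  #stand-in for the module's debug()
    print(msg)

refs = []
_add_to_list(refs, FakeRef("app/org.example.App/x86_64/stable"))
_add_to_list(refs, FakeRef("app/org.example.App/x86_64/stable"))  #skipped as a duplicate
assert len(refs) == 1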
Example #4
	def tally(self, node, elemcount = 0, wordcount = 0):
		"""return elemcount, wordcount, tagdepth for a given node and its (recursive) subtree"""
		if node.tag not in self.IGNORE:
			words = (node.text or '').split()
			if len(words) > 1:
				wordcount += len(words)

		cls = node.attrib.get('class', None)
		if cls:
			if any((matcher.search(cls) for matcher in self.GOOD_CLASSES)):
				wordcount *= 1.5
		tagdepth = 0
		for child in list(node):
			elemcount += self.ELEMENT_WEIGHTS.get(child.tag, self.DEFAULT_WEIGHT)
			if child.tag not in self.IGNORE:
				elemcount, wordcount, child_tagdepth = self.tally(child, elemcount, wordcount)
				tagdepth = max(tagdepth, child_tagdepth + 1)

		debug("element %s has %s nodes, %s depth and %s words" % (self._str(node), elemcount, tagdepth, wordcount))
		return max(elemcount,0), wordcount, tagdepth
Example #5
File: worker.py Project: k-danna/a2c
    def test(self,
             env,
             episodes=100,
             max_steps=10000,
             out_dir='./logs',
             print_interval=10):
        misc.debug('testing for %s episodes (%s steps max)' %
                   (episodes, max_steps))

        #init a dict of useful measurements
        stats = {
            'step': [],
            'reward': [],
        }
        test_start = time.time()
        for episode in range(episodes):
            episode_start = time.time()
            state = env.reset()
            step = reward_sum = done = 0
            while not done and step < max_steps:
                #do action
                action, _ = self.model.act(self.process_state(state),
                                           explore=False)
                state, reward, done, _ = env.step(action)

                #update
                reward_sum += reward
                step += 1

            #record episode stats
            stats['step'].append(step)
            stats['reward'].append(reward_sum)

            if (episode + 1) % print_interval == 0:
                episode_time = time.time() - episode_start
                eta = episode_time * (episodes - episode)
                misc.debug(('episode %7s: %5s steps %3s reward in %5.5ss ' +
                            '(ETA: %.3sm %.3ss)') %
                           (episode + 1, step, reward_sum, episode_time,
                            int(eta / 60), eta % 60))
        #timing
        test_time = time.time() - test_start
        test_mins = int(test_time / 60)
        test_secs = test_time % 60
        misc.debug('finished testing in %0.3sm %0.3ss (%0.5ss)' %
                   (test_mins, test_secs, test_time))
        #ez output format
        stats = pd.DataFrame(data=stats)
        print(stats.describe().loc[['min', 'max', 'mean', 'std']])
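
The "ez output format" lines at the end boil down to this pandas pattern; the numbers below are made up purely to show the shape of the output.

import pandas as pd

stats = {'step': [10, 12, 9], 'reward': [1.0, 0.5, 2.0]}
summary = pd.DataFrame(data=stats).describe()
print(summary.loc[['min', 'max', 'mean', 'std']])  #one row per statistic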
Example #6
	def setboth(self,a):
		#join the parts (e.g. family and size) into a single typeface string
		config.typeface='-'.join(a)
		config.apply()
		misc.debug("new typeface: "+config.typeface)
		return 0
Example #7
	def __str__(self):
		debug("CONTENT: %s" % (self._str(self.content)))
		#note: lxml's tostring() returns bytes under Python 3, so a
		#port would need to decode this before returning from __str__
		return etree.tostring(self.content, pretty_print=True)
Example #8
    def train(self, env, episodes=10000, max_steps=10000, 
            batch_size=20, print_interval=1000):
        misc.debug('training for %s episodes (%s steps max)' 
                % (episodes, max_steps))
        train_start = time.time()
        batch = replay_memory(params.mem_max_size)
        n_actions = env.action_space.n
        all_stats = []
        for episode in range(episodes):
            episode_start = time.time()
            state = self.process_state(env.reset())
            step = reward_sum = done = 0
            #init a dict of useful measurements
            stats = {'step': [], 'reward': [], 'loss': [],}
            while not done and step < max_steps:
                #do action
                action = self.model.act(state)
                next_state, reward, done, _ = env.step(action)

                #encourage either achieving or avoiding the end state:
                #  achieve it when rewards are negative,
                #  avoid it when rewards are positive
                reward = 0 if done else reward
                #reward += params.reward_offset

                #process observation data
                next_state = self.process_state(next_state)
                action = self.to_onehot(action, n_actions)

                #add experience to batch
                batch.add((state, action, reward, done, next_state))

                #learn
                loss = self.model.learn(batch.get(batch_size))
                stats['loss'].append(loss)

                #update
                step += 1
                state = next_state
                reward_sum += reward

            #episode stats
            stats['step'].append(step)
            stats['reward'].append(reward_sum)
            self.model.add_episode_stat(reward_sum) #for tensorboard
                
            all_stats.append(stats)

            #save model on specified interval
            if (episode+1) % params.save_interval == 0:
                self.model.save()

            if (episode+1) % print_interval == 0:
                episode_time = time.time() - episode_start
                eta = episode_time * ((episodes-1) - episode)
                misc.debug(('episode %7s: %5s steps %3s reward in %5.5ss '
                        + '(ETA: %.3sm %.3ss)') % (
                        episode+1, step, reward_sum, episode_time, 
                        int(eta/60), eta%60))
        
        train_time = time.time() - train_start
        train_mins = int(train_time / 60)
        train_secs = train_time % 60
        misc.debug('finished training in %0.3sm %0.3ss (%0.5ss)' % (
                train_mins, train_secs, train_time))
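
This loop (and the next example) relies on a replay_memory exposing add(), get(), clear() and a size attribute, which isn't shown here. A minimal sketch consistent with those call sites, offered as an assumption rather than the project's actual implementation:

import random
from collections import deque

class replay_memory:
    #hypothetical reconstruction; inferred only from the calls above
    def __init__(self, max_size=None):
        #a bounded deque drops the oldest experience once full
        self.buffer = deque(maxlen=max_size)

    @property
    def size(self):
        return len(self.buffer)

    def add(self, experience):
        self.buffer.append(experience)

    def get(self, n=None):
        if n is None:
            return list(self.buffer)          #take everything (a2c-style)
        n = min(n, len(self.buffer))
        return random.sample(self.buffer, n)  #random minibatch (dqn-style)

    def clear(self):
        self.buffer.clear()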
Example #9
    def train(self,
              env,
              episodes=10000,
              max_steps=10000,
              train_interval=20,
              print_interval=1000):
        misc.debug('training for %s episodes (%s steps max)' %
                   (episodes, max_steps))
        train_start = time.time()
        batch = replay_memory()
        n_actions = env.action_space.n
        all_stats = []
        for episode in range(episodes):
            episode_start = time.time()
            done = False
            state = self.process_state(env.reset())
            step = 0
            reward_sum = 0
            #init a dict of useful measurements
            stats = {
                'step': [],
                'reward': [],
                'loss': [],
            }
            sampled = False
            while not done and step < max_steps:
                #do action
                action, value = self.model.act(state)
                next_state, reward, done, _ = env.step(action)
                reward = 0 if done else reward

                #process observation data
                next_state = self.process_state(next_state)
                action = self.to_onehot(action, n_actions)

                #add experience to batch
                batch.add((state, action, reward, value, done, next_state))

                #learn
                if batch.size == train_interval or done:
                    #sample at most once per episode, on print episodes only
                    sample_bool = ((episode + 1) % print_interval == 0
                                   and not sampled)
                    loss = self.model.learn(batch.get(), sample=sample_bool)
                    stats['loss'].append(loss)
                    batch.clear()
                    sampled = True

                #update
                step += 1
                state = next_state
                reward_sum += reward

            #only take one sample per episode
            sampled = False

            #episode stats
            stats['step'].append(step)
            stats['reward'].append(reward_sum)

            all_stats.append(stats)

            if (episode + 1) % print_interval == 0:
                episode_time = time.time() - episode_start
                eta = episode_time * ((episodes - 1) - episode)
                misc.debug(
                    ('episode %7s: %5s steps in %5.5ss ' +
                     '(ETA: %.3sm %.3ss)') %
                    (episode + 1, step, episode_time, int(eta / 60), eta % 60))
                #self.test(env, episodes=10, max_steps=10000, records=0)

        train_time = time.time() - train_start
        train_mins = int(train_time / 60)
        train_secs = train_time % 60
        misc.debug('finished training in %0.3sm %0.3ss (%0.5ss)' %
                   (train_mins, train_secs, train_time))
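
Both training loops also call self.to_onehot(action, n_actions) before storing the action. A plausible helper assuming numpy; the project's actual method may differ:

import numpy as np

def to_onehot(action, n_actions):
    vec = np.zeros(n_actions, dtype=np.float32)
    vec[action] = 1.0
    return vec

print(to_onehot(2, 4))  #[0. 0. 1. 0.]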
Example #10
    def test(self,
             env,
             episodes=100,
             max_steps=10000,
             records=4,
             out_dir='./logs',
             print_interval=10):
        misc.debug('testing for %s episodes (%s steps max)' %
                   (episodes, max_steps))

        if records:
            #func that indicates which episodes to record and write
            vc = lambda n: n in [
                int(x) for x in np.linspace(episodes - 1, 0, records)
            ]
            #wrapper that records episodes
            env = gym.wrappers.Monitor(env,
                                       directory=out_dir,
                                       force=True,
                                       video_callable=vc)

        #init a dict of useful measurements
        stats = {
            'step': [],
            'reward': [],
        }
        test_start = time.time()
        for episode in range(episodes):
            episode_start = time.time()
            done = False
            state = self.process_state(env.reset())
            reward_sum = 0
            step = 0
            #wrapper fails on reset if game goes past max step
            #gym imposes internal max step anyways
            while not done:  #and step < max_steps:
                #do action
                action, _ = self.model.act(self.process_state(state),
                                           explore=False)
                state, reward, done, _ = env.step(action)

                #update
                reward_sum += reward
                step += 1

            #record episode stats
            stats['step'].append(step)
            stats['reward'].append(reward_sum)

            if (episode + 1) % print_interval == 0:
                episode_time = time.time() - episode_start
                eta = episode_time * (episodes - episode)
                misc.debug(
                    ('episode %7s: %5s steps in %5.5ss ' +
                     '(ETA: %5.3sm %3.3ss)') %
                    (episode + 1, step, episode_time, int(eta / 60), eta % 60))
        #timing
        test_time = time.time() - test_start
        test_mins = int(test_time / 60)
        test_secs = test_time % 60
        misc.debug('finished testing in %0.3sm %0.3ss (%0.5ss)' %
                   (test_mins, test_secs, test_time))
        #ez output format
        stats = pd.DataFrame(data=stats)
        print(stats.describe().loc[['min', 'max', 'mean', 'std']])
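
For reference, the video_callable lambda above records `records` episodes spread evenly across the run. With the defaults episodes=100 and records=4:

import numpy as np

episodes, records = 100, 4
recorded = sorted(int(x) for x in np.linspace(episodes - 1, 0, records))
print(recorded)  #[0, 33, 66, 99]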
Example #11
def _get_theme_refs(fp_sys, remote_name, ref):
    theme_refs = []

    gtksettings = Gtk.Settings.get_default()

    icon_theme = "org.freedesktop.Platform.Icontheme.%s" % gtksettings.props.gtk_icon_theme_name
    gtk_theme = "org.gtk.Gtk3theme.%s" % gtksettings.props.gtk_theme_name

    def sortref(ref):
        try:
            val = float(ref.get_branch())
        except ValueError:
            val = 9.9

        return val

    for name in (icon_theme, gtk_theme):
        theme_ref = None

        try:
            print("Looking for theme %s in %s" % (name, remote_name))

            all_refs = fp_sys.list_remote_refs_sync(remote_name, None)

            matching_refs = []

            for listed_ref in all_refs:
                if listed_ref.get_name() == name:
                    matching_refs.append(listed_ref)

            if not matching_refs:
                continue

            # Sort highest version first.
            matching_refs = sorted(matching_refs, key=sortref, reverse=True)

            for matching_ref in matching_refs:
                if matching_ref.get_arch() != ref.get_arch():
                    continue
                try:
                    if float(matching_ref.get_branch()) > float(ref.get_branch()):
                        continue
                except ValueError:
                    continue

                theme_ref = matching_ref

            # if nothing is found, check other remotes
            if theme_ref is None:
                for other_remote in fp_sys.list_remotes():
                    other_remote_name = other_remote.get_name()

                    if other_remote_name == remote_name:
                        continue

                    print("Looking for theme %s in alternate remote %s" % (name, other_remote_name))

                    all_refs = fp_sys.list_remote_refs_sync(other_remote_name, None)

                    matching_refs = []

                    for listed_ref in all_refs:
                        if listed_ref.get_name() == name:
                            matching_refs.append(listed_ref)

                    if not matching_refs:
                        continue

                    # Sort highest version first.
                    matching_refs = sorted(matching_refs, key=sortref, reverse=True)

                    for matching_ref in matching_refs:
                        if matching_ref.get_arch() != ref.get_arch():
                            continue
                        try:
                            if float(matching_ref.get_branch()) > float(ref.get_branch()):
                                continue
                        except ValueError:
                            continue

                        theme_ref = matching_ref

                    if theme_ref:
                        break
                if theme_ref is None:
                    debug("Could not locate theme '%s' in any registered remotes" % name)
        except GLib.Error as e:
            theme_ref = None
            debug("Error finding themes for flatpak: %s" % e.message)

        if theme_ref:
            theme_refs.append(theme_ref)

    return theme_refs
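
The sortref key above sorts numeric branches by value and maps anything non-numeric ("stable", "master", ...) to a 9.9 sentinel, so those land first when sorting highest-version-first. A small standalone demo of the same idea, using plain strings instead of ref objects:

def branch_key(branch):
    try:
        return float(branch)
    except ValueError:
        return 9.9  #non-numeric branches outrank numbered ones

branches = ["3.36", "stable", "3.38", "1.6"]
print(sorted(branches, key=branch_key, reverse=True))
#['stable', '3.38', '3.36', '1.6']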