Example #1
def store_counts(data, stats, now):
	logger.info('Storing count snapshot')
	with database.transaction() as t:
		t.execute('SELECT name, id FROM count_types ORDER BY ord')
		name_data = t.fetchall()
		name_order = map(lambda x: x[0], name_data)
		names = dict(name_data)
		# It seems MySQL complains with insert ... select in some cases.
		# So we do some insert-select-insert magic here. That is probably
		# slower, but no idea how to help that. And it should work.
		t.execute('SELECT name, id FROM clients WHERE name IN (' + (','.join(['%s'] * len(data))) + ')', data.keys())
		clients = dict(t.fetchall())
		# Create a snapshot for each client
		t.executemany('INSERT INTO count_snapshots (timestamp, client) VALUES(%s, %s)', map(lambda client: (now, client), clients.values()))
		t.execute('SELECT client, id FROM count_snapshots WHERE timestamp = %s', (now,))
		snapshots = dict(t.fetchall())
		# Push all the data in
		def truncate(data, limit):
			if data > 2**limit-1:
				logger.warn("Number %s overflow, truncating to %s", data, 2**limit-1)
				return 2**limit-1
			else:
				return data
		def clientdata(client):
			snapshot = snapshots[clients[client]]
			l = min(len(data[client]) / 2, len(name_order))
			return map(lambda name, index: (snapshot, names[name], truncate(data[client][index * 2], 63), truncate(data[client][index * 2 + 1], 63)), name_order[:l], range(0, l))
		def clientcaptures(client):
			snapshot = snapshots[clients[client]]
			return map(lambda i: (snapshot, i, truncate(stats[client][3 * i], 31), truncate(stats[client][3 * i + 1], 31), truncate(stats[client][3 * i + 2], 31)), range(0, len(stats[client]) / 3))
		def join_clients(c1, c2):
			c1.extend(c2)
			return c1
		t.executemany('INSERT INTO counts(snapshot, type, count, size) VALUES(%s, %s, %s, %s)', reduce(join_clients, map(clientdata, data.keys())))
		t.executemany('INSERT INTO capture_stats(snapshot, interface, captured, dropped, dropped_driver) VALUES(%s, %s, %s, %s, %s)', reduce(join_clients, map(clientcaptures, stats.keys())))
Example #2
def search_in_board(words, board):
    # Build the trie over each word and its reversal so a path across the board
    # can match a word spelled in either direction.
    trie = Trie.create(words + [w[::-1] for w in words])
    acc_hash = {}
    handled_paths = []
    pos_list = [(i,j) for i in range(len(board)) for j in range(len(board[0]))]
    while len(pos_list) > 0:
        i,j = pos_list.pop(0)
        cur_char = board[i][j]
        # ((0,0),'o',[])
        cur_word_point = ([(i,j)], cur_char)
        # [((1,0),'e'),((0,1),'a')]
        neighbors = find_neighbors((i,j),board)
        cur_words = acc_hash.get((i,j), [])
        # remove all the paths which have been handled
        cur_words = filter(lambda x: x[0] not in handled_paths, cur_words)
        filtered_prefixs = filter_by_prefix(
                cur_words+[cur_word_point], neighbors, trie)
        # [((0,1),'oa',[(0,0)])]
        update_acc_hash(acc_hash, filtered_prefixs)
        # add all the paths which have been handled
        map(lambda x: handled_paths.append(x[0]), cur_words)
        # add some position for new path
        for cur_word_point in filtered_prefixs:
            cur_pos = cur_word_point[0][-1]
            if cur_pos not in pos_list:
                pos_list.append(cur_pos)


    # return acc_hash
    word_points = filter_words(acc_hash)
    return map(lambda x: (x[1], x[0]), word_points)
Example #3
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
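        # NOTE: iteritems is not defined in this snippet; the enclosing module is
        # assumed to import it (e.g. 'from six import iteritems').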

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result
Example #4
File: utils.py Project: Nextdoor/kingpin
def diff_dicts(dict1, dict2):
    """Compares two dicts and returns the difference as a string,
    if there is any.

    Sorts two dicts (including sorting of the lists!!) and then diffs them.
    This will ignore string types ('unicode' vs 'string').

    args:
        dict1: First dict
        dict2: Second dict

    returns:
        A diff string if there's any difference, otherwise None.
    """
    dict1 = order_dict(dict1)
    dict2 = order_dict(dict2)

    if dict1 == dict2:
        return

    dict1 = pprint.pformat(dict1).splitlines()
    dict2 = pprint.pformat(dict2).splitlines()

    # Remove unicode identifiers.
    dict1 = map(lambda line: line.replace('u\'', '\''), dict1)
    dict2 = map(lambda line: line.replace('u\'', '\''), dict2)

    return '\n'.join(difflib.unified_diff(dict1, dict2, n=2))
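For reference, a minimal self-contained sketch of the same approach (recursive sort, pprint, then difflib.unified_diff). The _order helper here is a hypothetical stand-in for the order_dict function that the snippet above assumes:

import difflib
import pprint

def _order(value):
    # Recursively sort dicts and lists so ordering differences don't show up in
    # the diff (assumes list elements are mutually comparable).
    if isinstance(value, dict):
        return {k: _order(v) for k, v in sorted(value.items())}
    if isinstance(value, list):
        return sorted(_order(v) for v in value)
    return value

def diff_dicts_sketch(dict1, dict2):
    dict1, dict2 = _order(dict1), _order(dict2)
    if dict1 == dict2:
        return None
    lines1 = pprint.pformat(dict1).splitlines()
    lines2 = pprint.pformat(dict2).splitlines()
    return '\n'.join(difflib.unified_diff(lines1, lines2, n=2))

print(diff_dicts_sketch({'a': [2, 1]}, {'a': [1, 2], 'b': 3}))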
Example #5
    def schema_shell(self):
        """Performs the 'schema-shell' command."""
        schema_shell_home = self.env.get(SCHEMA_SHELL_HOME)
        assert (schema_shell_home is not None), \
            ("Environment variable undefined: %r" % SCHEMA_SHELL_HOME)
        assert os.path.isdir(schema_shell_home), \
            ("Invalid home directory for KijiSchema shell: %r" % schema_shell_home)
        schema_shell_script = os.path.join(schema_shell_home, "bin", "kiji-schema-shell")
        assert os.path.isfile(schema_shell_script), \
            ("KijiSchema shell not found: %r" % schema_shell_script)

        env = dict(self.env)

        classpath = env.get(KIJI_CLASSPATH, "").split(":") + list(self.express.get_classpath())
        env[KIJI_CLASSPATH] = ":".join(classpath)

        java_opts = env.get("JAVA_OPTS", "")
        # FIXME: I cannot find any trace of the Java system property "express.tmpjars"!
        # java_opts += (" -Dexpress.tmpjars=%s" % ???)

        # Relevant for KijiSchema 1.1 only and will be removed in Express 3.0:
        java_opts += " -Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED"
        env["JAVA_OPTS"] = java_opts

        cmd = [schema_shell_script]
        logging.debug("Launching kiji-schema shell with:\n%s\with KIJI_CLASSPATH:\n%s",
                      " \\\n\t".join(map(repr, cmd)), "\n".join(map(tab_indent, classpath)))
        logging.debug("Computed KIJI_CLASSPATH:")
        proc = subprocess.Popen(cmd, env=env)
        try:
            return proc.wait()
        except subprocess.SubprocessError:
            proc.kill()
Example #6
File: test_rdq.py Project: jcollie/txrdq
 def testNarrowNarrowWidenNarrow(self):
     items = range(14)
     dq = ResizableDispatchQueue(self.slow, 3)
     map(dq.put, items)
     # jobs 0, 1, 2 will be dispatched at time 0.00
     reactor.callLater(0.1, dq.setWidth, 2)
     reactor.callLater(
         0.1, self._testPending, dq, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
     # At time 0.20 jobs 0, 1, and 2 will all finish, but the
     # queue now has width 2, so 3 and 4 will be dispatched.
     reactor.callLater(
         0.25, self._testPending, dq, [5, 6, 7, 8, 9, 10, 11, 12, 13])
     reactor.callLater(0.3, dq.setWidth, 1)
     # 3 and 4 will finish at 0.40, and the queue is now width 1, so 5
     # will be dispatched at 0.40. Then 6 will be dispatched at 0.60.
     reactor.callLater(0.7, dq.setWidth, 3)
     # At 0.7 the queue is widened to 3, so 2 more jobs (7, 8) will be
     # dispatched. 6 will finish at 0.80, so one more job (9) will be
     # dispatched at 0.8.
     reactor.callLater(0.85, dq.setWidth, 1)
     # 7 and 8 will have finished at 0.9, at which point the queue is
     # down to size 1, but 9 is still in progress. At 1.0, 9 will finish
     # and 10 will launch. At 1.20, 10 will finish and 11 will be
     # dispatched.  11 will finish at about 1.40.
     return self._stopAndTest(1.35, dq, [12, 13])
Example #7
def test_format(obj, precision=6):
    tf = lambda o: test_format(o, precision)
    delimit = lambda o: ', '.join(o)
    otype = type(obj)
    if otype is str:
        return "'%s'" % obj
    elif otype is float or otype is int:
        if otype is int:
            obj = float(obj)
        fstr = '%%.%df' % precision
        return fstr % obj
    elif otype is set:
        if len(obj) == 0:
            return 'set()'
        return '{%s}' % delimit(sorted(map(tf, obj)))
    elif otype is dict:
        return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
    elif otype is list:
        return '[%s]' % delimit(map(tf, obj))
    elif otype is tuple:
        return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) == 1 else '')
    elif otype.__name__ in ['Vec','Mat']:
        entries = tf({x:obj.f[x] for x in obj.f if obj.f[x] != 0})
        return '%s(%s, %s)' % (otype.__name__, test_format(obj.D), entries)
    else:
        return str(obj)
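A few illustrative calls, assuming the test_format function above is in scope; the expected strings follow directly from its formatting rules:

print(test_format(1, precision=2))                  # 1.00
print(test_format([1, 2.5], precision=2))           # [1.00, 2.50]
print(test_format({3, 1, 2}, precision=0))          # {1, 2, 3}
print(test_format({'b': 2, 'a': 1}, precision=1))   # {'a': 1.0, 'b': 2.0}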
Example #8
File: test_rdq.py Project: jcollie/txrdq
 def testParallel(self):
     dq = ResizableDispatchQueue(self.slow, 3)
     map(dq.put, range(3))
     # This should finish in about 0.2 seconds, with nothing on the
     # queue.  We stop after just 0.001 seconds. All three tasks should
     # be dispatched immediately.
     return self._stopAndTest(0.001, dq, [])
Example #9
File: test_rdq.py Project: jcollie/txrdq
 def testNarrowNarrowWiden(self):
     dq = ResizableDispatchQueue(self.slow, 3)
     map(dq.put, range(11))
     reactor.callLater(0.1, dq.setWidth, 2)
     reactor.callLater(0.3, dq.setWidth, 1)
     reactor.callLater(0.7, dq.setWidth, 3)
     return self._stopAndTest(0.9, dq, [10])
Example #10
File: test_rdq.py Project: jcollie/txrdq
 def testJustOne(self):
     dq = ResizableDispatchQueue(self.slow, 1)
     map(dq.put, range(3))
     # This should finish in about 0.2 seconds, with 2 things still on
     # the queue because we stop it after 0.001 so task 0 is already
     # executing but 1 and 2 are queued.
     return self._stopAndTest(0.001, dq, [1, 2])
Example #11
File: test_rdq.py Project: jcollie/txrdq
 def testSequential(self):
     dq = ResizableDispatchQueue(self.slow, 1)
     map(dq.put, range(3))
     # This should finish in about 0.6 seconds, with nothing still on
     # the queue because we stop after 0.5 seconds so all three tasks
     # will have been dispatched one after another.
     return self._stopAndTest(0.5, dq, [])
Example #12
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
	row = []
	col = []
	data = []
	row_valid = []
	col_valid = []
	data_valid = []

	doc_ids = set(sorted(map(lambda row:int(row[0]), data_set)))
	base_ids_list = filter(lambda ids: ids % jump == 0, doc_ids)
	train_ids = base_ids_list
	valid_ids = set()
	if valid_flag:
		valid_index = filter(lambda ids: ids % validation_perc == 0, range(len(base_ids_list)))
		valid_ids = [base_ids_list[i] for i in valid_index]
		base_ids = set(base_ids_list)
		train_ids = sorted(base_ids - set(valid_ids))

	labels = map(lambda trid: int(data_label_hash[trid]), train_ids)
	labels_valid = map(lambda vlid: int(data_label_hash[vlid]), valid_ids)
	for i in range(len(data_set)):
		if int(data_set[i][0]) in train_ids:
			row.append(int(data_set[i][0]))
			col.append(int(data_set[i][1])-1)
			data.append(int(data_set[i][2]))
			# labels.append(int(data_label_hash[int(data_set[i][0])]))
		elif int(data_set[i][0]) in valid_ids:
			row_valid.append(int(data_set[i][0]))
			col_valid.append(int(data_set[i][1])-1)
			data_valid.append(int(data_set[i][2]))
			# labels_valid.append(int(data_label_hash[int(data_set[i][0])]))

	train = translate(row), col, data, labels
	valid = translate(row_valid), col_valid, data_valid, labels_valid
	return train, valid
Example #13
def trigger_mopage_refresh(obj, event):
    event_pages = filter(None,
                          map(lambda parent: IEventPage(parent, None),
                              aq_chain(obj)))
    if not event_pages:
        # We are not within an event page.
        # We only trigger when publishing an event page
        # or a child of an event page.
        return

    triggers = filter(None,
                      map(lambda parent: IPublisherMopageTrigger(parent, None),
                          aq_chain(obj)))
    if not triggers or not triggers[0].is_enabled():
        return

    for events in event_pages:
        IMopageModificationDate(events).touch()

    from collective.taskqueue import taskqueue

    trigger_url = triggers[0].build_trigger_url()
    callback_path = '/'.join(getSite().getPhysicalPath()
                             + ('taskqueue_events_trigger_mopage_refresh',))
    taskqueue.add(callback_path, params={'target': trigger_url})
Example #14
File: html.py Project: allieus/askbot3
def moderate_tags(html):
    """replaces instances of <a> and <img>
    with "item in moderation" alerts
    """
    from askbot.conf import settings
    soup = BeautifulSoup(html, 'html5lib')
    replaced = False
    if settings.MODERATE_LINKS:
        links = soup.find_all('a')
        if links:
            template = get_template('widgets/moderated_link.jinja')
            aviso = BeautifulSoup(template.render(), 'html5lib').find('body')
            map(lambda v: v.replaceWith(aviso), links)
            replaced = True

    if settings.MODERATE_IMAGES:
        images = soup.find_all('img')
        if images:
            template = get_template('widgets/moderated_link.jinja')
            aviso = BeautifulSoup(template.render(), 'html5lib').find('body')
            map(lambda v: v.replaceWith(aviso), images)
            replaced = True

    if replaced:
        return force_text(soup.find('body').renderContents(), 'utf-8')

    return html
Example #15
def parsexyz(filename):
    lst = parseALL(filename," Number     Number       Type","         ")
    lines = map(float,lst[1:-1])
    xyz = [lines[x:x+3] for x in xrange(0, len(lines), 3)] 
    b = xyz[1::2]
    Z = map(int,lines[1::6])
    return (Z,np.array(b)) 
Example #16
    def vectorize(self):
        ld = '('
        rd = ')'
        outstr = ''

        if self.__dict__[field_rarity]:
            outstr += ld + self.__dict__[field_rarity] + rd + ' '

        coststr = self.__dict__[field_cost].vectorize(delimit = True)
        if coststr:
            outstr += coststr + ' '

        typestr = ' '.join(map(lambda s: '(' + s + ')',
                               self.__dict__[field_supertypes] + self.__dict__[field_types]))
        if typestr:
            outstr += typestr + ' '

        if self.__dict__[field_subtypes]:
            outstr += ' '.join(self.__dict__[field_subtypes]) + ' '

        if self.__dict__[field_pt]:
            outstr += ' '.join(map(lambda s: '(' + s + ')',
                                   self.__dict__[field_pt].replace('/', '/ /').split()))
            outstr += ' '
        
        if self.__dict__[field_loyalty]:
            outstr += '((' + self.__dict__[field_loyalty] + ')) '
            
        outstr += self.__dict__[field_text].vectorize()

        if self.bside:
            outstr = '_ASIDE_ ' + outstr + '\n\n_BSIDE_ ' + self.bside.vectorize()

        return outstr
Example #17
File: api.py Project: mrooney/mintapi
    def get_budgets(self):  # {{{
        # Get categories
        categories = self.get_categories()

        # Issue request for budget utilization
        first_of_this_month = date.today().replace(day=1)
        eleven_months_ago = (first_of_this_month - timedelta(days=330)).replace(day=1)
        url = "{}/getBudget.xevent".format(MINT_ROOT_URL)
        params = {
            'startDate': eleven_months_ago.strftime('%m/%d/%Y'),
            'endDate': first_of_this_month.strftime('%m/%d/%Y'),
            'rnd': Mint.get_rnd(),
        }
        response = json.loads(self.get(url, params=params, headers=JSON_HEADER).text)

        # Make the skeleton return structure
        budgets = {
            'income': response['data']['income'][
                str(max(map(int, response['data']['income'].keys())))
            ]['bu'],
            'spend': response['data']['spending'][
                str(max(map(int, response['data']['income'].keys())))
            ]['bu']
        }

        # Fill in the return structure
        for direction in budgets.keys():
            for budget in budgets[direction]:
                budget['cat'] = self.get_category_from_id(
                    budget['cat'],
                    categories
                )

        return budgets
Example #18
File: __init__.py Project: AEliu/calibre
def initialize_constants():
    global __version__, __appname__, modules, functions, basenames, scripts

    src = open('src/calibre/constants.py', 'rb').read()
    nv = re.search(r'numeric_version\s+=\s+\((\d+), (\d+), (\d+)\)', src)
    __version__ = '%s.%s.%s'%(nv.group(1), nv.group(2), nv.group(3))
    __appname__ = re.search(r'__appname__\s+=\s+(u{0,1})[\'"]([^\'"]+)[\'"]',
            src).group(2)
    epsrc = re.compile(r'entry_points = (\{.*?\})', re.DOTALL).\
            search(open('src/calibre/linux.py', 'rb').read()).group(1)
    entry_points = eval(epsrc, {'__appname__': __appname__})

    def e2b(ep):
        return re.search(r'\s*(.*?)\s*=', ep).group(1).strip()

    def e2s(ep, base='src'):
        return (base+os.path.sep+re.search(r'.*=\s*(.*?):', ep).group(1).replace('.', '/')+'.py').strip()

    def e2m(ep):
        return re.search(r'.*=\s*(.*?)\s*:', ep).group(1).strip()

    def e2f(ep):
        return ep[ep.rindex(':')+1:].strip()

    basenames, functions, modules, scripts = {}, {}, {}, {}
    for x in ('console', 'gui'):
        y = x + '_scripts'
        basenames[x] = list(map(e2b, entry_points[y]))
        functions[x] = list(map(e2f, entry_points[y]))
        modules[x] = list(map(e2m, entry_points[y]))
        scripts[x] = list(map(e2s, entry_points[y]))
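The nested e2b/e2s/e2m/e2f helpers each extract one piece of a setuptools-style entry point string ('name = module.path:function'). A standalone illustration with a made-up entry point of that shape (path separator simplified to '/'):

import re

ep = 'ebook-convert = calibre.ebooks.conversion.cli:main'   # illustrative value only
print(re.search(r'\s*(.*?)\s*=', ep).group(1).strip())       # e2b -> 'ebook-convert'
print(re.search(r'.*=\s*(.*?)\s*:', ep).group(1).strip())    # e2m -> 'calibre.ebooks.conversion.cli'
print(ep[ep.rindex(':') + 1:].strip())                       # e2f -> 'main'
print('src/' + re.search(r'.*=\s*(.*?):', ep).group(1).replace('.', '/') + '.py')
                                                             # e2s -> 'src/calibre/ebooks/conversion/cli.py'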
Example #19
File: core.py Project: TYP30/apptools
	def render(self, path, context={}, elements={}, content_type='text/html', headers={}, **kwargs):

		''' Return a response containing a rendered Jinja template. Creates a session if one doesn't exist. '''
		
		if isinstance(self.context, dict) and len(self.context) > 0:
			tmp_context = self.context
			self.context = self.baseContext
			map(self._setcontext, tmp_context)
		else:
			self.context = self.baseContext
				
		# Build response HTTP headers
		response_headers = {}
		for key, value in self.baseHeaders.items():
			response_headers[key] = value
		if len(headers) > 0:
			for key, value in headers.items():
				response_headers[key] = value
		
		# Consider kwargs
		if len(kwargs) > 0:
			for k, v in kwargs.items():
				self.context[k] = v
		
		# Bind runtime-level template context
		try:
			self.context = self._bindRuntimeTemplateContext(self.context)
		except NotImplementedError, e:
			if config.debug:
				raise ## in production, the show must go on...
			else:
				pass
Example #20
File: fetcher.py Project: kapv89/slither
  def child_urls(self):
    if self.response().status_code != 200 or self.response().headers['content-type'] != 'text/html' :
      return []

    hrefs = map(lambda a: str(a.get('href')), self.soup().find_all('a'))
    childrens = filter(bool, map(self.process_href, hrefs))
    return childrens
Example #21
	def __call__(self, url, count_of_crawler):
		"""
		Function which fetch the content from the given URL and collect all the
		URL in the content and pass the first url of the page to fetch the
		content.
		"""
		try:
			page = urllib2.urlopen(url)
			soup = BeautifulSoup(page.read())	

			links_on_page = map(lambda anchor: anchor.get('href'), 
						soup.find_all('a'))

			cleaned_url = map(lambda link: link if urlparse(link).scheme 
	 				and urlparse(url).netloc else (urlparse(url)
					.scheme+"://"+urlparse(url).netloc+link if 
					link[0] == "/" else url+link), links_on_page)
			visited_url.append(url)
			total_collected_url.append(cleaned_url)
			next_url_to_visit = [next_url for next_url in cleaned_url\
				 if not next_url in visited_url and not "#" in next_url][0]
		
			if count_of_crawler and next_url_to_visit:	
				count_of_crawler = crawler(next_url_to_visit, 
								count_of_crawler-1)
	
		except:
			print "It seems there is some issue in URL "+url
	
		return count_of_crawler
Example #22
 def test_0(self):
     accuracies = map(get_lfw_restricted_accuracy,
                      map(lambda x: '%02i' % x, range(1, 11)))
     accuracy_avg = np.mean(np.array(accuracies))
     accuracy_std = np.std(np.array(accuracies))
     print('Accuracy on LFW test set (10 fold CV): %4.3g +/- %4.3g' % (
         accuracy_avg, accuracy_std))
Example #23
def run_clustering_example(run):
  global current_graph_state
  n = 100
  accs = []
  for i in range(100):
    current_graph_state = GraphState()
    sampler = run()
    samples = [sampler() for i in range(n)]
    templ = current_graph_state.to_JSON()
    rand_params = hs_rand_template_params(templ)
    print hs_sample_bayes_net(templ, rand_params)
    varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
    frozen_samples = [freeze_value(samp, varvals) for samp in samples]
    true_latents = [x[0] for x in frozen_samples]
    print true_latents
    templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
    print 'best score', params_score(templ, rand_params)
    state_params_list = infer_states_and_parameters(templ)
    rand_cs = params_to_cluster_centers(rand_params)
    iter_accs = []
    j = 0
    for (state, params, score) in state_params_list:
      print 'score', score
      cs = params_to_cluster_centers(params)
      # if j > 1:
      #   varvals = state_to_varvals(state)
      #   state_latents = [freeze_value(samp[0], varvals) for samp in samples]
      #   acc = cluster_assignment_accuracy(true_latents, state_latents)
      #   iter_accs.append(acc)
      j += 1
    accs.append(iter_accs)
  print map(mean, zip(*accs))
Example #24
 def step(self):
     r = self.table.step(self.tps)
     self.t += self.tps
     if not self.table.fullyOcc():
         self.lastseent = self.t
         self.lastseen = map(int,self.table.ball.getpos())
     #print self.lastseen, self.lastseent
     
     # Update particles
     weights = [p.weight for p in self.particles]
     ps = self.getPartPs()
     newws = [w*p for w,p in zip(weights, ps)]
     newws.append(self.newp)
     newws = map(lambda x: np.power(x,self.temp), newws)
     totw = sum(newws)
     newws = map(lambda x: x / totw, newws)
     #seff = sum(map(lambda w: 1 / (w*w), newws))
     newparts = copy.copy(self.particles); newparts.append("Empty")
     newps = selectReplace(newparts,newws,len(self.particles))
     rejns = 0.
     for i in range(len(newps)):
         if newps[i] == "Empty": newps[i] = Particle(self.table,self.kapv,self.kapb,self.kapm,self.perr,self.tps,self.lastseent, self.lastseen); rejns += 1.
         #else: newps[i] = copy.deepcopy(newps[i])
     for p in newps: p.weight = 1
     self.lastrej = rejns / self.npart
     self.particles = newps
     return r
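In isolation, the weight update above multiplies each particle weight by its likelihood, appends a slot for a fresh particle, tempers, and renormalizes. A small numeric sketch of just that step (values illustrative, numpy assumed):

import numpy as np

weights = [0.2, 0.5, 0.3]        # current particle weights
likelihoods = [0.1, 0.6, 0.3]    # per-particle observation likelihoods
newp, temp = 0.05, 0.8           # weight reserved for a fresh particle, tempering exponent

newws = [w * p for w, p in zip(weights, likelihoods)]
newws.append(newp)                           # candidate slot for a brand-new particle
newws = [np.power(x, temp) for x in newws]   # tempering flattens the distribution
total = sum(newws)
newws = [x / total for x in newws]           # normalize so the weights sum to 1
print(newws, sum(newws))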
Example #25
    def get_disks(self):
        disks_to_attach_names = self.__get_disks_to_attach_names()
        self.disks = map(lambda disk: self.__add_attach_data_to_disk(disk, disks_to_attach_names), self.disks)

        return map(lambda disk: {"name": disk.getAttribute('ovf:diskId'),
                                 "capacity": int(disk.getAttribute('ovf:capacity')) / (2**30),
                                 "attach": disk.getAttribute('attach')}, self.disks)
Example #26
def farpairs_ranges(ratings):
    pairs = []
    for i in xrange(len(ratings)):
        for j in xrange(len(ratings)):
            dist = abs(i-j)
            std1 = ratings[i][2]
            avg1 = ratings[i][1]
            std2 = ratings[j][2]
            avg2 = ratings[j][1]

            if (avg1 + std1 > avg2 - std2 and avg1 + std1 <= avg2 + std2) or \
               (avg2 + std2 > avg1 - std1 and avg2 + std2 <= avg1 + std1):
                pairs.append([i, j, dist])
    pairs.sort(lambda x,y: x[2] < y[2] and -1 or 1)
    pairs = map(lambda x: x[:1], pairs)
    idxs = []
    for p in pairs:
        idxs.extend(p)
    subset = set()
    while len(idxs) > 0 and len(subset) < WINDOW:
        subset.add(idxs.pop())
    
    subset = filter(lambda x: x in subset, range(len(ratings)))
    subset = map(lambda x: ratings[x], subset)
            
    return reorder(ratings, subset)
Example #27
File: offline.py Project: minrk/phoenix
    def plot_data(self,v):
        """
        Simple plot window that can be updated very fast.
        No grid or resize like plot()
        """
        if self.plotwin is None:
            self.plotwin = Tk()
            self.plotwin.title('Phoenix plot')
            self.plotwin.protocol("WM_DELETE_WINDOW", self.clean_qplot)
            self.canvas = Canvas(self.plotwin, background='white', width=WIDTH + 20, height=HALF_HEIGHT*2 + 20)
            self.canvas.pack()

            self.canvas.create_rectangle(10, 10, WIDTH+10, HALF_HEIGHT*2 + 10, outline='#009900')
            self.canvas.create_line([(10, HALF_HEIGHT+10), (WIDTH+10, HALF_HEIGHT+10)], fill='#00ff00')
        if len(self.plot_trace) != 0:
            map(lambda x: self.canvas.delete(x), self.plot_trace)
            self.plot_trace = []
            self.plotwin.update()
                    
        numchans = len(v[0]) - 1
        npoints = len(v)
        xscale = WIDTH/v[-1][0]
        yscale = HALF_HEIGHT/YMAX
        for ch in range(numchans):
            a = []
            for i in range(npoints):
                x = 10 + v[i][0] * xscale
                y = (HALF_HEIGHT + 10) - v[i][ch+1] * yscale
                a.append((x, y))
            line = self.canvas.create_line(a, fill=self.colors[ch])            
            self.plot_trace.append(line)
        self.plotwin.update()
Example #28
def find_segments(doc, key, use_segment_table = True):
    key_pieces = key.split(':')
    while len(key_pieces) < 3:
        key_pieces.append('*')

    filter_func = lambda x: str(x.ifos) == key_pieces[0] and (str(x.name) == key_pieces[1] or key_pieces[1] == '*') and (str(x.version) == key_pieces[2] or key_pieces[2] == '*') 

    # Find all segment definers matching the critieria
    seg_def_table = lsctables.SegmentDefTable.get_table(doc)
    seg_defs      = filter(filter_func, seg_def_table)
    seg_def_ids   = map(lambda x: str(x.segment_def_id), seg_defs)

    # Find all segments belonging to those definers
    if use_segment_table:
        seg_table     = lsctables.SegmentTable.get_table(doc)
        seg_entries   = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_table)
    else:
        seg_sum_table = lsctables.SegmentSumTable.get_table(doc)
        seg_entries   = filter(lambda x: str(x.segment_def_id) in seg_def_ids, seg_sum_table)

    # Combine into a segmentlist
    ret = segmentlist(map(lambda x: segment(x.start_time, x.end_time), seg_entries))

    ret.coalesce()

    return ret
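The key argument is an 'ifos:name:version' pattern; missing pieces default to '*', which matches any name or version. A standalone sketch of just that matching rule, using a hypothetical SegDef stand-in for a segment_definer row:

from collections import namedtuple

SegDef = namedtuple('SegDef', 'ifos name version')

def key_matches(segdef, key):
    pieces = key.split(':')
    while len(pieces) < 3:
        pieces.append('*')
    return (str(segdef.ifos) == pieces[0]
            and (str(segdef.name) == pieces[1] or pieces[1] == '*')
            and (str(segdef.version) == pieces[2] or pieces[2] == '*'))

print(key_matches(SegDef('H1', 'DMT-SCIENCE', 1), 'H1'))                # True
print(key_matches(SegDef('H1', 'DMT-SCIENCE', 1), 'H1:DMT-SCIENCE:2'))  # False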
Example #29
	def test_001_diff_phasor_vcc(self):
		a = [1+2j,2+3.5j,3.5+4j,4+5j,5+6j]
		b = [1j,1j,1j,1j,1j]
		c = [-1j+3,1j,-7+0j,2.5j+0.333,3.2j]
		d = [(0.35979271051026462+0.89414454782483865j),
		     (0.19421665709046287+0.024219594550527801j),
		     (0.12445564785882557+0.40766238899138718j),
		     (0.041869638845043688+0.97860437393366329j),
		     (0.068927762235083234+0.16649764877365247j)]
		e = [(0.16207552830286298+0.435385030608331j),
		     (0.47195779613669675+0.37824764113272558j),
		     (0.13911998015446148+0.6585095669811617j),
		     (0.093510743358783954+0.98446560079828938j),
		     (0.86036393297704694+0.72043005342024602j)]
		multconj = lambda x,y: x.conjugate()*y
		src_data        = a+b+c+d+e
		expected_result = [0j,0j,0j,0j,0j]+map(multconj,a,b)+map(multconj,b,c)+map(multconj,c,d)+map(multconj,d,e)
		src = blocks.vector_source_c(src_data)
		s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, 5)
		diff_phasor_vcc = grdab.diff_phasor_vcc(5)
		v2s = blocks.vector_to_stream(gr.sizeof_gr_complex, 5)
		dst = blocks.vector_sink_c()
		self.tb.connect(src, s2v, diff_phasor_vcc, v2s, dst)
		self.tb.run()
		result_data = dst.data()
		# print expected_result
		# print result_data
		self.assertComplexTuplesAlmostEqual(expected_result, result_data, 6)
Example #30
def reverse_lists(lists):
	"""
	>>> reverse_lists([[1,2,3], [4,5,6]])
	[[3, 2, 1], [6, 5, 4]]
	"""

	return list(map(list, map(reversed, lists)))
Example #31
#anju
X,D,Y=map(str,input().split())
N=int(X)
M=int(Y)
if D=="/":
    print(int(N/M))
if D=="%":
    print(int(N%M))
Example #32
File: c.py Project: RyoSci/AtCoder
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.readline

x, y, a, b = list(map(int, input().split()))
ans = 0

if x*a >= x+b:
    ans += (y-x)//b
    if (y-x) % b == 0:
        ans -= 1
    print(ans)
else:
    now = x
    for i in range(1, 100):
        if now*a <= now+b:
            if now*a < y:
                now *= a
                ans += 1
            else:
                break
        else:
            if now+b < y:
                ans += (y-now)//b
                if (y-now) % b == 0:
                    ans -= 1
                break
            else:
                break
    print(ans)
Example #33
# Problem 1546

case = int(input())
score = list(map(int, input().split()))

good = max(score)
new_score = 0
for i in range(len(score)):
    new_score += score[i] / good * 100

print(new_score / case) 
Example #34
def analyze_course_content(course_id, 
                           listings_file=None,
                           basedir="X-Year-2-data-sql", 
                           datedir="2013-09-21", 
                           use_dataset_latest=False,
                           do_upload=False,
                           courses=None,
                           verbose=True,
                           pin_date=None,
                           ):
    '''
    Compute course_content table, which quantifies:

    - number of chapter, sequential, vertical modules
    - number of video modules
    - number of problem, *openended, mentoring modules
    - number of discussion, annotatable, word_cloud modules

    Do this using the course "xbundle" file, produced when the course axis is computed.

    Include only modules which had nontrivial use, to rule out staff-only and never-shown content.
    The exclusion is based on how often each module appears in the studentmodule table, taken from
    stats_module_usage for each course.

    Also, from the course listings file, compute the number of weeks the course was open.

    If do_upload (triggered by --force-recompute) then upload all accumulated data to the course report dataset 
    as the "stats_course_content" table.  Also generate a "course_summary_stats" table, stored in the
    course_report_ORG or course_report_latest dataset.  The course_summary_stats table combines
    data from many reports, including stats_course_content, the medians report, the listings file,
    broad_stats_by_course, and time_on_task_stats_by_course.
    
    '''

    if do_upload:
        if use_dataset_latest:
            org = "latest"
        else:
            org = courses[0].split('/',1)[0]	# extract org from first course_id in courses

        crname = 'course_report_%s' % org

        gspath = gsutil.gs_path_from_course_id(crname)
        gsfnp = gspath / CCDATA
        gsutil.upload_file_to_gs(CCDATA, gsfnp)
        tableid = "stats_course_content"
        dataset = crname

        mypath = os.path.dirname(os.path.realpath(__file__))
        SCHEMA_FILE = '%s/schemas/schema_content_stats.json' % mypath

        try:
            the_schema = json.loads(open(SCHEMA_FILE).read())[tableid]
        except Exception as err:
            print "Oops!  Failed to load schema file for %s.  Error: %s" % (tableid, str(err))
            raise

        if 0:
            bqutil.load_data_to_table(dataset, tableid, gsfnp, the_schema, wait=True, verbose=False,
                                      format='csv', skiprows=1)

        table = 'course_metainfo'
        course_tables = ',\n'.join([('[%s.course_metainfo]' % bqutil.course_id2dataset(x)) for x in courses])
        sql = "select * from {course_tables}".format(course_tables=course_tables)
        print "--> Creating %s.%s using %s" % (dataset, table, sql)

        if 1:
            metainfo_dataset = bqutil.get_bq_table(dataset, table, sql=sql, 
                                          newer_than=datetime.datetime(2015, 1, 16, 3, 0),
                                          )
            # bqutil.create_bq_table(dataset, table, sql, overwrite=True)


        #-----------------------------------------------------------------------------
        # make course_summary_stats table
        #
        # This is a combination of the broad_stats_by_course table (if that exists), and course_metainfo.
        # Also use (and create if necessary) the nregistered_by_wrap table.

        # get the broad_stats_by_course data
        bsbc = bqutil.get_table_data(dataset, 'broad_stats_by_course')

        table_list = bqutil.get_list_of_table_ids(dataset)

        latest_person_course = max([ x for x in table_list if x.startswith('person_course_')])
        print "Latest person_course table in %s is %s" % (dataset, latest_person_course)
        
        sql = """
                SELECT pc.course_id as course_id, 
                    cminfo.wrap_date as wrap_date,
                    count(*) as nregistered,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) nregistered_by_wrap,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) / nregistered * 100 nregistered_by_wrap_pct,
                FROM
                    [{dataset}.{person_course}] as pc
                left join (
                 SELECT course_id,
                      TIMESTAMP(concat(wrap_year, "-", wrap_month, '-', wrap_day, ' 23:59:59')) as wrap_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as wrap_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as wrap_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as wrap_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Course Wrap'
                 )) as cminfo
                on pc.course_id = cminfo.course_id
                
                group by course_id, wrap_date
                order by course_id
        """.format(dataset=dataset, person_course=latest_person_course)

        nr_by_wrap = bqutil.get_bq_table(dataset, 'nregistered_by_wrap', sql=sql, key={'name': 'course_id'})

        # rates for registrants before and during course
        
        sql = """
                SELECT 
                    *,
                    ncertified / nregistered * 100 as pct_certified_of_reg,
                    ncertified_and_registered_before_launch / nregistered_before_launch * 100 as pct_certified_reg_before_launch,
                    ncertified_and_registered_during_course / nregistered_during_course * 100 as pct_certified_reg_during_course,
                    ncertified / nregistered_by_wrap * 100 as pct_certified_of_reg_by_wrap,
                    ncertified / nviewed * 100 as pct_certified_of_viewed,
                    ncertified / nviewed_by_wrap * 100 as pct_certified_of_viewed_by_wrap,
                    ncertified_by_ewrap / nviewed_by_ewrap * 100 as pct_certified_of_viewed_by_ewrap,
                FROM
                (
                # ------------------------
                # get aggregate data
                SELECT pc.course_id as course_id, 
                    cminfo.wrap_date as wrap_date,
                    count(*) as nregistered,
                    sum(case when pc.certified then 1 else 0 end) ncertified,
                    sum(case when (TIMESTAMP(pc.cert_created_date) < cminfo.ewrap_date) and (pc.certified and pc.viewed) then 1 else 0 end) ncertified_by_ewrap,
                    sum(case when pc.viewed then 1 else 0 end) nviewed,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) nregistered_by_wrap,
                    sum(case when pc.start_time < cminfo.wrap_date then 1 else 0 end) / nregistered * 100 nregistered_by_wrap_pct,
                    sum(case when (pc.start_time < cminfo.wrap_date) and pc.viewed then 1 else 0 end) nviewed_by_wrap,
                    sum(case when (pc.start_time < cminfo.ewrap_date) and pc.viewed then 1 else 0 end) nviewed_by_ewrap,
                    sum(case when pc.start_time < cminfo.launch_date then 1 else 0 end) nregistered_before_launch,
                    sum(case when pc.start_time < cminfo.launch_date 
                              and pc.certified
                              then 1 else 0 end) ncertified_and_registered_before_launch,
                    sum(case when (pc.start_time >= cminfo.launch_date) 
                              and (pc.start_time < cminfo.wrap_date) then 1 else 0 end) nregistered_during_course,
                    sum(case when (pc.start_time >= cminfo.launch_date) 
                              and (pc.start_time < cminfo.wrap_date) 
                              and pc.certified
                              then 1 else 0 end) ncertified_and_registered_during_course,
                FROM
                    [{dataset}.{person_course}] as pc
                left join (
                
                # --------------------
                #  get course launch and wrap dates from course_metainfo

       SELECT AA.course_id as course_id, 
              AA.wrap_date as wrap_date,
              AA.launch_date as launch_date,
              BB.ewrap_date as ewrap_date,
       FROM (
               #  inner get course launch and wrap dates from course_metainfo
                SELECT A.course_id as course_id,
                  A.wrap_date as wrap_date,
                  B.launch_date as launch_date,
                from
                (
                 SELECT course_id,
                      TIMESTAMP(concat(wrap_year, "-", wrap_month, '-', wrap_day, ' 23:59:59')) as wrap_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as wrap_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as wrap_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as wrap_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Course Wrap'
                 )
                ) as A
                left outer join 
                (
                 SELECT course_id,
                      TIMESTAMP(concat(launch_year, "-", launch_month, '-', launch_day)) as launch_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as launch_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as launch_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as launch_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Course Launch'
                 )
                ) as B
                on A.course_id = B.course_id 
                # end inner course_metainfo subquery
            ) as AA
            left outer join
            (
                 SELECT course_id,
                      TIMESTAMP(concat(wrap_year, "-", wrap_month, '-', wrap_day, ' 23:59:59')) as ewrap_date,
                 FROM (
                  SELECT course_id, 
                    regexp_extract(value, r'(\d+)/\d+/\d+') as wrap_month,
                    regexp_extract(value, r'\d+/(\d+)/\d+') as wrap_day,
                    regexp_extract(value, r'\d+/\d+/(\d+)') as wrap_year,
                  FROM [{dataset}.course_metainfo]
                  where key='listings_Empirical Course Wrap'
                 )
            ) as BB
            on AA.course_id = BB.course_id

                # end course_metainfo subquery
                # --------------------
                
                ) as cminfo
                on pc.course_id = cminfo.course_id
                
                group by course_id, wrap_date
                order by course_id
                # ---- end get aggregate data
                )
                order by course_id
        """.format(dataset=dataset, person_course=latest_person_course)

        print "--> Assembling course_summary_stats from %s" % 'stats_cert_rates_by_registration'
        sys.stdout.flush()
        cert_by_reg = bqutil.get_bq_table(dataset, 'stats_cert_rates_by_registration', sql=sql, 
                                          newer_than=datetime.datetime(2015, 1, 16, 3, 0),
                                          key={'name': 'course_id'})

        # start assembling course_summary_stats

        c_sum_stats = defaultdict(OrderedDict)
        for entry in bsbc['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            cmci.update(entry)
            cnbw = nr_by_wrap['data_by_key'][course_id]
            nbw = int(cnbw['nregistered_by_wrap'])
            cmci['nbw_wrap_date'] = cnbw['wrap_date']
            cmci['nregistered_by_wrap'] = nbw
            cmci['nregistered_by_wrap_pct'] = cnbw['nregistered_by_wrap_pct']
            cmci['frac_female'] = float(entry['n_female_viewed']) / (float(entry['n_male_viewed']) + float(entry['n_female_viewed']))
            ncert = float(cmci['certified_sum'])
            if ncert:
                cmci['certified_of_nregistered_by_wrap_pct'] = nbw / ncert * 100.0
            else:
                cmci['certified_of_nregistered_by_wrap_pct'] = None
            cbr = cert_by_reg['data_by_key'][course_id]
            for field, value in cbr.items():
                cmci['cbr_%s' % field] = value

        # add medians for viewed, explored, and certified

        msbc_tables = {'msbc_viewed': "viewed_median_stats_by_course",
                       'msbc_explored': 'explored_median_stats_by_course',
                       'msbc_certified': 'certified_median_stats_by_course',
                       'msbc_verified': 'verified_median_stats_by_course',
                       }
        for prefix, mtab in msbc_tables.items():
            print "--> Merging median stats data from %s" % mtab
            sys.stdout.flush()
            bqdat = bqutil.get_table_data(dataset, mtab)
            for entry in bqdat['data']:
                course_id = entry['course_id']
                cmci = c_sum_stats[course_id]
                for field, value in entry.items():
                    cmci['%s_%s' % (prefix, field)] = value

        # add time on task data

        tot_table = "time_on_task_stats_by_course"
        prefix = "ToT"
        print "--> Merging time on task data from %s" % tot_table
        sys.stdout.flush()
        try:
            bqdat = bqutil.get_table_data(dataset, tot_table)
        except Exception as err:
            bqdat = {'data': {}}
        for entry in bqdat['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            for field, value in entry.items():
                if field=='course_id':
                    continue
                cmci['%s_%s' % (prefix, field)] = value

        # add serial time on task data

        tot_table = "time_on_task_serial_stats_by_course"
        prefix = "SToT"
        print "--> Merging serial time on task data from %s" % tot_table
        sys.stdout.flush()
        try:
            bqdat = bqutil.get_table_data(dataset, tot_table)
        except Exception as err:
            bqdat = {'data': {}}
        for entry in bqdat['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            for field, value in entry.items():
                if field=='course_id':
                    continue
                cmci['%s_%s' % (prefix, field)] = value

        # add show_answer stats

        tot_table = "show_answer_stats_by_course"
        prefix = "SAS"
        print "--> Merging show_answer stats data from %s" % tot_table
        sys.stdout.flush()
        try:
            bqdat = bqutil.get_table_data(dataset, tot_table)
        except Exception as err:
            bqdat = {'data': {}}
        for entry in bqdat['data']:
            course_id = entry['course_id']
            cmci = c_sum_stats[course_id]
            for field, value in entry.items():
                if field=='course_id':
                    continue
                cmci['%s_%s' % (prefix, field)] = value

        # setup list of keys, for CSV output

        css_keys = c_sum_stats.values()[0].keys()

        # retrieve course_metainfo table, pivot, add that to summary_stats

        print "--> Merging course_metainfo from %s" % table
        sys.stdout.flush()
        bqdat = bqutil.get_table_data(dataset, table)

        listings_keys = map(make_key, ["Institution", "Semester", "New or Rerun", "Andrew Recodes New/Rerun", 
                                       "Course Number", "Short Title", "Andrew's Short Titles", "Title", 
                                       "Instructors", "Registration Open", "Course Launch", "Course Wrap", "course_id",
                                       "Empirical Course Wrap", "Andrew's Order", "certifies", "MinPassGrade",
                                       '4-way Category by name', "4-way (CS, STEM, HSocSciGov, HumHistRel)"
                                       ])
        listings_keys.reverse()
        
        for lk in listings_keys:
            css_keys.insert(1, "listings_%s" % lk)

        COUNTS_TO_KEEP = ['discussion', 'problem', 'optionresponse', 'checkboxgroup', 'optioninput', 
                          'choiceresponse', 'video', 'choicegroup', 'vertical', 'choice', 'sequential', 
                          'multiplechoiceresponse', 'numericalresponse', 'chapter', 'solution', 'img', 
                          'formulaequationinput', 'responseparam', 'selfassessment', 'track', 'task', 'rubric', 
                          'stringresponse', 'combinedopenended', 'description', 'textline', 'prompt', 'category', 
                          'option', 'lti', 'annotationresponse', 
                          'annotatable', 'colgroup', 'tag_prompt', 'comment', 'annotationinput', 'image', 
                          'options', 'comment_prompt', 'conditional', 
                          'answer', 'poll_question', 'section', 'wrapper', 'map', 'area', 
                          'customtag', 'transcript', 
                          'split_test', 'word_cloud', 
                          'openended', 'openendedparam', 'answer_display', 'code', 
                          'drag_and_drop_input', 'customresponse', 'draggable', 'mentoring', 
                          'textannotation', 'imageannotation', 'videosequence', 
                          'feedbackprompt', 'assessments', 'openassessment', 'assessment', 'explanation', 'criterion']

        for entry in bqdat['data']:
            thekey = make_key(entry['key'])
            # if thekey.startswith('count_') and thekey[6:] not in COUNTS_TO_KEEP:
            #     continue
            if thekey.startswith('listings_') and thekey[9:] not in listings_keys:
                # print "dropping key=%s for course_id=%s" % (thekey, entry['course_id'])
                continue
            c_sum_stats[entry['course_id']][thekey] = entry['value']
            #if 'certifies' in thekey:
            #    print "course_id=%s, key=%s, value=%s" % (entry['course_id'], thekey, entry['value'])
            if thekey not in css_keys:
                css_keys.append(thekey)

        # compute forum_posts_per_week
        for course_id, entry in c_sum_stats.items():
            nfps = entry.get('nforum_posts_sum', 0)
            if nfps:
                fppw = int(nfps) / float(entry['nweeks'])
                entry['nforum_posts_per_week'] = fppw
                print "    course: %s, assessments_per_week=%s, forum_posts_per_week=%s" % (course_id, entry['total_assessments_per_week'], fppw)
            else:
                entry['nforum_posts_per_week'] = None
        css_keys.append('nforum_posts_per_week')

        # read in listings file and merge that in also
        if listings_file:
            if listings_file.endswith('.csv'):
                listings = csv.DictReader(open(listings_file))
            else:
                listings = [ json.loads(x) for x in open(listings_file) ]
            for entry in listings:
                course_id = entry['course_id']
                if course_id not in c_sum_stats:
                    continue
                cmci = c_sum_stats[course_id]
                for field, value in entry.items():
                    lkey = "listings_%s" % make_key(field)
                    if not (lkey in cmci) or (not cmci[lkey]):
                        cmci[lkey] = value

        print "Storing these fields: %s" % css_keys

        # get schema
        mypath = os.path.dirname(os.path.realpath(__file__))
        the_schema = json.loads(open('%s/schemas/schema_combined_course_summary_stats.json' % mypath).read())
        schema_dict = { x['name'] : x for x in the_schema }

        # write out CSV
        css_table = "course_summary_stats"
        ofn = "%s__%s.csv" % (dataset, css_table)
        ofn2 = "%s__%s.json" % (dataset, css_table)
        print "Writing data to %s and %s" % (ofn, ofn2)

        ofp = open(ofn, 'w')
        ofp2 = open(ofn2, 'w')
        dw = csv.DictWriter(ofp, fieldnames=css_keys)
        dw.writeheader()
        for cid, entry in c_sum_stats.items():
            for ek in list(entry):  # iterate over a copy of the keys so entries can be popped safely
                if ek not in schema_dict:
                    entry.pop(ek)
                # entry[ek] = str(entry[ek])	# coerce to be string
            ofp2.write(json.dumps(entry) + "\n")
            for key in css_keys:
                if key not in entry:
                    entry[key] = None
            dw.writerow(entry)
        ofp.close()
        ofp2.close()

        # upload to bigquery
        # the_schema = [ { 'type': 'STRING', 'name': x } for x in css_keys ]
        if 1:
            gsfnp = gspath / dataset / (css_table + ".json")
            gsutil.upload_file_to_gs(ofn2, gsfnp)
            # bqutil.load_data_to_table(dataset, css_table, gsfnp, the_schema, wait=True, verbose=False,
            #                           format='csv', skiprows=1)
            bqutil.load_data_to_table(dataset, css_table, gsfnp, the_schema, wait=True, verbose=False)

        return

    
    print "-"*60 + " %s" % course_id

    # get nweeks from listings
    lfn = path(listings_file)
    if not lfn.exists():
        print "[analyze_content] course listings file %s doesn't exist!" % lfn
        return

    data = None
    if listings_file.endswith('.json'):
        data_feed = map(json.loads, open(lfn))
    else:
        data_feed = csv.DictReader(open(lfn))
    for k in data_feed:
        if not 'course_id' in k:
            print "Strange course listings row, no course_id in %s" % k
            raise Exception("Missing course_id")
        if k['course_id']==course_id:
            data = k
            break

    if not data:
        print "[analyze_content] no entry for %s found in course listings file %s!" % (course_id, lfn)
        return

    def date_parse(field):
        (m, d, y) = map(int, data[field].split('/'))
        return datetime.datetime(y, m, d)

    launch = date_parse('Course Launch')
    wrap = date_parse('Course Wrap')
    ndays = (wrap - launch).days
    nweeks = ndays / 7.0

    print "Course length = %6.2f weeks (%d days)" % (nweeks, ndays)

    if pin_date:
        datedir = pin_date
    course_dir = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest and not pin_date)
    cfn = gsutil.path_from_course_id(course_id)

    xbfn = course_dir / ("xbundle_%s.xml" % cfn)
    
    if not xbfn.exists():
        print "[analyze_content] cannot find xbundle file %s for %s!" % (xbfn, course_id)

        if use_dataset_latest:
            # try looking in earlier directories for xbundle file
            import glob
            spath = course_dir / ("../*/xbundle_%s.xml" % cfn)
            files = list(glob.glob(spath))
            if files:
                xbfn = path(files[-1])
            if not xbfn.exists():
                print "   --> also cannot find any %s ; aborting!" % spath
            else:
                print "   --> Found and using instead: %s " % xbfn
        if not xbfn.exists():
            raise Exception("[analyze_content] missing xbundle file %s" % xbfn)

    # if there is an xbundle*.fixed file, use that instead of the normal one
    if os.path.exists(str(xbfn) + ".fixed"):
        xbfn = path(str(xbfn) + ".fixed")

    print "[analyze_content] For %s using %s" % (course_id, xbfn)
    
    # get module usage data
    mudata = get_stats_module_usage(course_id, basedir, datedir, use_dataset_latest)

    xml = etree.parse(open(xbfn)).getroot()
    
    counts = defaultdict(int)
    nexcluded = defaultdict(int)

    IGNORE = ['html', 'p', 'div', 'iframe', 'ol', 'li', 'ul', 'blockquote', 'h1', 'em', 'b', 'h2', 'h3', 'body', 'span', 'strong',
              'a', 'sub', 'strike', 'table', 'td', 'tr', 's', 'tbody', 'sup', 'sub', 'strike', 'i', 's', 'pre', 'policy', 'metadata',
              'grading_policy', 'br', 'center',  'wiki', 'course', 'font', 'tt', 'it', 'dl', 'startouttext', 'endouttext', 'h4', 
              'head', 'source', 'dt', 'hr', 'u', 'style', 'dd', 'script', 'th', 'p', 'P', 'TABLE', 'TD', 'small', 'text', 'title']

    problem_stats = defaultdict(int)

    def does_problem_have_random_script(problem):
        '''
        return 1 if problem has a script with "random." in it
        else return 0
        '''
        for elem in problem.findall('.//script'):
            if elem.text and ('random.' in elem.text):
                return 1
        return 0

    # walk through xbundle 
    def walk_tree(elem, policy=None):
        '''
        Walk XML tree recursively.
        elem = current element
        policy = dict of attributes for children to inherit, with fields like due, graded, showanswer
        '''
        policy = policy or {}
        if  type(elem.tag)==str and (elem.tag.lower() not in IGNORE):
            counts[elem.tag.lower()] += 1
        if elem.tag in ["sequential", "problem", "problemset", "course", "chapter"]:	# very old courses may use inheritance from course & chapter
            keys = ["due", "graded", "format", "showanswer", "start"]
            for k in keys:		# copy inheritable attributes, if they are specified
                val = elem.get(k)
                if val:
                    policy[k] = val
        if elem.tag=="problem":	# accumulate statistics about problems: how many have show_answer = [past_due, closed] ?  have random. in script?
            problem_stats['n_capa_problems'] += 1
            if policy.get('showanswer'):
                problem_stats["n_showanswer_%s" % policy.get('showanswer')] += 1
            else:
                problem_stats['n_shownanswer_finished'] += 1	# DEFAULT showanswer = finished  (make sure this remains true)
                # see https://github.com/edx/edx-platform/blob/master/common/lib/xmodule/xmodule/capa_base.py#L118
                # finished = Show the answer after the student has answered the problem correctly, the student has no attempts left, or the problem due date has passed.
            problem_stats['n_random_script'] += does_problem_have_random_script(elem)

            if policy.get('graded')=='true' or policy.get('graded')=='True':
                problem_stats['n_capa_problems_graded'] += 1
                problem_stats['n_graded_random_script'] += does_problem_have_random_script(elem)
                if policy.get('showanswer'):
                    problem_stats["n_graded_showanswer_%s" % policy.get('showanswer')] += 1
                else:
                    problem_stats['n_graded_shownanswer_finished'] += 1	# DEFAULT showanswer = finished  (make sure this remains true)
            
        for k in elem:
            midfrag = (k.tag, k.get('url_name_orig', None))
            if (midfrag in mudata) and int(mudata[midfrag]['ncount']) < 20:
                nexcluded[k.tag] += 1
                if verbose:
                    try:
                        print "    -> excluding %s (%s), ncount=%s" % (k.get('display_name', '<no_display_name>').encode('utf8'), 
                                                                       midfrag, 
                                                                       mudata.get(midfrag, {}).get('ncount'))
                    except Exception as err:
                        print "    -> excluding ", k
                continue
            walk_tree(k, policy.copy())

    walk_tree(xml)
    print "--> Count of individual element tags throughout XML: ", counts
    
    print "--> problem_stats:", json.dumps(problem_stats, indent=4)

    # combine some into "qual_axis" and others into "quant_axis"
    qual_axis = ['openassessment', 'optionresponse', 'multiplechoiceresponse', 
                 # 'discussion', 
                 'choiceresponse', 'word_cloud', 
                 'combinedopenended', 'choiceresponse', 'stringresponse', 'textannotation', 'openended', 'lti']
    quant_axis = ['formularesponse', 'numericalresponse', 'customresponse', 'symbolicresponse', 'coderesponse',
                  'imageresponse']

    nqual = 0
    nquant = 0
    for tag, count in counts.items():
        if tag in qual_axis:
            nqual += count
        if tag in quant_axis:
            nquant += count
    
    print "nqual=%d, nquant=%d" % (nqual, nquant)

    nqual_per_week = nqual / nweeks
    nquant_per_week = nquant / nweeks
    total_per_week = nqual_per_week + nquant_per_week

    print "per week: nqual=%6.2f, nquant=%6.2f total=%6.2f" % (nqual_per_week, nquant_per_week, total_per_week)

    # save this overall data in CCDATA
    lock_file(CCDATA)
    ccdfn = path(CCDATA)
    ccd = {}
    if ccdfn.exists():
        for k in csv.DictReader(open(ccdfn)):
            ccd[k['course_id']] = k
    
    ccd[course_id] = {'course_id': course_id,
                      'nweeks': nweeks,
                      'nqual_per_week': nqual_per_week,
                      'nquant_per_week': nquant_per_week,
                      'total_assessments_per_week' : total_per_week,
                      }

    # fields = ccd[ccd.keys()[0]].keys()
    fields = ['course_id', 'nquant_per_week', 'total_assessments_per_week', 'nqual_per_week', 'nweeks']
    cfp = open(ccdfn, 'w')
    dw = csv.DictWriter(cfp, fieldnames=fields)
    dw.writeheader()
    for cid, entry in ccd.items():
        dw.writerow(entry)
    cfp.close()
    lock_file(CCDATA, release=True)

    # store data in course_metainfo table, which has one (course_id, key, value) on each line
    # keys include nweeks, nqual, nquant, count_* for module types *

    cmfields = OrderedDict()
    cmfields['course_id'] = course_id
    cmfields['course_length_days'] = str(ndays)
    cmfields.update({ make_key('listings_%s' % key) : value for key, value in data.items() })	# from course listings
    cmfields.update(ccd[course_id].copy())

    # cmfields.update({ ('count_%s' % key) : str(value) for key, value in counts.items() })	# from content counts

    cmfields['filename_xbundle'] = xbfn
    cmfields['filename_listings'] = lfn

    for key in sorted(counts):	# store counts in sorted order, so that the later generated CSV file can have a predictable structure
        value = counts[key]
        cmfields['count_%s' % key] =  str(value) 	# from content counts

    for key in sorted(problem_stats):	# store problem stats
        value = problem_stats[key]
        cmfields['problem_stat_%s' % key] =  str(value)

    cmfields.update({ ('nexcluded_sub_20_%s' % key) : str(value) for key, value in nexcluded.items() })	# from content counts

    course_dir = find_course_sql_dir(course_id, basedir, datedir, use_dataset_latest)
    csvfn = course_dir / CMINFO

    # manual overriding of the automatically computed fields can be done by storing course_id,key,value data
    # in the CMINFO_OVERRIDES file

    csvfn_overrides = course_dir / CMINFO_OVERRIDES
    if csvfn_overrides.exists():
        print "--> Loading manual override information from %s" % csvfn_overrides
        for ovent in csv.DictReader(open(csvfn_overrides)):
            if not ovent['course_id']==course_id:
                print "===> ERROR! override file has entry with wrong course_id: %s" % ovent
                continue
            print "    overriding key=%s with value=%s" % (ovent['key'], ovent['value'])
            cmfields[ovent['key']] = ovent['value']

    print "--> Course metainfo writing to %s" % csvfn

    fp = open(csvfn, 'w')

    cdw = csv.DictWriter(fp, fieldnames=['course_id', 'key', 'value'])
    cdw.writeheader()

    for k, v in cmfields.items():
        cdw.writerow({'course_id': course_id, 'key': k, 'value': v})
        
    fp.close()

    # build and output course_listings_and_metainfo 

    dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)

    mypath = os.path.dirname(os.path.realpath(__file__))
    clm_table = "course_listing_and_metainfo"
    clm_schema_file = '%s/schemas/schema_%s.json' % (mypath, clm_table)
    clm_schema = json.loads(open(clm_schema_file).read())

    clm = {}
    for finfo in clm_schema:
        field = finfo['name']
        clm[field] = cmfields.get(field)
    clm_fnb = clm_table + ".json"
    clm_fn = course_dir / clm_fnb
    open(clm_fn, 'w').write(json.dumps(clm))

    gsfnp = gsutil.gs_path_from_course_id(course_id, use_dataset_latest=use_dataset_latest) / clm_fnb
    print "--> Course listing + metainfo uploading to %s then to %s.%s" % (gsfnp, dataset, clm_table)
    sys.stdout.flush()
    gsutil.upload_file_to_gs(clm_fn, gsfnp)
    bqutil.load_data_to_table(dataset, clm_table, gsfnp, clm_schema, wait=True, verbose=False)

    # output course_metainfo

    table = 'course_metainfo'
    dataset = bqutil.course_id2dataset(course_id, use_dataset_latest=use_dataset_latest)

    gsfnp = gsutil.gs_path_from_course_id(course_id, use_dataset_latest=use_dataset_latest) / CMINFO
    print "--> Course metainfo uploading to %s then to %s.%s" % (gsfnp, dataset, table)
    sys.stdout.flush()

    gsutil.upload_file_to_gs(csvfn, gsfnp)

    mypath = os.path.dirname(os.path.realpath(__file__))
    SCHEMA_FILE = '%s/schemas/schema_course_metainfo.json' % mypath
    the_schema = json.loads(open(SCHEMA_FILE).read())[table]

    bqutil.load_data_to_table(dataset, table, gsfnp, the_schema, wait=True, verbose=False, format='csv', skiprows=1)
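
The metainfo CSV written above holds one (course_id, key, value) row per field. As a rough sketch (not part of the original script), the file can be read back into a per-course dict using the same column names:

import csv

def read_course_metainfo(csvfn):
    # rebuild {course_id: {key: value}} from the rows written by the DictWriter above
    info = {}
    for row in csv.DictReader(open(csvfn)):
        info.setdefault(row['course_id'], {})[row['key']] = row['value']
    return info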
Example #35
0
    def date_parse(field):
        (m, d, y) = map(int, data[field].split('/'))
        return datetime.datetime(y, m, d)
    for w in windows:

        user50 = """SELECT user_id FROM
        (SELECT user_id, count(*) n FROM [data-science-55:movielens100k.ratings] GROUP BY 1)
        WHERE n>50
        """

        core = """
        SELECT 
            a.user_id user_id, b.movie_id movie_id, b.rating rating, b.timestamp timestamp
        FROM (""" + user50 + """) as a
        LEFT JOIN [data-science-55:movielens100k.ratings] as b
        ON a.user_id = b.user_id
        """

        dGenres = ", ".join(list(map(lambda x: "d." + x + " " + x, genres)))
        core_genres = """
        SELECT c.user_id user_id, c.rating rating, c.timestamp timestamp, """ + dGenres + """ 
        FROM (""" + core + """) as c 
        LEFT JOIN [data-science-55:movielens100k.movies] as d
        ON c.movie_id = d.movie_id
        """

        lag = "SELECT user_id, timestamp, " + genre + ", rating"

        for i in range(1, w + 1):
            lag += ", LAG(" + genre + "," + str(
                i
            ) + ") OVER (PARTITION BY user_id ORDER BY timestamp ASC) as genre_" + str(
                i)
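
The loop above assembles lagged per-genre columns with BigQuery's LAG window function; `genre` and `w` come from surrounding code that is elided here. As a rough illustration with assumed values (genre 'Action', w = 2), the string it builds looks like this:

genre = "Action"   # assumed example value; the original takes this from the elided outer loop
w = 2
lag = "SELECT user_id, timestamp, " + genre + ", rating"
for i in range(1, w + 1):
    lag += (", LAG(" + genre + "," + str(i)
            + ") OVER (PARTITION BY user_id ORDER BY timestamp ASC) as genre_" + str(i))
print(lag)
# prints (wrapped here for readability):
# SELECT user_id, timestamp, Action, rating,
#   LAG(Action,1) OVER (PARTITION BY user_id ORDER BY timestamp ASC) as genre_1,
#   LAG(Action,2) OVER (PARTITION BY user_id ORDER BY timestamp ASC) as genre_2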
def main():
    """Create the model and start the training."""

    gpu_id_2 = 1
    gpu_id_1 = 0
    
    w, h = map(int, args.input_size.split(','))
    input_size = (w, h)

    w, h = map(int, args.input_size_target.split(','))
    input_size_target = (w, h)

    cudnn.enabled = True
    # gpu = args.gpu

    # Create network
    if args.model == 'DeepLab':
        model = DeeplabMulti(num_classes=args.num_classes)
        if args.restore_from[:4] == 'http' :
            print("from url")
            saved_state_dict = model_zoo.load_url(args.restore_from)
        else:
            print("from restore")
            saved_state_dict = torch.load(args.restore_from)
            #saved_state_dict = torch.load('/home/zhangjunyi/hjy_code/AdaptSegNet/snapshots/GTA2Cityscapes_multi_result1/GTA5_60000.pth')
            #model.load_state_dict(saved_state_dict)

        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Scale.layer5.conv2d_list.3.weight
            i_parts = i.split('.')
            # print i_parts
            if not args.num_classes == 19 or not i_parts[1] == 'layer5':
                new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
                # print i_parts
        # model.load_state_dict(new_params)

    model.train()
    model.cuda(gpu_id_2)

    cudnn.benchmark = True

    # init D
    model_D1 = FCDiscriminator(num_classes=args.num_classes)
    #model_D2 = model_D1
    model_D2 = FCDiscriminator(num_classes=args.num_classes)

    model_D1.train()
    model_D1.cuda(gpu_id_1)

    model_D2.train()
    model_D2.cuda(gpu_id_1)

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(
        GTA5DataSet(args.data_dir, args.data_list, max_iters=args.num_steps * args.iter_size * args.batch_size,
                    crop_size=input_size,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
        batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)

    trainloader_iter = enumerate(trainloader)
    _, batch_last = trainloader_iter.__next__()

    targetloader = data.DataLoader(cityscapesDataSet(args.data_dir_target, args.data_list_target,
                                                     max_iters=args.num_steps * args.iter_size * args.batch_size,
                                                     crop_size=input_size_target,
                                                     scale=False, mirror=args.random_mirror, mean=IMG_MEAN,
                                                     set=args.set),
                                   batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers,
                                   pin_memory=True)
    # print(args.num_steps * args.iter_size * args.batch_size, trainloader.__len__())

    targetloader_iter = enumerate(targetloader)
    _, batch_last_target = targetloader_iter.__next__()

    # for i in range(200):
    #     _, batch = targetloader_iter.__next__()
    # exit()

    # implement model.optim_parameters(args) to handle different models' lr setting

    optimizer = optim.SGD(model.optim_parameters(args),
                          lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    optimizer.zero_grad()

    optimizer_D1 = optim.Adam(model_D1.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))
    optimizer_D1.zero_grad()

    optimizer_D2 = optim.Adam(model_D2.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))
    optimizer_D2.zero_grad()

    bce_loss = torch.nn.MSELoss()
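    # note: despite its name, bce_loss above is torch.nn.MSELoss, i.e. a least-squares
    # adversarial objective (LSGAN-style) rather than binary cross-entropy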

    def upsample_(input_):
        return nn.functional.interpolate(input_, size=(input_size[1], input_size[0]), mode='bilinear', align_corners=False)

    def upsample_target(input_):
        return nn.functional.interpolate(input_, size=(input_size_target[1], input_size_target[0]), mode='bilinear', align_corners=False)

    interp = upsample_
    interp_target = upsample_target

    # labels for adversarial training
    source_label = 1
    target_label = -1
    mix_label = 0

    for i_iter in range(args.num_steps):

        loss_seg_value1 = 0
        loss_adv_target_value1 = 0
        loss_D_value1 = 0
        
        number1 = 0
        number2 = 0

        loss_seg_value2 = 0
        loss_adv_target_value2 = 0
        loss_D_value2 = 0

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)

        optimizer_D1.zero_grad()
        optimizer_D2.zero_grad()
        adjust_learning_rate_D(optimizer_D1, i_iter)
        adjust_learning_rate_D(optimizer_D2, i_iter)
        
        for sub_i in range(args.iter_size):

            # train G

            # don't accumulate grads in D
            for param in model_D1.parameters():
                param.requires_grad = False

            for param in model_D2.parameters():
                param.requires_grad = False

            def result_model(batch, interp_):
                images, labels, _, name = batch
                images = Variable(images).cuda(gpu_id_2)
                labels = Variable(labels.long()).cuda(gpu_id_1)
                pred1, pred2 = model(images)
                pred1 = interp_(pred1)
                pred2 = interp_(pred2)
                pred1_ = pred1.cuda(gpu_id_1)
                pred2_ = pred2.cuda(gpu_id_1)
                return pred1_, pred2_, labels

            beta = 0.95
            # train with source
            # _, batch = trainloader_iter.__next__()
            _, batch = trainloader_iter.__next__()
            _, batch_target = targetloader_iter.__next__()
            pred1, pred2, labels = result_model(batch, interp)
            loss_seg1, new_labels = loss_calc(pred1, labels, gpu_id_1, beta)
            labels = new_labels
            number1 = torch.sum(labels==255).item()
            loss_seg2, new_labels = loss_calc(pred2, labels, gpu_id_1, beta)
            loss = loss_seg2 + args.lambda_seg * loss_seg1
            loss = loss / args.iter_size
            loss.backward()
            loss_seg_1 = loss_seg1.data.cpu().numpy() / args.iter_size
            loss_seg_2 = loss_seg2.data.cpu().numpy() / args.iter_size
            # print(loss_seg_1, loss_seg_2)
            
            pred1, pred2, labels = result_model(batch_target, interp_target)
            loss_seg1, new_labels = loss_calc(pred1, labels, gpu_id_1)
            labels = new_labels
            number2 = torch.sum(labels==255).item()
            loss_seg2, new_labels = loss_calc(pred2, labels, gpu_id_1)
            loss = loss_seg2 + args.lambda_seg * loss_seg1
            loss = loss / args.iter_size
            loss.backward()
            
            loss_seg_value1 += loss_seg1.data.cpu().numpy() / args.iter_size
            loss_seg_value2 += loss_seg2.data.cpu().numpy() / args.iter_size
            
            pred1_last_target, pred2_last_target, labels_last_target = result_model(batch_last_target, interp_target)
            pred1_target, pred2_target, labels_target = result_model(batch_target, interp_target)
            
            pred1_target_D = F.softmax((pred1_target), dim=1)
            pred2_target_D = F.softmax((pred2_target), dim=1)
            pred1_last_target_D = F.softmax((pred1_last_target), dim=1)
            pred2_last_target_D = F.softmax((pred2_last_target), dim=1)
            fake1_D = torch.cat((pred1_target_D, pred1_last_target_D), dim=1)
            fake2_D = torch.cat((pred2_target_D, pred2_last_target_D), dim=1)
            D_out_fake_1 = model_D1(fake1_D)
            D_out_fake_2 = model_D2(fake2_D)

            loss_adv_fake1 = bce_loss(D_out_fake_1,
                                       Variable(torch.FloatTensor(D_out_fake_1.data.size()).fill_(source_label)).cuda(
                                            gpu_id_1))

            loss_adv_fake2 = bce_loss(D_out_fake_2,
                                        Variable(torch.FloatTensor(D_out_fake_2.data.size()).fill_(source_label)).cuda(
                                            gpu_id_1))
                                            
            loss_adv_target1 = loss_adv_fake1
            loss_adv_target2 = loss_adv_fake2
            loss = args.lambda_adv_target1 * loss_adv_target1.cuda(gpu_id_1) + args.lambda_adv_target2 * loss_adv_target2.cuda(gpu_id_1)
            loss = loss / args.iter_size
            loss.backward()
            
            pred1, pred2, labels = result_model(batch, interp)
            pred1_target, pred2_target, labels_target = result_model(batch_target, interp_target)
            
            pred1_target_D = F.softmax((pred1_target), dim=1)
            pred2_target_D = F.softmax((pred2_target), dim=1)
            pred1_D = F.softmax((pred1), dim=1)
            pred2_D = F.softmax((pred2), dim=1)
            mix1_D = torch.cat((pred1_target_D, pred1_D), dim=1)
            mix2_D = torch.cat((pred2_target_D, pred2_D), dim=1)


            D_out_mix_1 = model_D1(mix1_D)
            D_out_mix_2 = model_D2(mix2_D)
            
            # D_out1 = D_out1.cuda(gpu_id_1)
            # D_out2 = D_out2.cuda(gpu_id_1)

            
            loss_adv_mix1 = bce_loss(D_out_mix_1,
                                       Variable(torch.FloatTensor(D_out_mix_1.data.size()).fill_(source_label)).cuda(
                                            gpu_id_1))

            loss_adv_mix2 = bce_loss(D_out_mix_2,
                                        Variable(torch.FloatTensor(D_out_mix_2.data.size()).fill_(source_label)).cuda(
                                            gpu_id_1))

            loss_adv_target1 = loss_adv_mix1*2
            loss_adv_target2 = loss_adv_mix2*2

            loss = args.lambda_adv_target1 * loss_adv_target1.cuda(gpu_id_1) + args.lambda_adv_target2 * loss_adv_target2.cuda(gpu_id_1)
            loss = loss / args.iter_size
            loss.backward()
            loss_adv_target_value1 += loss_adv_target1.data.cpu().numpy() / args.iter_size
            loss_adv_target_value2 += loss_adv_target2.data.cpu().numpy() / args.iter_size
            
            # train D

            # bring back requires_grad
            for param in model_D1.parameters():
                param.requires_grad = True

            for param in model_D2.parameters():
                param.requires_grad = True

            pred1_last, pred2_last, labels_last = result_model(batch_last, interp)

            # train with source
            
            pred1 = pred1.detach().cuda(gpu_id_1)
            pred2 = pred2.detach().cuda(gpu_id_1)
            pred1_target = pred1_target.detach().cuda(gpu_id_1)
            pred2_target = pred2_target.detach().cuda(gpu_id_1)
            pred1_last = pred1_last.detach().cuda(gpu_id_1)
            pred2_last = pred2_last.detach().cuda(gpu_id_1)
            pred1_D = F.softmax((pred1), dim=1)
            pred2_D = F.softmax((pred2), dim=1)
            pred1_last_D = F.softmax((pred1_last), dim=1)
            pred2_last_D = F.softmax((pred2_last), dim=1)
            pred1_target_D = F.softmax((pred1_target), dim=1)
            pred2_target_D = F.softmax((pred2_target), dim=1)

            real1_D = torch.cat((pred1_D, pred1_last_D), dim=1)
            real2_D = torch.cat((pred2_D, pred2_last_D), dim=1)
            mix1_D_ = torch.cat((pred1_last_D, pred1_target_D), dim=1)
            mix2_D_ = torch.cat((pred2_last_D, pred2_target_D), dim=1)

            D_out1_real = model_D1(real1_D)
            D_out2_real = model_D2(real2_D)
            D_out1_mix = model_D1(mix1_D_)
            D_out2_mix = model_D2(mix2_D_)

            # D_out1 = D_out1.cuda(gpu_id_1)
            # D_out2 = D_out2.cuda(gpu_id_1)

            loss_D1 = bce_loss(D_out1_real,
                              Variable(torch.FloatTensor(D_out1_real.data.size()).fill_(source_label)).cuda(gpu_id_1))

            loss_D2 = bce_loss(D_out2_real,
                               Variable(torch.FloatTensor(D_out2_real.data.size()).fill_(source_label)).cuda(gpu_id_1))

            loss_D3 = bce_loss(D_out1_mix,
                              Variable(torch.FloatTensor(D_out1_mix.data.size()).fill_(mix_label)).cuda(gpu_id_1))

            loss_D4 = bce_loss(D_out2_mix,
                               Variable(torch.FloatTensor(D_out2_mix.data.size()).fill_(mix_label)).cuda(gpu_id_1))

            loss_D1 = (loss_D1 + loss_D3) / args.iter_size / 2
            loss_D2 = (loss_D2 + loss_D4) / args.iter_size / 2

            loss_D1.backward()
            loss_D2.backward()

            loss_D_value1 += loss_D1.data.cpu().numpy()
            loss_D_value2 += loss_D2.data.cpu().numpy()

            # train with target

            pred1 = pred1.detach().cuda(gpu_id_1)
            pred2 = pred2.detach().cuda(gpu_id_1)
            pred1_target = pred1_target.detach().cuda(gpu_id_1)
            pred2_target = pred2_target.detach().cuda(gpu_id_1)
            pred1_last_target = pred1_last_target.detach().cuda(gpu_id_1)
            pred2_last_target = pred2_last_target.detach().cuda(gpu_id_1)

            pred1_D = F.softmax((pred1), dim=1)
            pred2_D = F.softmax((pred2), dim=1)
            pred1_last_target_D = F.softmax((pred1_last_target), dim=1)
            pred2_last_target_D = F.softmax((pred2_last_target), dim=1)
            pred1_target_D = F.softmax((pred1_target), dim=1)
            pred2_target_D = F.softmax((pred2_target), dim=1)

            fake1_D_ = torch.cat((pred1_target_D, pred1_target_D), dim=1)
            fake2_D_ = torch.cat((pred2_target_D, pred2_target_D), dim=1)
            mix1_D__ = torch.cat((pred1_D, pred1_last_target_D), dim=1)
            mix2_D__ = torch.cat((pred2_D, pred2_last_target_D), dim=1)

            # pred_target1 = pred_target1.detach().cuda(gpu_id_1)
            # pred_target2 = pred_target2.detach().cuda(gpu_id_1)

            D_out1 = model_D1(fake1_D_)
            D_out2 = model_D2(fake2_D_)
            D_out3 = model_D1(mix1_D__)
            D_out4 = model_D2(mix2_D__)

           
            # D_out1 = D_out1.cuda(gpu_id_1)
            # D_out2 = D_out2.cuda(gpu_id_1)

            loss_D1 = bce_loss(D_out1,
                              Variable(torch.FloatTensor(D_out1.data.size()).fill_(target_label)).cuda(gpu_id_1))

            loss_D2 = bce_loss(D_out2,
                               Variable(torch.FloatTensor(D_out2.data.size()).fill_(target_label)).cuda(gpu_id_1))
            
            loss_D3 = bce_loss(D_out3,
                              Variable(torch.FloatTensor(D_out3.data.size()).fill_(mix_label)).cuda(gpu_id_1))

            loss_D4 = bce_loss(D_out4,
                               Variable(torch.FloatTensor(D_out4.data.size()).fill_(mix_label)).cuda(gpu_id_1))

            loss_D1 = (loss_D1+loss_D3) / args.iter_size / 2
            loss_D2 = (loss_D2+loss_D4) / args.iter_size / 2

            loss_D1.backward()
            loss_D2.backward()

            batch_last, batch_last_target = batch, batch_target
            loss_D_value1 += loss_D1.data.cpu().numpy()
            loss_D_value2 += loss_D2.data.cpu().numpy()

        optimizer.step()
        optimizer_D1.step()
        optimizer_D2.step()

        print('exp = {}'.format(args.snapshot_dir))
        print(
        'iter = {0:8d}/{1:8d}, loss_seg1 = {2:.3f} loss_seg2 = {3:.3f} loss_adv1 = {4:.3f}, loss_adv2 = {5:.3f} loss_D1 = {6:.3f} loss_D2 = {7:.3f}, number1 = {8}, number2 = {9}'.format(
            i_iter, args.num_steps, loss_seg_value1, loss_seg_value2, loss_adv_target_value1, loss_adv_target_value2, loss_D_value1, loss_D_value2, number1, number2))

        if i_iter >= args.num_steps_stop - 1:
            print ('save model ...')
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '.pth'))
            torch.save(model_D1.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '_D1.pth'))
            torch.save(model_D2.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(args.num_steps_stop) + '_D2.pth'))
            break

        if i_iter % args.save_pred_every == 0:
            print ('taking snapshot ...')
            torch.save(model.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))
            torch.save(model_D1.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '_D1.pth'))
            torch.save(model_D2.state_dict(), osp.join(args.snapshot_dir, 'GTA5_' + str(i_iter) + '_D2.pth'))
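
The loop above freezes the discriminators' parameters while the segmentation network is updated, then unfreezes them for their own update, so each adversarial loss only reaches the intended model. A minimal sketch of that freeze/unfreeze pattern, with toy linear layers standing in for the real networks (names and sizes below are made up for illustration):

import torch
import torch.nn as nn

G = nn.Linear(8, 8)                       # stands in for the segmentation network
D = nn.Linear(8, 1)                       # stands in for an FCDiscriminator
opt_G = torch.optim.SGD(G.parameters(), lr=0.01)
opt_D = torch.optim.SGD(D.parameters(), lr=0.01)
mse = nn.MSELoss()
x = torch.randn(4, 8)

# generator step: freeze D so the adversarial loss only updates G
for p in D.parameters():
    p.requires_grad = False
adv = mse(D(G(x)), torch.full((4, 1), 1.0))   # push D's output toward the "source" label
adv.backward()
opt_G.step()
opt_G.zero_grad()

# discriminator step: unfreeze D and train it on detached generator outputs
for p in D.parameters():
    p.requires_grad = True
d_loss = mse(D(G(x).detach()), torch.full((4, 1), 0.0))
d_loss.backward()
opt_D.step()
opt_D.zero_grad()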
Example #38
0
def getKSubset(items, k): #get all k sized subsets of items (given in parameter items)
    return map(set, itertools.combinations(items, k))
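
map returns a lazy iterator in Python 3, so the subsets are usually materialized with list() before reuse. A small self-contained usage check of the same helper:

import itertools

def getKSubset(items, k):   # same helper as above
    return map(set, itertools.combinations(items, k))

print(list(getKSubset([1, 2, 3], 2)))   # [{1, 2}, {1, 3}, {2, 3}]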
Example #39
0

QCoreApplication.setOrganizationName("QGIS")
QCoreApplication.setOrganizationDomain("qgis.org")
QCoreApplication.setApplicationName("QGIS3")

s = QSettings()

ba = bytes(s.value("/UI/geometry"))

f = open("src/app/ui_defaults.h", "w")

f.write("#ifndef UI_DEFAULTS_H\n#define UI_DEFAULTS_H\n\nstatic const unsigned char defaultUIgeometry[] =\n{\n")

for chunk in chunks(ba, 16):
    f.write("  %s,\n" % ", ".join(map(lambda x: "0x%02x" % x, chunk)))

f.write("};\n\nstatic const unsigned char defaultUIstate[] =\n{\n")

ba = bytes(s.value("/UI/state"))

for chunk in chunks(ba, 16):
    f.write("  %s,\n" % ", ".join(map(lambda x: "0x%02x" % x, chunk)))

try:
    ba = bytes(s.value("/Composer/geometry"))

    f.write("};\n\nstatic const unsigned char defaultComposerUIgeometry[] =\n{\n")

    for chunk in chunks(ba, 16):
        f.write("  %s,\n" % ", ".join(map(lambda x: "0x%02x" % x, chunk)))
Example #40
0
File: a.py  Project: kirdmiv/Code
k, n, s, p = map(int, input().split())
print(((n + s - 1) // s * k + p - 1) // p)
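
Both divisions rely on the integer ceiling-division idiom: for positive integers, (a + b - 1) // b equals ceil(a / b). A quick sanity check of the idiom:

import math

b = 3
for a in (1, 5, 6, 7, 100):
    assert (a + b - 1) // b == math.ceil(a / b)
print("ceiling-division idiom verified for the sampled values")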
Example #41
0
File: 1149.py  Project: khj68/workspace
n = int(input())


dp = [[-1]*3 for _ in range(n)]
cost = []
for i in range(n) :
    cost.append(list(map(int, input().split())))


# print(cost)

# print(dp)


for i in range(n) :
  if i == 0 :
    dp[i][0] = cost[i][0]
    dp[i][1] = cost[i][1]
    dp[i][2] = cost[i][2]
    continue

  dp[i][0] = cost[i][0] + min(dp[i-1][1], dp[i-1][2])
  dp[i][1] = cost[i][1] + min(dp[i-1][0], dp[i-1][2])
  dp[i][2] = cost[i][2] + min(dp[i-1][0], dp[i-1][1])

print(min(dp[i][0], dp[i][1], dp[i][2]))
  
Example #42
0
from math import pi
T = int(input())
for _ in range(T):
	R, C = map(int, input().split())
	h = R - (R**2 - ((C**2)/4))**0.5
	A = pi*(R-h)**2
	print(A)
Example #43
0
def scoreboard(self):
    inning_scores = ' | '.join(list(map(str, self.scores)))
    return ' | '.join(
        [self.__short_name, inning_scores,
         str(self.total_score)]) + ' | '
Example #44
0
def formatted_id(self):
    if isinstance(self.id, basestring):
        return self.id
    return self.separator.join(map(unicode, self.id))
Example #45
0
	for i in range(len(list_start)):
		length_list.append([list_start[i],list_end[i]])

	for elem in length_list:
		if elem not in length_list_nodup:
			length_list_nodup.append(elem)

	length_list = length_list_nodup

	if(len(list_start) == 1):
		out1.write(end_output[k1]['chr'] + "\t" + str(list_start[-1]) + "\t" + str(list_end[-1]) + "\t" + str(k1) + "\t" + end_output[k1]['strand'] + "\t" + "single_0" + "\n")
		out2.write(end_output[k1]['chr'] + "\t" + str(list_start[-1]) + "\t" + str(list_end[-1]) + "\t" + str(k1) + "\t" + end_output[k1]['strand'] + "\t" + "single_0" + "\n")
		#print(k1 + "\t" + "single_0")
		continue

	if(all(map(lambda x: x == list_start[0], list_start))):
		end_output[k1]['result'] = "Tandem UTRs"
	else:
		end_output[k1]['result'] = "Alternative Last Exons"
	
	#print(k1 + "\t" + end_output[k1]['result'])
	

	out1.write(end_output[k1]['chr'] + "\t" + str(list_start[-1]) + "\t" + str(list_end[-1]) + "\t" + str(k1)+";")
	for elem in length_list:
		out1.write(str(elem[0]) + "," + str(elem[1]) + ";")
	out1.write("\t" + end_output[k1]['strand'] + "\t" + end_output[k1]['result'] + "\n")
	out2.write(end_output[k1]['chr'] + "\t" + str(list_start[-1]) + "\t" + str(list_end[-1]) + "\t" + str(k1) + "\t" + end_output[k1]['strand'] + "\t" + end_output[k1]['result'] + "\n")

out1.close()
out2.close()
Example #46
0
def __create_teams(self):
    return list(map(Team.create, self.__TEAM_NAMES))
Example #47
0
File: generator.py  Project: ceolson/flu
def headstem_parser(string):
    head,stem = map(int,string.split(','))
    return head,stem
Example #48
0
k, l = map(int, input().split())
students = []

for i in range(l):
    students.append(input())

filt = set(students)

last = []
for i in range(l - 1, -1, -1):
    if not {students[i]}.issubset(filt):
        last.append(students[i])

print(students)
print(last)
for i in range(len(last) - 1, len(last) - k - 1, -1):
    print(last[i])

#
# for i in range(l):
#     temp = int(input())
#     for j in range(len(students)):
#         if students[j] == temp:
#             del students[j]
#             break
#     students.append(temp)
#     if len(students) > k:
#         break
#
# for i in range(k):
Example #49
0
def url_list(repo_urls):
    if IS_USER:
        repo_urls = [
            ensure_ssh_url(url) if can_push(url) else url for url in repo_urls
        ]
    return map(unixpath, repo_urls)
Example #50
0
File: 16.py  Project: Hopw06/Python
# Question: Use a list comprehension to square each odd number in a list. The list is input as a sequence of comma-separated numbers. Suppose the input 1,2,3,4,5,6,7,8,9 is supplied: the odd numbers are 1,3,5,7,9, so this solution prints their squares, [1, 9, 25, 49, 81].
inps = map(int, input().split(','))
print([x**2 for x in inps if x & 1 == 1])
Example #51
0
#ma1= ma.masked_where(ind!=8.0,ma1)
#ma2= ma.masked_where(ind!=8.0,ma2)

cmap = plt.cm.YlGn
#bounds=[-0.1,0.0,0.01,0.05,0.1,0.5,1,2,3,4,5,6]
bounds=[0.0,20,40,60,80,100,120,140,160,180,200,220]

norm = colors.BoundaryNorm(bounds, cmap.N)

fig = plt.figure(figsize=(20,10))

ax1 = fig.add_subplot(222)
#ax1.set_title("Maize ISAM (t grains / ha cropland)",fontsize=20)
map = Basemap(projection ='cyl', llcrnrlat=-62, urcrnrlat=90,llcrnrlon=-180, urcrnrlon=180, resolution='c')
#map = Basemap(projection ='cyl', llcrnrlat=-15, urcrnrlat=40,llcrnrlon=55, urcrnrlon=145, resolution='c')
x,y = map(lona11,lat)

ma1= ma.masked_where(ma1<=0.0,ma1)

map.drawcoastlines()
map.drawcountries()
map.drawmapboundary()
#ncvar_y=maskoceans(x1,y1,ncvar_y)
gk=(1/meareaisam*gridarea/10000)
a1=ma1/gridarea*10000
cs1 = map.pcolormesh(x,y,ma1,cmap=cmap,norm=norm)
#cs1 = map.pcolormesh(x,y,ma1,cmap=plt.cm.gist_earth,vmin=0,vmax=10)

plt.axis('off')
cbar = map.colorbar(cs1,location='bottom',size="5%",pad="2%",ticks=bounds)
cbar.ax.tick_params(labelsize=16)
Example #52
0
import sys

N = int(sys.stdin.readline())
numbers = []

for _ in range(N):
    numbers.append(list(map(int, sys.stdin.readline().split())))
    numbers.reverse()

numbers.sort(key=lambda x:(x[1], x[0]))

for i in numbers:
    print("%d %d" %(i[0], i[1]))
Example #53
0
for t in range(1, int(input()) + 1):
    W, H, N = map(int, input().split())
    road = []
    for n in range(N):
        x, y = map(int, input().split())
        road.append([y, x])

    cnt = 0
    for i in range(N - 1):
        # When both y and x increase or both decrease, add only the larger of the two differences.
        pre_y, next_y = road[i][0], road[i + 1][0]
        pre_x, next_x = road[i][1], road[i + 1][1]
        if (pre_y < next_y and pre_x < next_x) or (pre_y > next_y
                                                   and pre_x > next_x):
            my, mx = abs(next_y - pre_y), abs(next_x - pre_x)
            if my >= mx:
                cnt += my
            else:
                cnt += mx
        # When only one of them increases or decreases, add both differences.
        else:
            my, mx = abs(next_y - pre_y), abs(next_x - pre_x)
            cnt += my + mx

    print('#{} {}'.format(t, cnt))
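
The two branches above implement a simple rule: when consecutive checkpoints move in the same direction on both axes, the cost is the larger of the two coordinate differences (diagonal-style movement); otherwise it is their sum. A small hypothetical helper, not in the original, that mirrors that rule:

def step_cost(pre, nxt):
    # pre and nxt are (y, x) pairs, matching the road list built above
    dy, dx = nxt[0] - pre[0], nxt[1] - pre[1]
    if (dy > 0 and dx > 0) or (dy < 0 and dx < 0):
        return max(abs(dy), abs(dx))    # both coordinates move the same way
    return abs(dy) + abs(dx)            # otherwise each unit of dy and dx costs a step

print(step_cost((0, 0), (3, 2)), step_cost((0, 0), (3, -2)))   # 3 5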
Example #54
0
def repo_list(repo_urls, checkout_dir):
    repo_dirs = get_repo_dirs(repo_urls, checkout_dir)
    repo_dirs = map(unixpath, repo_dirs)
    return repo_urls, repo_dirs
Example #55
0
def main():
    random.seed(64)

    # create an initial population of 300 individuals (where
    # each individual is a list of integers)
    pop = toolbox.population(n=300)

    # CXPB  is the probability with which two individuals
    #       are crossed
    #
    # MUTPB is the probability for mutating an individual
    #
    # NGEN  is the number of generations for which the
    #       evolution runs
    CXPB, MUTPB, NGEN = 0.5, 0.2, 40

    print("Start of evolution")

    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit

    print("  Evaluated %i individuals" % len(pop))

    # Begin the evolution
    for g in range(NGEN):
        print("-- Generation %i --" % g)

        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):

            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)

                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:

            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit

        print("  Evaluated %i individuals" % len(invalid_ind))

        # The population is entirely replaced by the offspring
        pop[:] = offspring

        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]

        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean**2)**0.5

        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)

    print("-- End of (successful) evolution --")

    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
Example #56
0
File: core.py  Project: zonca/astropy
def strip_units(*arrs):
    strip = lambda a: None if a is None else np.asarray(a)
    if len(arrs) == 1:
        return strip(arrs[0])
    else:
        return map(strip, arrs)
Example #57
0
# Lambda expression: syntax that lets a function be written in a much shorter form
# map(): lets you get the result of applying a function to many elements at once
'''
add= lambda x, y: x+y
print(add(1, 2))
'''

list1=[1,2,3,4,5]
list2=[6,7,8,9,10]
my_function=lambda a, b: a*b
result=map(my_function, list1, list2) # pairs up the elements at the same index in each list (here, multiplying them)
print(list(result))
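
The same index-wise pairing can be written without map; a list comprehension over zip gives an identical result and is often easier to read:

list1 = [1, 2, 3, 4, 5]
list2 = [6, 7, 8, 9, 10]
print([a * b for a, b in zip(list1, list2)])   # [6, 14, 24, 36, 50]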
Example #58
0
#!usr/bin/env python

import sys

TABLE_CRAZY = (
    (1, 0, 0),
    (1, 0, 2),
    (2, 2, 1)
)

ENCRYPT = list(map(ord,
              '5z]&gqtyfr$(we4{WP)H-Zn,[%\\3dL+Q;>U!pJS72FhOA1CB'\
              '6v^=I_0/8|jsb9m<.TVac`uY*MK\'X~xDl}REokN:#?G\"i@'))

OPS_VALID = (4, 5, 23, 39, 40, 62, 68, 81)

POW9, POW10 = 3**9, 3**10

# --------------------------------------------------

def rotate(n):
    return POW9*(n%3) + n//3

def crazy(a, b):
    result = 0
    d = 1

    for i in range(10):
        result += TABLE_CRAZY[int((b/d)%3)][int((a/d)%3)] * d
        d *= 3
Example #59
0
tot = 0
n = float(input())
m = int(input())
for _ in range(m):
  l, w = map(float, input().split())
  tot += l * w * n
  print(tot)




Example #60
0
dx = [0, 1, 0, -1]  # restored to pair with dy below; the excerpt omitted it (any ordering covering the four neighbours works)
dy = [1, 0, -1, 0]


def dfs(x, y, ground, N, M):
    ground[x][y] = False

    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]

        if 0 <= nx < M and 0 <= ny < N and ground[nx][ny]:
            dfs(nx, ny, ground, N, M)


for _ in range(int(input())):
    result = 0
    M, N, K = map(int, input().split())
    ground = [[False] * N for _ in range(M)]

    for _ in range(K):
        X, Y = map(int, input().split())
        ground[X][Y] = True

    for i in range(M):
        for j in range(N):
            if ground[i][j]:
                dfs(i, j, ground, N, M)
                result += 1

    print(result)
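
One caveat about the recursive flood fill above: CPython's default recursion limit (roughly 1000 frames) can be exceeded when a large region is filled. A common workaround, shown as a sketch assuming the inputs stay modest, is to raise the limit before processing the test cases:

import sys
sys.setrecursionlimit(10**6)   # avoid RecursionError on deep DFS chains over large grids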