Example #1
import maya.cmds as cmds

def lollipopController(name, color = 0, size = [1, 1, 1], lockAttr = ['tr', 'ro', 'sc', 'vi'], lock = True):

        #cmds.createNode('transform', n =  '%s_grp' %name)
        cmds.curve(n = name, d = 1, p = [ (0, 0, 0), (0, 3, 0),(0, 4, 1),(0, 5, 0), (0, 4, -1),(0, 3, 0),(0, 4, 1),(0, 4, -1)], k = [1,2,3,4,5,6,7,8])
        cmds.scale(size[0],size[1],size[2]) 
        for attr in lockAttr:
                if attr == 'tr':
                        cmds.setAttr('%s.translateX' %name, lock = lock, keyable = False, channelBox = False)
                        cmds.setAttr('%s.translateY' %name, lock = lock, keyable = False, channelBox = False)
                        cmds.setAttr('%s.translateZ' %name, lock = lock, keyable = False, channelBox = False)
                elif attr == 'ro':
                        cmds.setAttr('%s.rotateX' %name, lock = lock, keyable = False, channelBox = False)
                        cmds.setAttr('%s.rotateY' %name, lock = lock, keyable = False, channelBox = False)
                        cmds.setAttr('%s.rotateZ' %name, lock = lock, keyable = False, channelBox = False)
                elif attr == 'sc':
                        cmds.setAttr('%s.scaleX' %name, lock = lock, keyable = False, channelBox = False)
                        cmds.setAttr('%s.scaleY' %name, lock = lock, keyable = False, channelBox = False)
                        cmds.setAttr('%s.scaleZ' %name, lock = lock, keyable = False, channelBox = False)
                elif attr == 'vi':
                        cmds.setAttr('%s.visibility' %name, lock = lock, keyable = False, channelBox = False)
                
        shapeNode = cmds.listRelatives(name, shapes = True)
                
        cmds.setAttr('%s.overrideEnabled' %shapeNode[0], 1)
        cmds.setAttr('%s.overrideColor' %shapeNode[0], color)

        util.group(name, '%s_grp' %name)
        
        return name
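A minimal usage sketch for the controller above (assumes a running Maya session and the util module these snippets rely on; the control name, color index, and sizes are illustrative):

# yellow control at double size, everything locked except rotation
ctrl = lollipopController('head_ctrl', color=17, size=[2, 2, 2], lockAttr=['tr', 'sc', 'vi'])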
Example #2
def filter_train_inverses(train_graph, dset):

    inverse = {
        '_member_of_domain_topic': '_synset_domain_topic_of',
        '_member_meronym': '_member_holonym',
        '_derivationally_related_form': '_derivationally_related_form',
        '_member_of_domain_region': '_synset_domain_region_of',
        '_hypernym': '_hyponym',
        '_member_holonym': '_member_meronym',
        '_instance_hypernym': '_instance_hyponym',
        '_synset_domain_topic_of': '_member_of_domain_topic',
        '_hyponym': '_hypernym',
        '_instance_hyponym': '_instance_hypernym',
        '_synset_domain_usage_of': '_member_of_domain_usage',
        '_has_part': '_part_of',
        '_part_of': '_has_part',
        '_synset_domain_region_of': '_member_of_domain_region',
    }

    def has_train_inverse(pq):
        s, r, t = pq.s, pq.r[0], pq.t
        if r not in inverse:
            return False

        try:
            return s in train_graph.neighbors[t][inverse[r]]
        except KeyError:
            return False

    grouped = util.group(dset, has_train_inverse)
    easy, hard = grouped[True], grouped[False]

    return easy, hard
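util.group is not shown in these snippets. From the call above it appears to partition an iterable into a dict keyed by the given function's return value; a minimal sketch consistent with that usage:

from collections import defaultdict

def group(items, key_func):
    # assumed behavior: map key_func(item) -> list of matching items
    groups = defaultdict(list)
    for item in items:
        groups[key_func(item)].append(item)
    return dict(groups)

With has_train_inverse as the key, grouped[True] holds the queries whose inverse edge already appears in the training graph (the 'easy' set) and grouped[False] the rest (the 'hard' set).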
Example #4
def flattenDependencyList(dependencies):
	flatlist = []
	for file in dependencies.keys():
		if dependencies[file]:
			filename = os.path.splitext(os.path.split(file)[1])[0]
			deplist = sorted(dependencies[file], key=str.lower)
			for depgroup in group(deplist, DEPENDENCIES_PER_LINE):
				flatlist.append("$(O)\\%s.obj: %s" % (filename, " ".join(depgroup)))
	return flatlist
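Here group is called with an integer, which suggests a second, chunking variant that splits a sequence into fixed-size runs (DEPENDENCIES_PER_LINE dependencies per makefile line). A plausible sketch of that variant:

def group(seq, n):
    # assumed behavior: yield consecutive chunks of at most n items
    for i in range(0, len(seq), n):
        yield seq[i:i + n]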
Example #5
def flattenDependencyList(dependencies):
    flatlist = []
    for file in dependencies.keys():
        if dependencies[file]:
            filename = os.path.splitext(os.path.split(file)[1])[0]
            deplist = sorted(dependencies[file], key=str.lower)
            for depgroup in group(deplist, DEPENDENCIES_PER_LINE):
                flatlist.append("$(O)\\%s.obj: %s" % (filename, " ".join(depgroup)))
    return flatlist
Example #7
def flattenDependencyList(dependencies):
	flatlist = []
	for file in dependencies.keys():
		if dependencies[file]:
			opath = getObjectPath(file)
			filename = os.path.splitext(os.path.split(file)[1])[0]
			# TODO: normalizing paths already in prependPath makes getObjectPath fail under cygwin
			deplist = sorted(dependencies[file], key=lambda s: str.lower(s.replace("/", "\\")))
			for depgroup in group(deplist, DEPENDENCIES_PER_LINE):
				flatlist.append("%s\\%s.obj: %s" % (opath, filename, " ".join(depgroup)))
	return flatlist
Example #8
def compute_best_thresholds(examples, debug=False):
    # per-relation thresholds
    ex_by_rel = util.group(examples, lambda q: q.r[0])
    thresholds = {}
    for relation, examples_r in util.verboserate(ex_by_rel.items()):
        if debug:
            print relation
        scores = [ex.score for ex in examples_r]
        labels = [ex.label for ex in examples_r]
        thresholds[relation] = util.best_threshold(scores, labels, debug)

    return thresholds
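util.best_threshold is not included above; presumably it scans candidate cut points and keeps the one that best separates the positive and negative scores. A hedged sketch of that idea:

def best_threshold(scores, labels, debug=False):
    # assumed behavior: pick the score cut point with the highest accuracy;
    # the real util.best_threshold may use a different criterion
    best_acc, best_t = -1.0, 0.0
    for t in sorted(set(scores)):
        preds = [s >= t for s in scores]
        acc = sum(p == l for p, l in zip(preds, labels)) / float(len(labels))
        if acc > best_acc:
            best_acc, best_t = acc, t
    return best_t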
Example #10
def segmented_evaluation(file_path, categorize=None):
    queries = []
    with open(file_path, 'r') as f:
        for line in util.verboserate(f):
            items = line.split('\t')
            s, r, t = items[0], tuple(items[1].split(',')), items[2]
            q = PathQuery(s, r, t)
            quantile_str = items[3]
            q.quantile = float(quantile_str)
            q.num_candidates = int(items[4])
            queries.append(q)

    def single_relation(query):
        if len(query.r) != 1:
            return False
        r = query.r[-1]
        if inverted(r):
            return False
        return r

    # group queries
    if categorize is None:
        categorize = single_relation

    groups = util.group(queries, categorize)

    print 'computing grouped stats'
    stats = defaultdict(dict)
    for key, queries in util.verboserate(groups.iteritems()):
        scores = [q.quantile for q in queries]
        score = np.nanmean(scores)

        def inv_sigmoid(y):
            return -np.log(1.0 / y - 1)

        score2 = inv_sigmoid(score)

        total = len(scores)
        nontrivial = np.count_nonzero(~np.isnan(scores))

        stats[key] = {
            'score': score,
            'score2': score2,
            'total_eval': total,
            'nontrivial_eval': nontrivial
        }

    stats.pop(False, None)
    return pd.DataFrame(stats).transpose()
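inv_sigmoid above is the logit function: -log(1/y - 1) = log(y / (1 - y)), so a mean quantile of 0.5 maps to 0 and values near 0 or 1 are stretched out. For example:

inv_sigmoid(0.5)  # -> 0.0
inv_sigmoid(0.9)  # -> log(9) ~= 2.197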
Example #11
	def drawCostellationsFigures(self):
		from reportlab.lib.colors import Color
		self.c.setStrokeColor(Color( 0.1,0.2, 0.7, alpha=0.5))
		self.c.setLineWidth(0.2)
		figures=catalogues.CostellationFigures()
		costellations=set(map(lambda x:x[0],figures))
		for costellation in costellations:
			data=filter(lambda x:x[0]==costellation,figures)[0]		
			data=list(util.group(data[2:],2))
			for s in data:	
				star1=self.H.search(s[0])
				star2=self.H.search(s[1])
				if star1!=None and star2!=None:
					costellation_line=((star1[4]*180/pi,star1[5]*180/pi),(star2[4]*180/pi,star2[5]*180/pi))
					self.drawLine(costellation_line)
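util.group(data[2:], 2) relies on the chunking variant again: it pairs consecutive star identifiers so each pair defines one line segment of the constellation figure, e.g. (with the chunking behavior sketched earlier):

>>> list(util.group(['star_a', 'star_b', 'star_c', 'star_d'], 2))
[['star_a', 'star_b'], ['star_c', 'star_d']]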
Example #12
def multiperson(img, func, mode):
    if mode == 'multi':
        scales = [2., 1.8, 1.5, 1.3, 1.2, 1., 0.7, 0.5, 0.25]
    else:
        scales = [1]

    height, width = img.shape[0:2]  # e.g. 582, 800; the comments below assume max(height, width) == 800
    center = (width/2, height/2)
    dets, tags = None, []
    for idx, i in enumerate(scales):
        scale = max(height, width)/200
        inp_res = int((i * max(512, max(height, width)) + 3)//4 * 4)
        res = (inp_res, inp_res)  # 800,800
        inp = crop(img, center, scale, res)

        tmp = func([inp, inp[:,::-1]])
        det = tmp['det'][0,:,:] + tmp['det'][1,:,::-1][:,:,flipRef] #det shape [200,200,17]
        if idx == 0:
            dets = det
            mat = get_transform(center, scale, res)[:2]  # shape [2,3]; e.g. mat = [1 0 0; 0 1 109]: no scaling, only a y offset of 109 = (800-582)/2
            mat = np.linalg.pinv(np.array(mat).tolist() + [[0,0,1]])[:2] # pseudo-inverse of the affine transform mat
        else:
            dets = dets + resize(det, dets.shape[0:2]) 

        if abs(i-1)<0.5:
            res = dets.shape[0:2]  # res shape:[200,200]
            tags += [resize(tmp['tag'][0,:,:,:], res), resize(tmp['tag'][1,:,::-1][:,:,flipRef], res)]

    tags = np.concatenate([i[:,:,:,None] for i in tags], axis=3)  #tags shape:[200,200,17,2]
    dets = dets/len(scales)/2
    import cv2
    cv2.imwrite('det.jpg', (tags.mean(axis=3).mean(axis=2) *255).astype(np.uint8))
    grouped = group(dets, tags, 30)
    # grouped shape [30,17,5]: up to 30 detected people, 17 keypoints each;
    # the first three values per keypoint are x, y, prediction
    grouped[:,:,:2] = kpt_affine(grouped[:,:,:2], mat) 

    # filter and assemble the per-person keypoint results
    persons = []
    for val in grouped: # val holds one person's keypoints
        if val[:, 2].max()>0: # the best of this person's 17 predictions must exceed 0
            tmp = {"keypoints": [], "score":float(val[:, 2].mean())}  # score is the mean over the 17 keypoint predictions
            for j in val:  # j is one of the 17 keypoints
                if j[2]>0.: # a keypoint's prediction must exceed 0; otherwise treat it as missed and record [0,0,0]
                    tmp["keypoints"]+=[float(j[0]), float(j[1]), float(j[2])]
                else:
                    tmp["keypoints"]+=[0, 0, 0]
            persons.append(tmp)
    return persons # all people that passed the filter
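kpt_affine is external to this snippet; from its use it maps the (x, y) keypoint coordinates back to the original image frame via the 2x3 inverse transform mat. A sketch under that assumption:

import numpy as np

def kpt_affine(kpts, mat):
    # assumed behavior: apply a 2x3 affine matrix to an array of (x, y) points
    shape = kpts.shape
    pts = kpts.reshape(-1, 2)
    pts = np.concatenate([pts, np.ones((pts.shape[0], 1))], axis=1)
    return np.dot(pts, mat.T).reshape(shape)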
Example #13
def multiperson(img, func, mode):
    if mode == 'multi':
        scales = [2., 1.8, 1.5, 1.3, 1.2, 1., 0.7, 0.5, 0.25]
    else:
        scales = [1]

    height, width = img.shape[0:2]
    center = (width/2, height/2)
    dets, tags = None, []
    for idx, i in enumerate(scales):
        scale = max(height, width)/200
        inp_res = int((i * max(512, max(height, width)) + 3)//4 * 4)
        res = (inp_res, inp_res)
        inp = crop(img, center, scale, res)

        tmp = func([inp, inp[:,::-1]])
        det = tmp['det'][0,:,:] + tmp['det'][1,:,::-1][:,:,flipRef]
        if idx == 0:
            dets = det
            mat = get_transform(center, scale, res)[:2]
            mat = np.linalg.pinv(np.array(mat).tolist() + [[0,0,1]])[:2]
        else:
            dets = dets + resize(det, dets.shape[0:2]) 

        if abs(i-1)<0.5:
            res = dets.shape[0:2]
            tags += [resize(tmp['tag'][0,:,:,:], res), resize(tmp['tag'][1,:,::-1][:,:,flipRef], res)]

    tags = np.concatenate([i[:,:,:,None] for i in tags], axis=3)
    dets = dets/len(scales)/2
    #import cv2
    #cv2.imwrite('det.jpg', (tags.mean(axis=3).mean(axis=2) *255).astype(np.uint8))
    grouped = group(dets, tags, 30)
    grouped[:,:,:2] = kpt_affine(grouped[:,:,:2], mat)

    persons = []
    for val in grouped:
        if val[:, 2].max()>0:
            tmp = {"keypoints": [], "score":float(val[:, 2].mean())}
            for j in val:
                if j[2]>0.:
                    tmp["keypoints"]+=[float(j[0]), float(j[1]), float(j[2])]
                else:
                    tmp["keypoints"]+=[0, 0, 0]
            persons.append(tmp)
    return persons
Example #14
 def drawCostellationsFigures(self):
     from reportlab.lib.colors import Color
     self.c.setStrokeColor(Color(0.1, 0.2, 0.7, alpha=0.5))
     self.c.setLineWidth(0.2)
     figures = catalogues.CostellationFigures()
     costellations = set(map(lambda x: x[0], figures))
     for costellation in costellations:
         data = filter(lambda x: x[0] == costellation, figures)[0]
         data = list(util.group(data[2:], 2))
         for s in data:
             star1 = self.H.search(s[0])
             star2 = self.H.search(s[1])
             if star1 != None and star2 != None:
                 costellation_line = ((star1[4] * 180 / pi,
                                       star1[5] * 180 / pi),
                                      (star2[4] * 180 / pi,
                                       star2[5] * 180 / pi))
                 self.drawLine(costellation_line)
Example #16
def parseFlopRoundLevel(flopcards):
    "Return a value indicating how high the hand ranks"
    # the counts tuple holds how many cards share each point value
    # the points tuple holds the distinct point values, sorted (higher count first, then higher value)
    # Eg. '7 T 7 9 7' => counts=(3,1,1) points=(7,10,9)
    groups = group([card.point for card in flopcards])
    (counts, points) = unzip(groups)

    # for the straight (A,2,3,4,5), treat its values as (1,2,3,4,5)
    if points == (14, 5, 4, 3, 2):
        points = (5, 4, 3, 2, 1)

    # straight check:
    # five distinct values, and the highest and lowest cards differ by 4
    straight = (len(points) == 5) and (max(points) - min(points) == 4)

    # flush check:
    # all five cards share the same suit
    flush = len(set([card.color for card in flopcards])) == 1

    # rank 9 hand types here: straight flush, four of a kind, full house,
    # flush, straight, three of a kind, two pair, one pair, high card
    level = (9 if straight and flush else 8 if (4, 1) == counts else 7 if
             (3, 2) == counts else 6 if flush else 5 if straight else 4 if
             (3, 1, 1) == counts else 3 if (2, 2, 1) == counts else 2 if
             (2, 1, 1, 1) == counts else 1)
    '''
    # print info for all five cards
    print 'All five cards information:'
    for card in flopcards:
        print getColorByIndex(card.color) + '-' + getPointByIndex(card.point)

    # print how many distinct point values the five cards have
    print 'Points Count: ', len(points)

    # compute the hand value
    '''
    value = computeCardsValue(level, points)
    print 'Cards Value: ', value

    return value, level
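group and unzip are not defined in this snippet. The comments imply group counts each point value and sorts the (count, value) pairs descending, while unzip splits those pairs back into separate tuples; a sketch matching the '7 T 7 9 7' example:

def group(items):
    # assumed behavior: (count, value) pairs, highest count first, ties broken by higher value
    pairs = [(items.count(x), x) for x in set(items)]
    return sorted(pairs, reverse=True)

def unzip(pairs):
    return tuple(zip(*pairs))

For the points [7, 10, 7, 9, 7] this yields counts=(3, 1, 1) and points=(7, 10, 9), exactly as the comment describes.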
Example #17
def gen_c_code(langs_ex, strings_dict, file_name, lang_index):
    # This is just to keep the order the same as the old code, which parsed
    # only one translation file, so that the generated C code does not change
    # when switching from the old method to the new one
    langs = [cols[0] for cols in lang_index]
    assert DEFAULT_LANG == langs[0]
    langs_count = len(langs)
    translations_count = len(strings_dict)
    langs_c = [c_escape(tmp) for tmp in langs]
    langs_c = ",\n    ".join([", ".join(ten) for ten in group(langs_c, 10)])

    keys = strings_dict.keys()
    keys.sort(cmp=key_sort_func)
    lines = []
    for lang in langs:
        if DEFAULT_LANG == lang:
            trans = keys
        else:
            trans = get_trans_for_lang(strings_dict, keys, lang)
        lines.append("")
        lines.append("  /* Translations for language %s */" % lang)
        lines += ["  %s," % c_escape(t) for t in trans]
    translations = "\n".join(lines)

    langs_ex.sort(lang_sort_func)
    lang_ids = make_lang_ids(langs_ex, lang_index)
    lang_layouts = make_lang_layouts(lang_index)
    lang_names = [
        '{ "%s", %s, %s, %d },' %
        (lang[0], c_escape(lang[1]), lang_ids[lang[0]], lang_layouts[lang[0]])
        for lang in langs_ex
    ]
    lang_names = "\n    ".join(lang_names)

    file_content = TRANSLATIONS_TXT_C % locals()
    file(file_name, "wb").write(file_content)
Example #18
def createFastSelector(fullList, nameList, funcName, type):
	cases = ["case %s:" % value for (name, value) in fullList if name in nameList]
	return unTab(Template_Selector % (funcName, type, "\n	".join([" ".join(part) for part in group(cases, 4)])))
Example #19
def createTypeEnum(list, type, default):
	list = sorted(list, key=lambda a: a[0])
	parts = group([item[1] for item in list] + [default], 5)
	return unTab(Template_Enumeration % (type, ",\n	".join([", ".join(part) for part in parts])))
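Both helpers lean on the chunking group to wrap the generated C across lines. With the chunking sketch from earlier and a chunk size of 5, for example:

parts = group(['A', 'B', 'C', 'D', 'E', 'F', 'DEFAULT'], 5)
print ",\n\t".join([", ".join(part) for part in parts])
# A, B, C, D, E,
# 	F, DEFAULT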
Example #20
	def readTLE(self,url):
		f=urllib.urlopen(url)
		data=f.read().split('\r\n')
		s=util.group(data,3)
		return s
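A TLE feed lists each satellite as three consecutive lines (a name line plus two element lines), so util.group(data, 3) turns the flat line list into one record per satellite:

>>> lines = ['ISS (ZARYA)', '1 25544U ...', '2 25544 ...',
...          'NOAA 19', '1 33591U ...', '2 33591 ...']
>>> list(util.group(lines, 3))
[['ISS (ZARYA)', '1 25544U ...', '2 25544 ...'], ['NOAA 19', '1 33591U ...', '2 33591 ...']]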