Example #1
def __mkEdge(key, ef1, ef2, el, ec, ed, ev, ei, norm, mapFile, oFile):

    # build the label field for mcal cat
    label = []
    if el:
        for nml in el:
            label.append("$s{" + nml + "}")

    evcdStr = []
    # edge scaling factor is fixed
    er = 10
    if ev:
        evcdStr.append(ev + ":ev")
    if ec:
        evcdStr.append(ec + ":ec")
    if ed:
        evcdStr.append(ed + ":ed")

    f = None
    if el:
        f <<= nm.mcal(c='cat(\"_\",%s)' % (','.join(label)), a="##label", i=ei)
    else:
        f <<= nm.msetstr(v="", a="##label", i=ei)

    if len(evcdStr) == 0:
        f <<= nm.mcut(f="%s:key,%s:nam1,%s:nam2,##label:el" % (key, ef1, ef2))
    else:
        f <<= nm.mcut(f="%s:key,%s:nam1,%s:nam2,##label:el,%s" %
                      (key, ef1, ef2, ','.join(evcdStr)))

    if not ev:
        f <<= nm.msetstr(v="", a="ev")

    if not ed:
        f <<= nm.msetstr(v="", a="ed")

    if not ec:
        f <<= nm.msetstr(v="", a="ec")

    f <<= nm.mnullto(f="key", v="##NULL##")
    f <<= nm.mjoin(k="key", K="nam", m=mapFile, f="num:keyNum")
    f <<= nm.mjoin(k="nam1", K="nam", m=mapFile, f="num:num1,leaf:leaf1")
    f <<= nm.mjoin(k="nam2", K="nam", m=mapFile, f="num:num2,leaf:leaf2")
    if norm:
        f <<= nm.mnormalize(f="ev:ev2", c="range")
        f <<= nm.mcal(c='${ev2}*(%s-1)+1' % (er), a="evv")
        f <<= nm.mcut(
            f="key,nam1,nam2,keyNum,num1,num2,el,evv:ev,ed,ec,leaf1,leaf2",
            o=oFile)
    else:
        f <<= nm.mcut(
            f="key,nam1,nam2,keyNum,num1,num2,el,ev,ed,ec,leaf1,leaf2",
            o=oFile)

    f.run()
Example #2
def __mkMap(key, nf, ni, ef1, ef2, ei, oFile):

    # build the leaf nodes
    infL = [
        nm.mcommon(k=ef1, K=key, m=ei, r=True, i=ei).mcut(f="%s:nam" % (ef1)),
        nm.mcommon(k=ef2, K=key, m=ei, r=True, i=ei).mcut(f="%s:nam" % (ef2))
    ]
    if ni:
        infL.append(
            nm.mcommon(k=nf, K=key, m=ei, r=True,
                       i=ni).mcut(f="%s:nam" % (nf)))

    xleaf = nm.muniq(i=infL, k="nam")
    xleaf <<= nm.msetstr(v=1, a="leaf")

    if ni:
        inp = [
            nm.mcut(f="%s:nam" % (nf), i=ni),
            nm.mcut(f="%s:nam" % (key), i=ni)
        ]

    else:
        inp = [
            nm.mcut(f="%s:nam" % (ef1), i=ei),
            nm.mcut(f="%s:nam" % (ef2), i=ei),
            nm.mcut(f="%s:nam" % (key), i=ei)
        ]

    f = None
    f <<= nm.muniq(k="nam", i=inp)
    f <<= nm.mjoin(k="nam", m=xleaf, f="leaf", n=True)
    # null should come first, so mnumber (instead of mcal) should also number null as 0, but use mcal just to be safe
    f <<= nm.mcal(c='if(isnull($s{nam}),0,line()+1)', a="num")
    f <<= nm.mnullto(f="nam", v="##NULL##", o=oFile)
    f.run()
Example #3
    def __convertToNumeric(self):
        wf1 = self.__tempW.file()
        wf2 = self.__tempW.file()
        wf3 = self.__tempW.file()
        self.mFile = self.__temp.file()
        nm.mcut(f="e1:node", i=self.__wfE, o=wf1).run()
        nm.mcut(f="e2:node", i=self.__wfE, o=wf2).run()
        mcmd = None
        if self.__wfN is not None:
            nm.mcut(f="n:node", i=self.__wfN, o=wf3).run()
            mcmd <<= nm.mcat(i=wf1 + "," + wf2 + "," + wf3, f="node")
        else:
            mcmd <<= nm.mcat(i=wf1 + "," + wf2, f="node")
        mcmd <<= nm.muniq(k="node")
        mcmd <<= nm.mnumber(q=True, a="id", o=self.mFile)
        mcmd.run()
        # convert and save the edge file
        self.eFile = self.__temp.file()
        mcmd = nm.mjoin(i=self.__wfE,
                        m=self.mFile,
                        f="id:id1",
                        k="e1",
                        K="node")
        mcmd <<= nm.mjoin(m=self.mFile, f="id:id2", k="e2", K="node")
        mcmd <<= nm.muniq(k="id1,id2")
        mcmd <<= nm.msortf(f="id1%n,id2%n")
        mcmd <<= nm.mcut(f="id1:e1,id2:e2,no:row_index", o=self.eFile)
        mcmd.run()
        # save the edge file for take-internal use
        self.eFileT = self.__temp.file()
        mcmd = nm.mcal(i=self.eFile, c="cat(\" \",$s{e1},$s{e2})", a="edge")
        mcmd <<= nm.mcut(nfno=True, f="edge", o=self.eFileT)
        mcmd.run()
        # convert and save the node file
        self.nFile = self.__temp.file()
        if self.__wfN is not None:
            mcmd = nm.mjoin(i=self.__wfN,
                            m=self.mFile,
                            f="id",
                            k="n",
                            K="node")
            mcmd <<= nm.muniq(k="id")
            mcmd <<= nm.msortf(f="id%n")
            mcmd <<= nm.mcut(f="id:n,no:row_index", o=self.nFile)
            mcmd.run()
        else:
            # generate nodes from the edges
            wf4 = self.__tempW.file()
            wf5 = self.__tempW.file()
            nm.mcut(i=self.eFile, f="e1:n", o=wf4).run()
            nm.mcut(i=self.eFile, f="e2:n", o=wf5).run()
            mcmd = nm.mcat(i="%s,%s" % (wf4, wf5))
            mcmd <<= nm.muniq(k="n")
            mcmd <<= nm.msortf(f="n%n")
            mcmd <<= nm.mnumber(q=True, a="row_index", o=self.nFile)
            mcmd.run()

        # remove work files
        self.__tempW.rm()
Example #4
def mkIndex(oFile, iFile):
    f = None
    f <<= nm.mcat(i=iFile)
    f <<= nm.mcut(f="date,c")
    f <<= nm.mavg(k="date", f="c")
    f <<= nm.mcal(c="round(${c},1)", a="i")
    f <<= nm.mcut(f="date,i", o="%s" % oFile)
    f.run()
Example #5
def calTime(iFile, oFile):
    f = None
    f <<= nm.mnumber(q=True, a="id", i=iFile)
    f <<= nm.mcal(c='$s{mean}+"("+$s{sd}+")"', a="time")
    f <<= nm.m2cross(k="method", s="dataSize", f="time")
    f <<= nm.msortf(f="id%n")
    f <<= nm.mcut(f="method,10000:small,1000000:middle,100000000:large")
    f <<= nm.mfldname(q=True, o=oFile)
    f.run()
Example #6
    def convSim(self, ifile, ofile, map1, logDir):

        f = None
        f <<= nm.mcut(nfni=True, f="0:tra", i=ifile)
        f <<= nm.msed(f="tra", c=' $', v="")
        f <<= nm.mnumber(q=True, S=1, a="num1")
        f <<= nm.mtra(r=True, f="tra:num11")
        f <<= nm.mnumber(q=True, S=1, a="order")
        f <<= nm.mcal(c='${num11}+1', a="num1")
        f <<= nm.mjoin(k="num1", m=map1, f=self.ef1)
        f <<= nm.mtra(k="num0", s="order%n,num1%n", f=self.ef1)
        f <<= nm.mcut(f=self.ef1, o="{}/{}".format(logDir, ofile))
        f.run()
Example #7
def calRelative(iFile, oFile):
    mcut = None
    mcut <<= nm.mselstr(f="method", v="mcut", i="methods.csv")

    f = None
    f <<= nm.mnumber(q=True, a="id", i=iFile)
    f <<= nm.mjoin(k="dataSize", m=mcut, f="mean:base")
    f <<= nm.mcal(c='round(${mean}/${base},0.1)', a="score")
    f <<= nm.m2cross(k="method", s="dataSize", f="score")
    f <<= nm.msortf(f="id%n")
    f <<= nm.mcut(f="method,10000:small,1000000:middle,100000000:large")
    f <<= nm.mfldname(q=True, o=oFile)
    f.run()
Example #8
def mkdata(rowSize, oFile):
    f = None
    f <<= nm.mnewnumber(a="id", l=rowSize)
    f <<= nm.mrand(min=0, max=9, int=True, S=1111, a="key1")
    f <<= nm.mrand(min=100, max=999, int=True, S=1113, a="key2")
    f <<= nm.mrand(min=10000, max=99999, int=True, S=1117, a="key3")
    f <<= nm.mcal(c='randi(0,999,101)', a="int1")
    f <<= nm.mcal(c='randi(-999,999,111)', a="int2")
    f <<= nm.mcal(c='rand(991)', a="float1")
    f <<= nm.mcal(c='rand(997)*200-100', a="float2")
    f <<= nm.mcal(c='0d20100101+randi(0,3650,137)', a="date")
    f <<= nm.mcal(c='right(t2s(0t000000+randi(0,86399,133)),6)',
                  a="time",
                  o=oFile)
    f.run()
    def enumerate(self, eArgs):

        pFiles = []
        tFiles = []
        tf = mtemp.Mtemp()
        for cName, posSize in self.db.clsNameRecSize.items():
            negSize = self.db.traSize - posSize
            if "minGR" in eArgs:
                self.minGR = eArgs["minGR"]
            else:
                minProb = eArgs["minProb"] if ("minProb" in eArgs) else 0.5
                if "uniform" in eArgs and eArgs["uniform"] == True:
                    self.minGR = (minProb / (1 - minProb)) * (
                        self.db.clsSize - 1)  # formula (4) in the manual
                else:
                    self.minGR = (minProb / (1 - minProb)) * (
                        float(negSize) / float(posSize))  # formula (4) in the manual
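            # e.g. (illustrative numbers only): with minProb=0.8 and negSize/posSize=3.0,
            # minGR = (0.8/0.2)*3.0 = 12.0, i.e. a pattern's support in the positive
            # class must be at least 12 times its support in the remaining classes.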

            # minimum support (ratio) and minimum support count
            # s=0.05
            # s=c1:0.05,c2:0.06
            # S=10
            # S=c1:10,c2:15
            if "minCnt" in eArgs:
                if isinstance(eArgs["minCnt"], dict):
                    self.minPos = eArgs["minCnt"][cName]
                else:
                    self.minPos = eArgs["minCnt"]
            else:
                if isinstance(eArgs["minSup"], dict):
                    self.minPos = int(eArgs["minSup"][cName] * float(posSize) +
                                      0.99)
                else:
                    self.minPos = int(eArgs["minSup"] * float(posSize) + 0.99)

            # maximum support (ratio) and maximum support count
            if "maxCnt" in eArgs:
                if isinstance(eArgs["maxCnt"], dict):
                    self.maxPos = eArgs["maxCnt"][cName]
                else:
                    self.maxPos = eArgs["maxCnt"]

            elif "maxSup" in eArgs:
                if isinstance(eArgs["maxSup"], dict):
                    self.maxPos = int(eArgs["maxSup"][cName] * float(posSize) +
                                      0.99)
                else:
                    self.maxPos = int(eArgs["maxSup"] * float(posSize) + 0.99)
            else:
                self.maxPos = None

            self.sigma[cName] = self.calSigma(self.minPos, self.minGR, posSize,
                                              negSize)

            # set lcm parameters and run
            # if no frequent patterns are found, lcm does not create an output file,
            # so create an empty one here in advance.
            lcmout = tf.file()  # lcm output file
            with open(lcmout, "w") as efile:
                pass

            runPara = {}

            if self.msgoff:
                runPara["type"] = eArgs["type"] + "IA_"
            else:
                runPara["type"] = eArgs["type"] + "IA"

            #if self.maxPos: # in the Ruby version this was "if @maxCnt" (never set anywhere), so it probably never worked
            if self.maxPos:
                runPara["U"] = self.maxPos

            if "minLen" in eArgs:
                runPara["l"] = str(eArgs["minLen"])

            if "maxLen" in eArgs:
                runPara["u"] = str(eArgs["maxLen"])

            runPara["w"] = self.weightFile[cName]

            runPara["i"] = self.file

            runPara["sup"] = str(self.sigma[cName])

            runPara["o"] = lcmout

            # run lcm
            #MCMD::msgLog("#{run}")
            #TAKE::run_lcm(run)
            #print(self.sigma)
            #print(runPara)
            #MCMD::msgLog("output patterns to CSV file ...")

            extTake.lcm(runPara)

            pFiles.append(self.temp.file())

            transle = tf.file()
            extTake.lcmtrans(lcmout, "e", transle)

            f = nm.mdelnull(f="pattern", i=transle)
            f <<= nm.mcal(c='round(${countN},1)', a="neg")
            f <<= nm.mcal(c='round(${countP}/%s,1)' % (self.posWeight[cName]),
                          a="pos")
            f <<= nm.mdelnull(f="pattern")  # needed?
            f <<= nm.msetstr(v=cName, a="class")
            f <<= nm.msetstr(v=posSize, a="posTotal")
            f <<= nm.msetstr(v=self.minGR, a="minGR")
            f <<= nm.mcut(f="class,pid,pattern,size,pos,neg,posTotal,minGR",
                          o=pFiles[-1])
            f.run()

            #s = nutil.mrecount(i=self.file)
            #MCMD::msgLog("the number of contrast patterns on class `#{cName}' enumerated is #{s}")

            if self.outtf:
                # write out the patterns that appear in each transaction
                #MCMD::msgLog("output tid-patterns ...")
                tFiles.append(self.temp.file())
                xxw = tf.file()

                xxw = nm.mcut(f=self.db.idFN, i=self.db.file)
                xxw <<= nm.muniq(k=self.db.idFN)
                xxw <<= nm.mnumber(S=0, a="__tid", q=True)

                translt = self.temp.file()
                extTake.lcmtrans(lcmout, "t", translt)

                f = nm.mjoin(k="__tid", m=xxw, f=self.db.idFN, i=translt)
                f <<= nm.msetstr(v=cName, a="class")
                f <<= nm.mcut(f=self.db.idFN + ",class,pid", o=tFiles[-1])
                f.run()

        # merge the per-class pattern and tid-pid files into the final output
        self.pFile = self.temp.file()
        self.tFile = self.temp.file()

        # merge the pattern files
        xxpCat = tf.file()
        f = nm.mcat(i=",".join(pFiles))
        f <<= nm.msortf(f="class,pid")
        f <<= nm.mnumber(s="class,pid", S=0, a="ppid", o=xxpCat)
        f.run()

        # pattern file calculations
        items = self.db.items
        f = nm.mcut(f="class,ppid:pid,pattern,size,pos,neg,posTotal,minGR",
                    i=xxpCat)
        f <<= nm.msetstr(v=self.db.traSize, a="total")
        f <<= nm.mcal(c='${total}-${posTotal}', a="negTotal")  # total count on the neg side
        f <<= nm.mcal(c='${pos}/${posTotal}', a="support")  # compute support
        f <<= nm.mcal(
            c=
            'if(${neg}==0,1.797693135e+308,(${pos}/${posTotal})/(${neg}/${negTotal}))',
            a="growthRate")

        if "uniform" in eArgs and eArgs["uniform"] == True:
            f <<= nm.mcal(
                c='(${pos}/${posTotal})/(${pos}/${posTotal}+(%s-1)*${neg}/${negTotal})'
                % (self.db.clsSize),
                a="postProb")
        else:
            f <<= nm.mcal(c='${pos}/(${pos}+${neg})', a="postProb")

        f <<= nm.msel(c='${pos}>=%s&&${growthRate}>=${minGR}' %
                      (self.minPos))  # selection by minSup and minGR
        f <<= nm.mvreplace(vf="pattern",
                           m=items.file,
                           K=items.idFN,
                           f=items.itemFN)
        f <<= nm.mcut(
            f="class,pid,pattern,size,pos,neg,posTotal,negTotal,total,support,growthRate,postProb"
        )
        f <<= nm.mvsort(vf="pattern")
        f <<= nm.msortf(f="class%nr,postProb%nr,pos%nr", o=self.pFile)
        f.run()

        # remove redundant taxonomies that subsume their items
        if items.taxonomy:
            #MCMD::msgLog("reducing redundant rules in terms of taxonomy ...")
            ## to be handled later
            zdd = VSOP.constant(0)
            dt = nm.mcut(i=self.pFile, f="pattern")

            for fldVal in dt:
                zdd = zdd + VSOP.itemset(fldVal[0])

            zdd = self.reduceTaxo(zdd, self.db.items)

            xxp1 = tf.file()
            xxp2 = tf.file()
            xxp3 = tf.file()
            zdd.csvout(xxp1)

            nm.mcut(nfni=True, f="1:pattern",
                    i=xxp1).mvsort(vf="pattern").msortf(f="pattern",
                                                        o=xxp2).run()
            nm.msortf(f="pattern", i=self.pFile).mcommon(
                k="pattern", m=xxp2).msortf(f="class%nr,postProb%nr,pos%nr",
                                            o=xxp3).run()
            shutil.move(xxp3, self.pFile)

        if self.outtf:
            # master used to select only transactions containing enumerated patterns
            xxp4 = nm.mcut(f="class,pid", i=self.pFile)
            f = nm.mcat(i=",".join(tFiles))
            f <<= nm.mjoin(k="class,pid", m=xxpCat,
                           f="ppid")  # 全クラス統一pid(ppid)結合
            f <<= nm.mcommon(k="class,ppid", K="class,pid",
                             m=xxp4)  # select the enumerated patterns
            f <<= nm.mcut(f=self.db.idFN + ",class,ppid:pid")
            f <<= nm.msortf(f=self.db.idFN + ",class,pid", o=self.tFile)
            f.run()
	def enumerate(self,eArgs):
		"""
		eArgsで与えられた条件で、頻出アイテム集合の列挙を実行する。

		:type eArgs: dict
		:type eArgs['type']: str
		:type eArgs['minCnt']: int
		:type eArgs['minSup']: float
		:type eArgs['maxCnt']: int
		:type eArgs['maxSup']: float
		:type eArgs['minLen']: int
		:type eArgs['maxLen']: int
		:type eArgs['top']: int
		:type eArgs['skipTP']: bool【default:False】
		:param eArgs: 各種列挙パラメータ
		:param eArgs['type']: 抽出するアイテム集合の型【'F':頻出集合, 'C':飽和集合, 'M':極大集合】
		:param eArgs['minCnt']: 最小サポート(件数)
		:param eArgs['minSup']: 最小サポート(確率)
		:param eArgs['maxCnt']: 最大サポート(件数)
		:param eArgs['maxSup']: 最大サポート(確率)
		:param eArgs['minLen']: アイテム集合の最小アイテム数(件数)
		:param eArgs['maxLen']: アイテム集合の最大アイテム数(件数)
		:param eArgs['top']: 列挙するサポート上位件数(件数)
		:param eArgs['skipTP']: トランザクションにマッチするパターン(アイテム集合)の出力を行わない。
		"""

		tf=mtemp.Mtemp()
		self.eArgs=eArgs
		self.type = eArgs["type"]

		if "minCnt" in eArgs and eArgs["minCnt"] != None:
			self.minCnt = int(eArgs["minCnt"])
			self.minSup = float(self.minCnt) / float(self.db.traSize)
		else:
			self.minSup = float(eArgs["minSup"])
			self.minCnt = int(self.minSup * float(self.db.traSize) + 0.99)

		# maximum support (ratio) and maximum support count
		self.maxCnt=None
		if ("maxCnt" in eArgs and  eArgs["maxCnt"]!= None) or ( "maxSup" in eArgs and eArgs["maxSup"]!= None):
			if "maxCnt" in eArgs and eArgs["maxCnt"]!= None:
				self.maxCnt = int(eArgs["maxCnt"])
				self.maxSup = float(self.maxCnt) / float(self.db.traSize)
			else:
				self.maxSup    = float(eArgs["maxSup"])
				self.maxCnt = int(self.maxSup * float(self.db.traSize) + 0.99)


		params = {}
		if self.msgoff:
			params["type"] ="%sIf_"%(self.type)
		else:
			params["type"] ="%sIf"%(self.type)


		if self.maxCnt :
			params["U"] = str(self.maxCnt)

		if "minLen" in eArgs and eArgs["minLen"] != None :
			params["l"] = str(eArgs['minLen'])
		
		if "maxLen" in eArgs and eArgs["maxLen"] != None :
			params["u"] = str(eArgs['maxLen'])

		# if an upper bound on the number of enumerated patterns is given, run lcm once to obtain the minimum support count
		if "top" in eArgs and eArgs["top"] != None :
			self.top = eArgs["top"]

		if self.top and self.top>0 :

			xxtop = tf.file()
			import copy
			top_params = copy.deepcopy(params)
			top_params["i"] = self.file
			top_params["sup"] = "1"
			top_params["K"] = str(self.top)
			top_params["so"] = xxtop
			import re
			top_params["type"] = re.sub('_$', '', top_params["type"] )

			extTake.lcm(top_params)

			with open(xxtop, "r") as rfile:
				self.minCnt = int(rfile.read().strip())

			if self.minCnt<0 :
				self.minCnt=1 


		self.skipTP=False
		if "skipTP" in eArgs:
			self.skipTP=eArgs["skipTP"]

		# lcm output file
		lcmout = tf.file()

		# if no frequent patterns are found, lcm does not create an output file,
		# so create an empty one here in advance.
		with open(lcmout, "w") as efile:
			pass

		# run lcm
		params["i"] = self.file
		params["sup"] = str(self.minCnt)
		params["o"] = lcmout
		extTake.lcm(params)

		# calculate single-item (1-itemset) supports for the lift value
		xxone= tf.file()
		tpstr = "FIf_" if self.msgoff else "FIf"

		extTake.lcm(type=tpstr,i=self.file,sup=1,o=xxone,l=1,u=1)


		# compute pattern supports and output them as CSV
		#MCMD::msgLog("output patterns to CSV file ...")

		xxp0 = tf.file()
		self.pFile = self.temp.file()
		items=self.db.items
		trans0 = self.temp.file()

		extTake.lcmtrans(lcmout,"p",trans0)

		f =   nm.mdelnull(i=trans0,f="pattern")
		f <<= nm.mvreplace(vf="pattern",m=items.file,K=items.idFN,f=items.itemFN)
		f <<= nm.msetstr(v=self.db.traSize,a="total")
		f <<= nm.mcal(c='${count}/${total}',a="support")
		f <<= nm.mcut(f="pid,pattern,size,count,total,support")
		f <<= nm.mvsort(vf="pattern")
		f <<= nm.msortf(f="pid",o=xxp0)
		f.run()


		# xxp0
		# pid,count,total,support,pattern
		# 0,13,13,1,A
		# 4,6,13,0.4615384615,A B
		xxp1=tf.file()

		# when no taxonomy is specified (added 2010/11/20)
		if items.taxonomy==None:
			shutil.move(xxp0,xxp1)
		# when a taxonomy is specified
		else:
			#MCMD::msgLog("reducing redundant rules in terms of taxonomy ...")

			zdd=VSOP.constant(0)
			fobj = nm.mcut(i=xxp0,f='pattern')
			for fldVal in fobj:
				zdd=zdd+VSOP.itemset(fldVal[0])

			
			zdd=self.reduceTaxo(zdd,self.db.items)
			xxz1=tf.file()
			xxz2=tf.file()
			zdd.csvout(xxz1)

			f0=None
			f0 <<= nm.mcut(nfni=True,f="1:pattern",i=xxz1)
			f0 <<= nm.mvsort(vf="pattern")
			f0 <<= nm.msortf(f="pattern")

			f=None
			f <<= nm.msortf(f="pattern",i=xxp0)
			f <<= nm.mcommon(k="pattern",m=f0)
			f <<= nm.msortf(f="pid",o=xxp1)
			f.run()


		# compute lift values
		transl = tf.file()
		extTake.lcmtrans(xxone,"p",transl)

		xxp2 =   nm.mdelnull(i=transl,f="pattern")
		xxp2 <<= nm.mvreplace(vf="pattern",m=items.file,K=items.idFN,f=items.itemFN)
		xxp2 <<= nm.msortf(f="pattern")

		xxp3 =   nm.mcut(f="pid,pattern",i=xxp1)
		xxp3 <<= nm.mtra(f="pattern",r=True)
		xxp3 <<= nm.mjoin(k="pattern",m=xxp2,f="count:c1")
		xxp3 <<= nm.mcal(c='ln(${c1})',a="c1ln")
		xxp3 <<= nm.msum(k="pid",f="c1ln")

		# p3
		# pid,pattern,c1,c1ln
		# 0,A,13,2.564949357
		# 1,E,7,1.945910149
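		# Note: c1ln above holds sum(ln(count_i)) over the single items of each pattern,
		# so the mcal below computes
		#   lift = count*total^(size-1)/prod(count_i)
		#        = exp(ln(count) - c1ln + (size-1)*ln(total)),
		# i.e. the pattern's support divided by the product of its single-item supports.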
		
		# does this break? => OK
		f3 =   nm.mjoin(k="pid",f="c1ln",i=xxp1,m=xxp3)
		f3 <<= nm.mcal(c='round(exp(ln(${count})-${c1ln}+(${size}-1)*ln(${total})),0.0001)',a="lift")
		f3 <<= nm.mcut(f="pid,size,count,total,support,lift,pattern")
		f3 <<= nm.msortf(f="support%nr",o=self.pFile)
		f3.run()

		#self.size = mrecount.mrecount(i=self.file)

		#MCMD::msgLog("the number of patterns enumerated is #{@size}")

		if not self.skipTP:
			# write out the sequences that appear in each transaction
			#MCMD::msgLog("output tid-patterns ...")

			self.tFile = self.temp.file()
			xxw3i = tf.file()
			extTake.lcmtrans(lcmout,"t",xxw3i)

			xxw1 = nm.mcut(f=self.db.idFN,i=self.db.file).muniq(k=self.db.idFN).mnumber(S=0,a="__tid",q=True).msortf(f="__tid")
			xxw2 = nm.mcut(f="pid",i=self.pFile)
			xxw3 = nm.mcommon(k="pid",i=xxw3i,m=xxw2).mjoin(k="__tid",m=xxw1,f=self.db.idFN).mcut(f=self.db.idFN+",pid",o=self.tFile)
			xxw3.run()
Example #11
	def run(self):

		from datetime import datetime	
		t = datetime.now()

		temp=nu.Mtemp()
		xxsspcin=temp.file()
		xxmap=temp.file()

		# convert the tra file and build the map file
		if self.num :
			total = self.convN(self.iFile,self.idFN,self.itemFN,xxsspcin,xxmap)
		else:
			total = self.conv(self.iFile,self.idFN,self.itemFN,xxsspcin,xxmap)

		# system "head xxsspcin"
		# 3 5 0 2
		# 4 1 2
		# 0 2 3 1
		# 1 0 2
		# 3 4 0 1
		# system "head xxmap"
		# ##item,##freq%0nr,##num
		# b,4,0
		# d,4,1
		# f,4,2
		minSupp = int(total*self.minSupPrb)	if self.minSupPrb else self.minSupCnt
			

		# sim string for sspc
		if self.sim :
			if self.sim=="J":
				sspcSim="R"
			elif self.sim=="P":
				sspcSim="P"
			elif self.sim=="C":
				sspcSim="i"
		# if sim is omitted, use R with th=0 (no similarity constraint)
		else:
			sspcSim="R"
			self.th=0

		############ enumeration body ############
		xxsspcout=temp.file()
		tpstr =  sspcSim+"ft_" if self.msgoff else sspcSim+"ft"
		extTake.sspc(type=tpstr,TT=minSupp,i=xxsspcin,th=self.th,o=xxsspcout)

		##################################

		xxtmmp=temp.file()
		
		f =   nm.mread(i=xxsspcout) 
		f <<= nm.cmd("tr ' ()' ','") 
		f <<= nm.mcut(f="1:i1,2:i2,0:frequency,4:sim",nfni=True)

		if self.num :

			f <<= nm.mfldname(f="i1:node1,i2:node2")
			if self.sim!="C":
				f <<= nm.mfsort(f="node1,node2")
			
			f <<= nm.mjoin(k="node1",K="##item",m=xxmap,f="##freq:frequency1")
			f <<= nm.mjoin(k="node2",K="##item",m=xxmap,f="##freq:frequency2") 
			
		else:

			f <<= nm.mjoin(k="i1",K="##num",m=xxmap,f="##item:node1,##freq:frequency1")
			f <<= nm.mjoin(k="i2",K="##num",m=xxmap,f="##item:node2,##freq:frequency2") 

			if self.sim!="C":

				f <<= nm.mcut(f="i1,i2,frequency,sim,node1,node2,frequency1,frequency2,node1:node1x,node2:node2x")
				f <<= nm.mfsort(f="node1x,node2x")
				f <<= nm.mcal(c='if($s{node1}==$s{node1x},$s{frequency1},$s{frequency2})',a="freq1")
				f <<= nm.mcal(c='if($s{node2}==$s{node2x},$s{frequency2},$s{frequency1})',a="freq2")
				f <<= nm.mcut(f="i1,i2,frequency,sim,node1x:node1,node2x:node2,freq1:frequency1,freq2:frequency2")

		f <<= nm.msetstr(v=total,a="total")
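		# The lines below compute the standard pairwise co-occurrence measures:
		#   confidence = freq(1,2)/freq(1)            support = freq(1,2)/total
		#   jaccard    = freq(1,2)/(freq(1)+freq(2)-freq(1,2))
		#   lift       = freq(1,2)*total/(freq(1)*freq(2))
		#   PMI here is the normalized PMI, ln(lift)/(ln(total)-ln(freq(1,2))), in [-1,1]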
		f <<= nm.mcal(c='${frequency}/${frequency1}',a="confidence")
		f <<= nm.mcal(c='${frequency}/${total}',a="support")
		f <<= nm.mcal(c='${frequency}/(${frequency1}+${frequency2}-${frequency})',a="jaccard")
		f <<= nm.mcal(c='(${frequency}*${total})/((${frequency1}*${frequency2}))',a="lift")
		f <<= nm.mcal(c='(ln(${frequency})+ln(${total})-ln(${frequency1})-ln(${frequency2}))/(ln(${total})-ln(${frequency}))',a="PMI")
		f <<= nm.mcut(f="node1,node2,frequency,frequency1,frequency2,total,support,confidence,lift,jaccard,PMI")
		f <<= nm.msortf(f="node1,node2",o=self.oeFile)
		f.run()

		if self.onFile:
			f4 =   nm.mcut(f=self.itemFN+":node",i=self.iFile)
			f4 <<= nm.mcount(k="node",a="frequency")
			if self.node_support :
				minstr = "[%s,]"%(minSupp)
				f4 <<= nm.mselnum(f="frequency",c=minstr)

			f4 <<= nm.msetstr(v=total,a="total")
			f4 <<= nm.mcal(c='${frequency}/${total}',a="support")
			f4 <<= nm.mcut(f="node,support,frequency,total",o=self.onFile)
			f4.run()

		procTime=datetime.now()-t

		# write the log file
		if self.logFile :
			kv=[["key","value"]]
			for k,v in self.args.items():
				kv.append([k,str(v)])
			kv.append(["time",str(procTime)])
			nm.writecsv(i=kv,o=self.logFile).run()
Example #12
os.environ["KG_VerboseLevel"] = "3"
debug = "on"

iPath = "./DATA"
oPath = "./OUTPUT/apriori"
os.system("mkdir -p %s" % oPath)

iFile = "%s/price_large.csv" % (iPath)
topix = "%s/index.csv" % (iPath)

# make transaction data, with date as the transaction and ticker ID as the item
tra = None
tra <<= nm.mcut(f="id,date,c", i=iFile)
tra <<= nm.mjoin(k="date", m=topix, f="i")
tra <<= nm.mslide(k="id", s="date", f="date:date2,c:c2,i:i2")
tra <<= nm.mcal(c="${c2}/${c}-${i2}/${i}", a="ret")
tra <<= nm.mselnum(f="ret", c="[0.05,0.1]")
tra <<= nm.mcut(f="id,date2:date,ret")

# frequency of one item
freq = None
freq <<= nm.mcut(f="id", i=tra)
freq <<= nm.mcount(k="id", a="freq")
freq <<= nm.mselnum(f="freq", c="[5,]")

# total number of transactions
total = None
total <<= nm.mcut(f="date", i=tra)
total <<= nm.muniq(k="date")
total <<= nm.mcount(a="total")
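# Note: tra, freq and total above are lazy pipelines; nothing runs until a downstream
# command that consumes them (via i= or m=) is executed with run(). A hedged example
# with a hypothetical output path:
#   nm.mjoin(k="id", i=tra, m=freq, f="freq", o="OUTPUT/apriori/tra_freq.csv").run()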
	def enumerate(self,eArgs):
		tf=nu.Mtemp()

		# minimum support (ratio) and minimum support count
		if "minCnt" in eArgs :
			self.minCnt = int(eArgs["minCnt"])
			self.minSup = float(self.minCnt)/ float(self.db.size)
		else:
			self.minSup = float(eArgs["minSup"])
			self.minCnt = int(self.minSup * float(self.db.size) + 0.99)


		# maximum support (ratio) and maximum support count
		self.maxCnt=None
		if "maxCnt" in eArgs or "maxSup" in eArgs:
			if "maxCnt" in eArgs:
				self.maxCnt = int(eArgs["maxCnt"])
				self.maxSup = float(self.maxCnt)/float(self.db.size)
			else:
				self.maxSup = float(eArgs["maxSup"])
				self.maxCnt = int(self.maxSup * float(self.db.size) + 0.99)

		# unused
		#@minProb = eArgs["minProb"].to_f # posterior probability
		#@minGR   = @minProb/(1-@minProb) # growth rate
		#@minGR   = eArgs["minGR"].to_f if eArgs["minGR"]

		# store the file names of the pattern enumeration results, taking one class as pos and the remaining classes as neg
		pFiles=[]
		tFiles=[]
		for cName,posSize in self.db.clsNameRecSize.items(): 
			negSize=self.db.size-posSize

			# compute minGR
			if "minGR" in eArgs:
				self.minGR=eArgs["minGR"]
			else:
				minProb = eArgs["minProb"] if ( "minProb" in eArgs ) else 0.5
				if "uniform" in eArgs and eArgs["uniform"]:
					self.minGR = (minProb/(1-minProb)) * (self.db.clsSize-1) # formula (4) in the manual
				else:
					self.minGR = (minProb/(1-minProb)) * (float(negSize)/float(posSize)) # formula (4) in the manual


			# minimum support (ratio) and minimum support count
			if "minCnt" in eArgs:
				self.minPos = eArgs["minCnt"]
			else:
				self.minPos = int(eArgs["minSup"] * float(posSize) + 0.99)

			# maximum support (ratio) and maximum support count
			if "maxCnt" in eArgs or "maxSup" in eArgs:
				if "maxCnt" in eArgs:
					self.maxCnt = int(eArgs["maxCnt"])
				else:
 					self.maxCnt = int(eArgs["maxSup"] * float(posSize) + 0.99)


			self.sigma[cName] = self.calSigma(self.minPos,self.minGR,posSize,negSize)

			# set lcm_seq parameters and run
			lcmout = tf.file() # lcm_seq output file
			# if no frequent patterns are found, lcm does not create an output file,
			# so create an empty one here in advance.
			with open(lcmout, "w") as efile:
				pass

			params = {}
			if self.msgoff:
				params["type"] ="CIA_"
			else:
				params["type"] ="CIA"

			if self.maxCnt: # upper limit on window size
				params["U"] = str(self.maxCnt)
			if "minLen" in eArgs:
				params["l"] = str(eArgs["minLen"])
			if 'maxLen' in eArgs:
				params["u"] = str(eArgs["maxLen"])
			if 'gap' in eArgs:
				params["g"] = str(eArgs["gap"])
			if 'win' in eArgs:
				params["G"] = str(eArgs["win"])

			params["w"] = self.weightFile[cName]
			params["i"] = self.file
			params["sup"] = str(self.sigma[cName])
			params["o"] = lcmout

			# run lcm_seq
			#MCMD::msgLog("#{run}")
			if 'padding' in eArgs and eArgs["padding"]: # when padding is specified, run the lcm_seq variant that does not output 0 items
				extTake.lcmseq_zero(params)
			else:
				extTake.lcmseq(params)

			# compute pattern supports and output them as CSV
			#MCMD::msgLog("output patterns to CSV file ...")
			pFiles.append(self.temp.file())
			transle = self.temp.file()

			extTake.lcmtrans(lcmout,"e",transle) # pattern,countP,countN,size,pid

			f=None
			f <<= nm.mdelnull(f="pattern",i=transle)
			f <<= nm.mcal(c='round(${countN},1)',a="neg")
			f <<= nm.mcal(c='round(${countP}/%s,1)'%(self.posWeight[cName]),a="pos")
			f <<= nm.mdelnull(f="pattern")
			f <<= nm.msetstr(v=cName,a="class")
			f <<= nm.msetstr(v=posSize,a="posTotal")
			f <<= nm.msetstr(v=self.minGR,a="minGR")
			f <<= nm.mcut(f="class,pid,pattern,size,pos,neg,posTotal,minGR",o=pFiles[-1])
			f.run()

			#s = MCMD::mrecount("i=#{pFiles.last}") # number of enumerated patterns
			#MCMD::msgLog("the number of contrast patterns on class `#{cName}' enumerated is #{s}")

			if self.outtf :
				# write out the sequences that appear in each transaction
				#MCMD::msgLog("output tid-patterns ...")
				tFiles.append(self.temp.file())

				xxw= tf.file()
				f=None
				f <<= nm.mcut(f=self.db.idFN,i=self.db.file)
				f <<= nm.muniq(k=self.db.idFN)
				f <<= nm.mnumber(S=0,a="__tid",q=True)
				f <<= nm.msortf(f="__tid",o=xxw)
				f.run()

				nm.mcut(f=self.db.idFN,i=self.db.file).muniq(k=self.db.idFN).mnumber(S=0,a="__tid",q=True,o=xxw).run()
				translt = self.temp.file()
				extTake.lcmtrans(lcmout,"t",translt)
				nm.mjoin(k="__tid",m=xxw,f=self.db.idFN,i=translt).msetstr(v=cName,a="class").mcut(f=self.db.idFN+",class,pid",o=tFiles[-1]).run()


		# merge the per-class pattern and tid-pid files into the final output
		self.pFile = self.temp.file()
		self.tFile = self.temp.file()

		# merge the pattern files
		xxpCat = tf.file()
		f =   nm.mcat(i=",".join(pFiles))
		f <<= nm.msortf(f="class,pid")
		f <<= nm.mnumber(s="class,pid",S=0,a="ppid",o=xxpCat)
		f.run()

		# pattern file calculations
		items=self.db.items
		f=""
		f =   nm.mcut(f="class,ppid:pid,pattern,size,pos,neg,posTotal,minGR",i=xxpCat)
		f <<= nm.msetstr(v=self.db.size,a="total")
		f <<= nm.mcal(c='${total}-${posTotal}',a="negTotal") # total count on the neg side
		f <<= nm.mcal(c='${pos}/${posTotal}',a="support") # compute support
		f <<= nm.mcal(c='if(${neg}==0,1.797693135e+308,(${pos}/${posTotal})/(${neg}/${negTotal}))',a="growthRate")
		if "uniform" in eArgs and eArgs["uniform"] == True:
			f <<= nm.mcal(c='(${pos}/${posTotal})/(${pos}/${posTotal}+(%s-1)*${neg}/${negTotal})'%(self.db.clsSize),a="postProb")
		else:
			f <<= nm.mcal(c='${pos}/(${pos}+${neg})',a="postProb")

		f <<= nm.msel(c='${pos}>=%s&&${growthRate}>=${minGR}'%(self.minPos)) # selection by minSup and minGR
		f <<= nm.mvreplace(vf="pattern",m=items.file,K=items.idFN,f=items.itemFN)
		f <<= nm.mcut(f="class,pid,pattern,size,pos,neg,posTotal,negTotal,total,support,growthRate,postProb")
		f <<= nm.mvsort(vf="pattern")
		f <<= nm.msortf(f="class%nr,postProb%nr,pos%nr",o=self.pFile)
		f.run()

		if self.outtf :
			# master used to select only transactions containing enumerated patterns
			xxp4=nm.mcut(f="class,pid",i=self.pFile)

			f =   nm.mcat(i=",".join(tFiles))
			f <<= nm.mjoin(k="class,pid",m=xxpCat,f="ppid") # 全クラス統一pid(ppid)結合
			f <<= nm.mcommon(k="class,ppid",K="class,pid",m=xxp4) # 列挙されたパターンの選択
			f <<= nm.mcut(f=self.db.idFN+",class,ppid:pid")
			f <<= nm.msortf(f=self.db.idFN+",class,pid",o=self.tFile)
			f.run()


		self.size = nu.mrecount(i=self.pFile)
Example #14
def __mkNode(key, nf, nl, nv, nc, ni, ef1, ef2, ei, noiso, norm, mapFile,
             oFile):
    xbyE = None
    xbyN = None
    # generate node info from the edge file
    # when noiso (exclude isolated nodes) is set, ids that appear in edges but not in nodes must be dropped, so compute them here.
    if ni == None or (ni != None and noiso):
        inp = [
            nm.mcut(f="%s:key,%s:nam,%s:nl" % (key, ef1, ef1), i=ei),
            nm.mcut(f="%s:key,%s:nam,%s:nl" % (key, ef2, ef2), i=ei)
        ]
        xbyE <<= nm.mnullto(i=inp, f="key", v="##NULL##")
        xbyE <<= nm.muniq(k="key,nam")
        xbyE <<= nm.mjoin(k="key", K="nam", m=mapFile, f="num:keyNum")
        xbyE <<= nm.mjoin(k="nam", K="nam", m=mapFile, f="num,leaf")
        xbyE <<= nm.msetstr(v=",,,,", a="nv,nc,nlKey,nvKey,ncKey")
        xbyE <<= nm.mcut(
            f="key,nam,keyNum,num,nl,nv,nc,leaf,nvKey,ncKey,nlKey")

    # build from the node file
    if ni:
        # build the label field for mcal cat
        label = []
        # label field
        if nl:
            for nml in nl:
                label.append(nml)
        else:
            label.append("$s{%s}" % (nf))

        nvcStr = []
        if nv:
            nvcStr.append('%s:nv' % (nv))
        if nc:
            nvcStr.append('%s:nc' % (nc))
        """
		# map
		# nam,leaf,num
		# ##NULL##,,0
		# #1_1,,2
		# #1_2,,3
		# #1_3,,4
		# #2_1,,5
		# a,1,6
		# b,1,7
		# c,1,8
		"""

        f = None
        f <<= nm.mcal(c='cat("_",%s)' % (','.join(label)), a="##label", i=ni)
        if len(nvcStr) == 0:
            f <<= nm.mcut(f='%s:key,%s:nam,##label:nl' % (key, nf))
        else:
            f <<= nm.mcut(f='%s:key,%s:nam,##label:nl,%s' %
                          (key, nf, ','.join(nvcStr)))
        f <<= nm.mnullto(f="key", v="##NULL##")
        if not nv:
            f <<= nm.msetstr(v="", a="nv")
        if not nc:
            f <<= nm.msetstr(v="", a="nc")

        f <<= nm.mjoin(k="key", K="nam", m=mapFile, f="num:keyNum")
        f <<= nm.mjoin(k="nam", K="nam", m=mapFile, f="num,leaf")

        if norm:
            # node scaling factor is fixed
            nr = 3
            f <<= nm.mnormalize(f="nv:nv2", c="range")
            f <<= nm.mcal(c='${nv2}*(%s-1)+1' % (nr), a="nvv")
            f <<= nm.mcut(f="key,nam,keyNum,num,nl,nvv:nv,nc,leaf")  #o=#{xxa}"
        else:
            f <<= nm.mcut(f="key,nam,keyNum,num,nl,nv,nc,leaf")  #o=#{xxa}"

        xbyN <<= nm.mjoin(k="keyNum",
                          K="num",
                          m=f,
                          f="nl:nlk,nv:nvKey,nc:ncKey",
                          n=True,
                          i=f)
        xbyN <<= nm.mcal(c='if(isnull($s{nlk}),$s{key},$s{nlk})', a='nlKey')
        xbyN <<= nm.mcut(f="nlk", r=True)

    if ni != None and noiso:
        nm.mcommon(k="key,nam", m=xbyE, i=xbyN, o=oFile).run()
    elif ni != None:
        xbyN.writecsv(o=oFile).run()
    else:
        xbyE.writecsv(o=oFile).run()
    def enumerate(self, eArgs):
        tf = mtemp.Mtemp()

        # minimum support (ratio) and minimum support count
        if "minCnt" in eArgs and eArgs["minCnt"] != None:
            self.minCnt = int(eArgs["minCnt"])
            self.minSup = float(self.minCnt) / float(self.db.size)
        else:
            self.minSup = float(eArgs["minSup"])
            self.minCnt = int(self.minSup * float(self.db.size) + 0.99)

        # maximum support (ratio) and maximum support count
        self.maxCnt = None
        if ("maxCnt" in eArgs
                and eArgs["maxCnt"] != None) or ("maxSup" in eArgs
                                                 and eArgs["maxSup"] != None):
            if "maxCnt" in eArgs and eArgs["maxCnt"] != None:
                self.maxCnt = int(eArgs["maxCnt"])
                self.maxSup = float(self.maxCnt) / float(self.db.size)
            else:
                self.maxSup = float(eArgs["maxSup"])
                self.maxCnt = int(self.maxSup * float(self.db.size) + 0.99)

        # if an upper bound on the number of enumerated patterns is given, run lcm once to obtain the minimum support count
        if "top" in eArgs and eArgs["top"] != None:
            self.top = eArgs["top"]

        # run lcm once with the top-K setting to obtain the minimum support count
        if self.top and self.top > 0:

            xxtop = tf.file()

            extTake.lcmseq(type="Cf",
                           K=str(self.top),
                           i=self.file,
                           sup="1",
                           so=xxtop)

            with open(xxtop, "r") as rfile:
                self.minCnt = int(rfile.read().strip())

        # lcm_seq output file
        lcmout = tf.file()
        # if no frequent patterns are found, lcm does not create an output file,
        # so create an empty one here in advance.
        with open(lcmout, "w") as efile:
            pass

        # set lcm_seq parameters and run
        params = {}
        if self.msgoff:
            params["type"] = "CIf_"
        else:
            params["type"] = "CIf"

        if self.maxCnt:
            params["U"] = str(self.maxCnt)
        if "minLen" in eArgs:
            params["l"] = str(eArgs["minLen"])
        if 'maxLen' in eArgs:
            params["u"] = str(eArgs["maxLen"])
        if 'gap' in eArgs:
            params["g"] = str(eArgs["gap"])
        if 'win' in eArgs:
            params["G"] = str(eArgs["win"])

        params["i"] = self.file
        params["sup"] = str(self.minCnt)
        params["o"] = lcmout

        # run lcm_seq
        #MCMD::msgLog("#{run}")
        if 'padding' in eArgs and eArgs[
                "padding"]:  # when padding is specified, run the lcm_seq variant that does not output 0 items
            extTake.lcmseq_zero(params)
        else:
            extTake.lcmseq(params)

        # compute pattern supports and output them as CSV
        self.pFile = self.temp.file()
        items = self.db.items

        transl = self.temp.file()
        extTake.lcmtrans(lcmout, "p", transl)

        f = nm.mdelnull(f="pattern", i=transl)
        f <<= nm.mvreplace(vf="pattern",
                           m=items.file,
                           K=items.idFN,
                           f=items.itemFN)
        f <<= nm.msetstr(v=self.db.size, a="total")
        f <<= nm.mcal(c='${count}/${total}', a="support")  # compute support
        f <<= nm.mcut(f="pid,pattern,size,count,total,support")
        f <<= nm.msortf(f="support%nr", o=self.pFile)
        f.run()

        if self.outtf:
            # write out the sequences that appear in each transaction
            #MCMD::msgLog("output tid-patterns ...")
            self.tFile = self.temp.file()

            xxw = tf.file()  #Mtemp.new.name
            f = None
            f <<= nm.mcut(f=self.db.idFN, i=self.db.file)
            f <<= nm.muniq(k=self.db.idFN)
            f <<= nm.mnumber(S=0, a="__tid", q=True)
            f <<= nm.msortf(f="__tid", o=xxw)
            f.run()

            translt = self.temp.file()
            extTake.lcmtrans(lcmout, "t", translt)

            f = None
            f <<= nm.msortf(f="__tid", i=translt)
            f <<= nm.mjoin(k="__tid", m=xxw, f=self.db.idFN)
            f <<= nm.mcut(f=self.db.idFN + ",pid")
            f <<= nm.msortf(f=self.db.idFN + ",pid", o=self.tFile)
            f.run()
Example #16
20161013,-0.003854546465
20161014,-0.01164231144
20161017,-0.001684303294
20161018,-0.002248115897
20161019,-0.003859145998
20161020,-0.006883326367
20161021,-0.01175535999
20161024,-0.01474462455
"""


temo = mtemp.Mtemp()
vv = temo.file()
with open(vv,"w") as wfp:
	wfp.write(sampDAT1)

rls=[]
xxx1 = n.mcal(i=vv,c="left($s{date},6)",a="month").msetstr(a="ccc",v="")
xxx2 = xxx1.msum({"k":"date","f":"val"})
xxx3 = xxx1.msum({"k":"date","f":"val"},o=rls)
xxx3.run()
print(rls)

for x in xxx2.keyblock(["month"]):
	print(x)

for x in xxx2:
	if x[3] == "201608":
		print(x)

Example #17
# Filtering the friend pairs of StockCode in the similarity graph of StockCode.
f = None
f <<= nm.msplit(f="pattern", a="item1,item2", i="%s/patterns.csv" % oPath)
f <<= nm.mcut(f="item1,item2,lift", o="%s/rules.csv" % oPath)
f.run(msg="on")

#os.system("mfriends.rb ef=item1,item2 ei=%s/rules.csv ef=item1,item2 sim=lift rank=5 eo=%s/friends.csv -udout"%(oPath,oPath))
##### the line above is replaced by the following call, which should work
nt.mfriends(ef="item1,item2",
            ei="%s/rules.csv" % oPath,
            sim="lift",
            rank=5,
            udout=True,
            eo="%s/friends.csv" % oPath).run()

# visualization of the graph
f = None
f <<= nm.mcal(c="cat(\" \",$s{item1},$s{item2})",
              a="edges",
              i="%s/friends.csv" % oPath)
f <<= nm.mcut(f="edges", nfno=True, o="%s/edges.csv" % oPath)
f.run(msg="on")

G = nx.read_edgelist("%s/edges.csv" % oPath)
pos = nx.spring_layout(G)
plt.figure(figsize=(10, 10))
nx.draw(G, pos=pos, node_size=40, iterations=20)

plt.savefig("%s/friends.png" % oPath)
Example #18
    def run(self):
        temp = mtemp.Mtemp()

        ### mtra2gc
        xxsimgN = temp.file()
        xxsimgE = temp.file()
        xxsimgE0 = temp.file()

        param = {}
        param["i"] = self.iFile
        if self.idFN:
            param["tid"] = self.idFN
        if self.itemFN:
            param["item"] = self.itemFN
        if self.sp1:
            param["s"] = self.sp1
        if self.sp2:
            param["S"] = self.sp2

        #####################
        # enumerate in both directions with sim=C and th=0 so that confidences in both directions are obtained
        # the output data doubles, but using -directed in mfriends compensates for it
        param["sim"] = "C"
        param["th"] = "0"

        param["node_support"] = True
        if self.numtp:
            param["num"] = True
        param["no"] = xxsimgN
        param["eo"] = xxsimgE0

        nt.mtra2gc(**param).run()

        f = nm.readcsv(xxsimgE0)
        for i in range(self.filterSize):
            f <<= nm.mselnum(f=self.filter[i],
                             c="[%s,%s]" % (self.lb[i], self.ub[i]))
        f <<= nm.writecsv(xxsimgE)
        f.run()

        ### mfriends
        xxfriends = temp.file()
        xxfriendE = temp.file()
        xxw = temp.file()
        xxf = temp.file()
        xxff = temp.file()
        xxor = temp.file()

        if not os.path.isdir(xxfriends):
            os.makedirs(xxfriends)
        col = [["FF000080", "FF888880"], ["0000FF80", "8888FF80"],
               ["00FF0080", "88FF8880"]]

        for i in range(len(self.sim)):
            paramf = {}
            paramf["ei"] = xxsimgE
            paramf["ni"] = xxsimgN
            paramf["ef"] = "node1,node2"
            paramf["nf"] = "node"
            paramf["eo"] = xxfriendE
            paramf["no"] = xxfriends + "/n_" + str(i)
            paramf["sim"] = self.sim[i]
            paramf["dir"] = self.dir[i]
            paramf["rank"] = self.rank[i]
            paramf["directed"] = True

            nt.mfriends(**paramf).run()

            frec2 = nm.mfsort(f="node1,node2", i=xxfriendE)
            frec2 <<= nm.msummary(k="node1,node2",
                                  f=self.sim[i],
                                  c="count,mean")
            frec2 <<= nm.mselstr(f="count", v=2)
            # node1%0,node2%1,fld,count,mean
            # a,b,support,2,0.1818181818
            # a,d,support,2,0.1818181818

            f = nm.mjoin(k="node1,node2",
                         K="node1,node2",
                         m=frec2,
                         f="mean:s1",
                         n=True,
                         i=xxfriendE)
            f <<= nm.mjoin(k="node2,node1",
                           K="node1,node2",
                           m=frec2,
                           f="mean:s2",
                           n=True)
            # 1) if sim cannot be joined from xxrecs2 (both s1 and s2 are null), the edge is one-directional, so mark it "F"
            # 2) of the bidirectional edges a->b and b->a, mark only a->b (where s1 is not null) with "W".
            # 3) all other edges are marked "D" and removed
            f <<= nm.mcal(
                c='if(isnull($s{s1}),if(isnull($s{s2}),\"F\",\"D\"),\"W\")',
                a="dir")
            f <<= nm.mselstr(f="dir", v="D", r=True)
            f <<= nm.mcal(c='if($s{dir}==\"W\",$s{s1},$s{%s})' % (self.sim[i]),
                          a="sim")
            f <<= nm.mchgstr(f="dir:color",
                             c='W:%s,F:%s' % (col[i][0], col[i][1]),
                             A=True)
            f <<= nm.msetstr(v=[self.sim[i], str(i)], a="simType,simPriority")
            f <<= nm.mcut(f="simType,simPriority,node1,node2,sim,dir,color",
                          o=xxfriends + "/e_" + str(i))
            f.run()
            # node1%1,node2%0,simType,sim,dir,color
            # b,a,jaccard,0.3333333333,F,8888FF
            # j,c,jaccard,0.3333333333,F,8888FF
            # b,d,jaccard,0.3333333333,F,8888FF
            # a,e,jaccard,0.5,W,0000FF
            # d,e,jaccard,0.5,W,0000FF

        # output the rule file
        if self.orFile:
            mmm = nm.mcat(i=xxfriends + "/e_*").muniq(k="node1,node2")
            nm.mcommon(k="node1,node2", i=xxsimgE, m=mmm, o=self.orFile).run()

        # unify multi-edges (W has priority, then parameter position)
        if self.prune:
            """
			# split into bidirectional and one-directional edges
			nm.mcat(i=xxfriends+"/e_*").mselstr(f="dir",v="W",o=xxw,u=xxf).run()
			# select edges that are only one-directional
			f =   nm.mcommon(k="node1,node2",K="node1,node2",r=True,m=xxw,i=xxf)
			f <<= nm.mcommon(k="node1,node2",K="node2,node1",r=True,m=xxw,o=xxff)
			f.run()
			f = nm.mcat(i=xxw+","+xxff).mbest(k="node1,node2",s="dir%r,simPriority%n",o=self.oeFile).run()
			"""
            # this does not work
            fo = nm.mcat(i=xxfriends + "/e_*").mselstr(f="dir", v="W")
            fu = fo.direction("u")  # reconsider this
            fu <<= nm.mcommon(k="node1,node2", K="node1,node2", r=True, m=fo)
            fu <<= nm.mcommon(k="node1,node2", K="node2,node1", r=True, m=fo)
            #f  =   nm.m2cat()
            f = nm.mbest(i=[fo, fu],
                         k="node1,node2",
                         s="dir%r,simPriority%n",
                         o=self.oeFile)

            f.run()

        else:
            nm.mcat(i=xxfriends + "/e_*", o=self.oeFile).run()

        nm.mcat(i=xxfriends + "/n_0", o=self.onFile).run()