def get_object_by_data2(obj, usr):
    """Load the semantic object named *obj* for user *usr* and return it as an
    mdNeural.mdLayer whose topicos/nrs mirror the object's DT rows.

    If no row matches, an empty layer named *obj* is returned.
    """
    # Case-insensitive lookup of the object row (name + numeric id).
    sql = 'select objeto,i from ' + mdTb.table_object + ' where lower(objeto)=lower(?) and USERNAME = ? '
    resultSet = conn3.sqlX(sql, ([obj, usr]))
    obj_nm = None
    uid = None
    # Only the first matching row is used.
    for results in resultSet:
        obj_nm = results[0]
        uid = results[1]
        break
    #-----------
    lay = mdNeural.mdLayer()
    if obj_nm == None:
        obj_nm = obj  # fall back to the requested name when the object is unknown
    lay.name = obj_nm
    #-----------
    if True:
        # NOTE(review): uid is interpolated directly into the SQL text; when the
        # first query found nothing this becomes `"UID" = None` — confirm callers
        # only pass existing objects.
        resultSet = conn3.sql('select trim(DT),trim(TOPICO) from ' + mdTb.table_dt + ' where \"UID\" = ' + str(uid))
        obj_nm = None
        for results in resultSet:
            DT = results[0]
            TOP = results[1]
            nr = lay.set_nr(DT)
            #tps=lay.get_topico(TOP)
            #if tps == None:
            #    tps=lay.set_topico(TOP)
            #----
            # Unlike the sibling loaders, topicos are always (re)created here,
            # not looked up first.
            tps = lay.set_topico(TOP)
            tps.connect_to(lay, nr, 'Composicao')
    return lay
def get_object_by_data(obj, usr, conn):
    """Load a semantic object through an explicit DB-API *conn* (format-style
    %s placeholders) and return it as an mdNeural.mdLayer.

    *obj* is used both as the numeric id (`i=%s`) in the first query and as the
    OBJECT key in the second — presumably they share the same value; confirm
    against callers.
    """
    cursor = conn.cursor()
    sql = 'select objeto from SEMANTIC_OBJECT where i=%s and USERNAME = %s '
    cursor.execute(sql, (obj, usr))
    resultSet = cursor.fetchall()
    obj_nm = None
    for results in resultSet:
        obj_nm = results[0]  # last row wins; normally zero or one row
    #-----------
    lay = mdNeural.mdLayer()
    lay.name = obj_nm  # NOTE(review): stays None when no row matched (no fallback here)
    #-----------
    cursor = conn.cursor()
    sql = 'select DT,TOPICO from SEMANTIC_OBJECT_DT where OBJECT=%s and USERNAME = %s '
    cursor.execute(sql, (obj, usr))
    resultSet = cursor.fetchall()
    obj_nm = None
    for results in resultSet:
        DT = results[0]
        TOP = results[1]
        nr = lay.set_nr(DT)
        # Reuse an existing topico of the same name, creating it only once.
        tps = lay.get_topico(TOP)
        if tps == None:
            tps = lay.set_topico(TOP)
        tps.connect_to(lay, nr, 'Composicao')
    return lay
def runc_layer(layer):
    """Extract focus objects from *layer* based on 'referencial.source' topicos.

    Returns the list of focus layers (obj_foco); every topico tagged
    'referencial' is copied onto each focus object.
    """
    objects = []
    ref_opcode = ''  # Relaction-oper-opcode: original opcode of the qualifiers when not specified
    #=====================================================
    def get_o_ob():
        # NOTE(review): `ob` is not defined in any enclosing scope — this helper
        # would raise NameError if called; it appears to be dead code.
        for s in objects:
            if s.name == ob:
                return s
        return None
    #=====================================================
    if True:
        # Debug dump of the incoming layer.
        print 'RTS-REF(1):------------'
        r = layer
        for s in r.topicos:
            print 'Topico:', s.dt
            print 'SNS:++++++++++++++++++'
            for s1 in s.sinapses:
                print s1.nr.dt
            print '++++++++++++++++++'
        print 'RTS-REF(1)(END):------------'
    obj_foco = []
    #print 'N tops:' , len(layer.topicos)
    for t in layer.topicos:
        tp = t
        nr_t = t.dt[0]
        if nr_t == '':
            continue
        if 'referencial.source' in t.dt:
            # Concatenate all non-'indicador' neuron data into one name string.
            sn_dt = ''
            for sn in tp.sinapses:
                if 'indicador' not in sn.nr.dt:
                    for s1 in sn.nr.dt:
                        sn_dt += (s1 + ' ')
            if umisc.trim(sn_dt) != '$$id$$' and umisc.trim(sn_dt) != '':
                print 'referencial.source:', sn_dt
                if len(obj_foco) > 0:
                    # Extend the name of the first focus object already found.
                    lay = obj_foco[0]
                    lay.name += (sn_dt)
                else:
                    lay = mdNeural.mdLayer()
                    lay.name = sn_dt
                    #====
                    objects.append(lay)
                    obj_foco.append(lay)
    rel1 = False
    for t in layer.topicos:
        topico_rsf = t
        #==============================================
        # Copy 'referencial' topicos onto every focus object.
        for dt_top in topico_rsf.dt:
            if dt_top in ['referencial'] or ref_opcode in ['refer']:
                for ob1 in obj_foco:
                    ob1.set_topico_nr(topico_rsf)
                break
    print 'Obj.foco.refer:', obj_foco
    return obj_foco
def get_object_by_data(obj, usr):
    """Load semantic object *obj* (SEMANTIC_OBJECT3) for *usr* as an
    mdNeural.mdLayer, wiring two DT levels:

    * level <= 1 rows become topicos connected to the layer;
    * level >  1 rows with id_top == ic become child nrs connected to the
      level-1 nr.

    Falls back to an empty layer named *obj* when the object is unknown.
    """
    sql = 'select trim(objeto),i from SEMANTIC_OBJECT3 where objeto=? and USERNAME = ? '
    resultSet = conn3.sqlX(sql, ([obj, usr]))
    obj_nm = None
    uid = None
    for results in resultSet:
        obj_nm = results[0]
        uid = results[1]
        break
    #-----------
    lay = mdNeural.mdLayer()
    if obj_nm is None:
        obj_nm = obj
    lay.name = obj_nm
    # Every object carries an 'identificador' topico pointing at its own name.
    tpc2 = lay.set_topico('identificador')
    nrc2 = lay.set_nr(lay.name)
    tpc2.connect_to(lay, nrc2, 'Composicao')
    #print lay.topicos,'....................'
    #print 'Read object:',obj,' uid:',uid
    #-----------
    if True:
        resultSet = conn3.sql(
            'select trim(DT),trim(TOPICO),i from SEMANTIC_OBJECT_DT3 where "UID" = '
            + str(uid) + ' and lev <=1 ')
        obj_nm = None
        for results in resultSet:
            DT = results[0]
            TOP = results[1]
            nr = lay.set_nr(DT)
            ic = results[2]
            if ic is None:
                ic = 0
            tps = lay.set_topico(TOP)
            tps.connect_to(lay, nr, 'Composicao')
            if True:
                # BUG FIX: the original built "...id_top=<ic>and i <> <ic>" with no
                # space before "and", producing invalid SQL; a space is required.
                # NOTE(review): this inner loop deliberately shadows DT/TOP/results
                # from the outer loop, as in the original.
                resultSet2 = conn3.sql(
                    'select id_top,trim(DT),trim(TOPICO) from SEMANTIC_OBJECT_DT3 where UID = '
                    + str(uid) + ' and lev > 1 and id_top=' + str(ic)
                    + ' and i <> ' + str(ic))
                for results2 in resultSet2:
                    DT2 = results2[0]
                    TOP2 = results2[1]
                    nrc = lay.set_nr(DT2)
                    #==
                    nr.connect_to(lay, nrc, 'Composicao')
    return lay
def run(layers, relacionado, startL, usr, stack):
    """Resolve every input layer against the ontology and build result layers.

    For each ontology object found, a fresh mdLayer is created carrying the
    object's name plus every topico reachable through its
    'REFERENCIAL-CLASS' links.

    Returns the list of result layers.  (The original ended with the no-op
    statement ``layers = rts``, which only rebound a local name and discarded
    the result while the function returned None; returning ``rts`` preserves
    the evident intent and is backward compatible since the old None return
    could only have been ignored by callers.)
    """
    rts = []
    for lr in layers:
        identif = ''
        cenario = ''
        # find the cenario + id of lr
        #
        #_get_ontology(self,aliases,purposes,usr)
        objects = lr._get_ontology([identif], [cenario], usr)
        for obj in objects:
            layer_r = mdNeural.mdLayer()
            layer_r.name = obj.name
            rts.append(layer_r)
            # Copy every topico reachable through REFERENCIAL-CLASS links.
            # (The original reused the loop variable `l` here, shadowing the
            # object loop variable; renamed for clarity.)
            links = obj.get_links('REFERENCIAL-CLASS')
            for lnk in links:
                tps = lnk.lr.get_all()
                for s in tps:
                    layer_r.set_topico_nr(s)
    return rts
def get_object_by_data(obj, usr):
    """Load semantic object *obj* (SEMANTIC_OBJECT, qmark placeholders) as an
    mdNeural.mdLayer; falls back to an empty layer named *obj* when unknown.
    """
    sql = 'select objeto from SEMANTIC_OBJECT where i=? and USERNAME = ? '
    resultSet = conn3.sqlX(sql, ([obj, usr]))
    obj_nm = None
    for results in resultSet:
        obj_nm = results[0]  # last row wins; normally zero or one row
    #-----------
    lay = mdNeural.mdLayer()
    if obj_nm == None:
        obj_nm = obj  # fall back to the requested name
    lay.name = obj_nm
    #-----------
    sql = 'select DT,TOPICO from SEMANTIC_OBJECT_DT where OBJECT=? and USERNAME = ? '
    # NOTE(review): this query goes through conn4 while the one above uses
    # conn3 — confirm the split is intentional.
    resultSet = conn4.sqlX(sql, ([obj, usr]))
    obj_nm = None
    for results in resultSet:
        DT = results[0]
        TOP = results[1]
        nr = lay.set_nr(DT)
        # Reuse an existing topico of the same name, creating it only once.
        tps = lay.get_topico(TOP)
        if tps == None:
            tps = lay.set_topico(TOP)
        tps.connect_to(lay, nr, 'Composicao')
    return lay
def get_object_by_data29(obj, usr, max_purposes):
    """Load semantic object *obj* like get_object_by_data2, additionally
    counting how many of its topicos match an entry of *max_purposes*
    (case-insensitive).

    Returns ``[lay, rattings]`` where *rattings* is that match count.
    """
    sql = 'select objeto,i from ' + mdTb.table_object + ' where lower(objeto)=lower(?) and USERNAME = ? '
    resultSet = conn3.sqlX(sql, ([obj, usr]))
    obj_nm = None
    uid = None
    for results in resultSet:
        obj_nm = results[0]
        uid = results[1]
        break
    #-----------
    lay = mdNeural.mdLayer()
    if obj_nm == None:
        obj_nm = obj  # fall back to the requested name
    lay.name = obj_nm
    #-----------
    if True:
        rattings = 0
        #print 'SQl: conn:','select trim(DT),trim(TOPICO) from '+mdTb.table_dt+' where \"UID\" = ' + str(uid)
        resultSet = conn3.sql('select trim(DT),trim(TOPICO) from ' + mdTb.table_dt + ' where \"UID\" = ' + str(uid))
        obj_nm = None
        for results in resultSet:
            DT = results[0]
            TOP = results[1]
            nr = lay.set_nr(DT)
            #print '->>:',DT,TOP
            #tps=lay.get_topico(TOP)
            #if tps == None:
            #    tps=lay.set_topico(TOP)
            #----
            tps = lay.set_topico(TOP)
            tps.connect_to(lay, nr, 'Composicao')
            # Count at most one match per topico row.
            for d in max_purposes:
                if TOP.lower() == d.lower():
                    rattings += 1
                    break
    return [lay, rattings]
def run(layers, relactionado, startL, usr, stack):
    """Compile the 'cliente' cenario: collect 'object' topicos from the input
    layers into a synthetic query layer (__TST__), then dispatch every mdLayer
    returned by the cenario's raction lines to stack.proc_pg.call_process2.
    Side effects only; returns None.
    """
    print 'Start compile cenary:----------'
    cur_cen = init_cenario_class_cliente(stack)
    lrK = mdNeural.mdLayer()
    #set_topico_a
    for l in layers:
        ids = l.name
        for u in l.topicos:
            if 'object' in u.dt:
                # Re-tag object topicos as 'identificador' and copy them into lrK.
                u.dt = ['identificador']
                lrK.set_topico_nr(u)
                print 'DIS:', u.sinapses
    lrK.name = '__TST__'
    #lrK.set_topico_aq(['quality',''])
    #lrK.set_topico_aq(['rule','mean'])
    lrK.set_topico_aq(['alias', ''])
    lrK.set_topico_aq(['opcode', 'mean'])
    layers_param = [lrK]
    for cn in cur_cen:
        for rct in cn.rcts:
            print 'ALiass:', rct
            second_Rcts = mdER.get_RactionLines2C(rct, usr)
            print 'len.rcts(O) [', len(second_Rcts), ']'
            print '......................................'
            for s in second_Rcts:
                print 'S:', s
                for l in s:
                    # Old-style (Python 2) class instances only.
                    if type(l).__name__ == 'instance':
                        if l.__class__.__name__ == 'mdLayer':
                            print 'RCT-fnd:', l.name
                            # Tag the layer with a purpose/class marker before dispatch.
                            nrt = l.set_topico('purpose')
                            l.set_nr_ch(nrt, 'class', 'composicao')
                            print 'RCT Ask Call-TO-Process:', l, l.name
                            stack.proc_pg.call_process2(usr, [l], layers_param)
            print '--------------------------------------'
def get_object_by_data223(obj, usr, plus):
    """Load semantic object *obj* like get_object_by_data2, additionally
    counting rows whose topico is 'need' and whose DT equals an entry of
    *plus* (exact match).

    Returns ``[lay, plus_fnd]`` where *plus_fnd* is that count.
    """
    sql = 'select objeto,i from ' + mdTb.table_object + ' where lower(objeto)=lower(?) and USERNAME = ? '
    resultSet = conn3.sqlX(sql, ([obj, usr]))
    obj_nm = None
    uid = None
    for results in resultSet:
        obj_nm = results[0]
        uid = results[1]
        break
    #-----------
    lay = mdNeural.mdLayer()
    if obj_nm == None:
        obj_nm = obj  # fall back to the requested name
    lay.name = obj_nm
    #-----------
    plus_fnd = 0
    if True:
        resultSet = conn3.sql('select trim(DT),trim(TOPICO) from ' + mdTb.table_dt + ' where \"UID\" = ' + str(uid))
        obj_nm = None
        for results in resultSet:
            DT = results[0]
            TOP = results[1]
            nr = lay.set_nr(DT)
            # One increment per row at most, regardless of how many *plus*
            # entries match.
            plusf = False
            for sd in plus:
                if TOP == 'need' and DT == sd:
                    plusf = True
            if plusf:
                plus_fnd += 1
            #tps=lay.get_topico(TOP)
            #if tps == None:
            #    tps=lay.set_topico(TOP)
            #----
            tps = lay.set_topico(TOP)
            tps.connect_to(lay, nr, 'Composicao')
    return [lay, plus_fnd]
def __init__(self, usr, purpose): inpt_word = mdNeural.mdLayer() # interpretacao de palavras #== [self.nodesER, self.all_purpouses] = mdER.get_RactionLines(purpose, usr)
def get_object_by_data(obj, usr, uid):
    """Load a semantic object from the key/value store (tb_object) by *uid*,
    or by *obj* when *uid* is empty, and build an mdNeural.mdLayer for it.

    NOTE(review): the function defines the helper cached_dt and then falls off
    the end, implicitly returning None — `lay` is never returned. Confirm
    whether this block was truncated or the side effects alone are intended.
    """
    #========
    if uid != '':
        resultSet = tb_object.get(uid)
        key1 = uid
    else:
        resultSet = tb_object.get(obj)
        key1 = uid  # NOTE(review): uid is '' here — looks like this should be key1 = obj; confirm
    #
    #
    obj_nm = None
    uid = None
    cenar = 0
    cnts_all_tps = 0
    if True:
        results = resultSet
        obj_nm = results[u'objeto']
        uid = key1
        cenar = results[u'cenar']
        cnts_all_tps = results[u'conts_n']
    #-----------
    lay = mdNeural.mdLayer()
    if obj_nm == None:
        obj_nm = obj  # fall back to the requested name
    lay.name = obj_nm
    # Identity topico/nr pair, both tagged with the cenario id.
    tpc2 = lay.set_topico('identificador')
    tpc2.uuid = cenar
    nrc2 = lay.set_nr(lay.name)
    nrc2.uuid = cenar
    tpc2.connect_to(lay, nrc2, 'Composicao')
    #print lay.topicos,'....................'
    print 'Read object(g1):', obj, ' uid:', uid
    #-----------
    def cached_dt(objectkey, cnts_all_tp):
        '''Fetch the DT rows "<objectkey>|1".."<objectkey>|<cnts_all_tp>" in a
        single multiget and return them as a list of [key, row] pairs.
        (An older one-get-per-key implementation was removed.)'''
        cached = []
        keyc = objectkey
        i = 1
        keys = []
        # Build the full candidate key list first, then fetch in one round trip.
        while i <= int(cnts_all_tp):
            keys.append(keyc + "|" + str(i))
            i += 1
        i = 0
        if True:
            try:
                c1 = tb_object_dt.multiget(keys, column_count=10000)
                for kd in c1.items():
                    cached.append([kd[0], kd[1]])
                    i += 1
            except Exception, e:
                # Best-effort: log and return whatever was collected so far.
                print 'ERROR:', e
                pass
        return cached
def run(layers, relactionado, startL, usr, stack):
    """Compile the 'facebook' cenario: collect object definitions ('with'
    topicos) from the input layers, expand them through raction lines, build
    search phrases from FACT links (identificador x composicao x action,
    suffixed with ' brasil'), and hand the phrases to collect_it_fcb.
    Side effects only; returns None.
    """
    print 'Start compile cenary:----------'
    cur_cen = init_cenario_class_facebook(stack)
    lrK = mdNeural.mdLayer()
    #set_topico_a
    obs_defs = []
    for l in layers:
        print 'FACE-LR-DUMP:============'
        ids = l.name
        for u in l.topicos:
            #print 'dt:(t):',u.dt,'\n'
            if 'with' in u.dt:
                # Every neuron value under a 'with' topico is an object definition.
                for d in u.sinapses:
                    for dt in d.nr.dt:
                        obs_defs.append(dt)
            if 'object' in u.dt:
                # Re-tag object topicos as 'identificador' and copy them into lrK.
                u.dt = ['identificador']
                lrK.set_topico_nr(u)
                print 'DIS:', u.sinapses
        lrK.name = '__TST__'
        print 'FACE-LR-DUMP:(END)========='
    #lrK.set_topico_aq(['quality',''])
    #lrK.set_topico_aq(['rule','mean'])
    # initialize the structure of the cenario to be searched on facebook ===
    print '[obs_defs]:{', obs_defs, '}'
    obs = []
    secs = []
    for nm in obs_defs:
        print 'ALiass(obj-def):', nm
        second_Rcts = mdER.get_RactionLines2C(nm, usr)
        print 'len.rcts(O) [', len(second_Rcts), ']'
        print '(obj-def)......................................:)'
        secs.append(second_Rcts)
    if len(secs) == 0:
        return
    to_Search = []
    to_Search_C = []  # composicao terms
    to_Search_A = []  # action terms
    for s1 in secs:
        for s in s1:
            #print 'S:',s
            for l in s:
                # Old-style (Python 2) class instances only.
                if type(l).__name__ == 'instance':
                    if l.__class__.__name__ == 'mdLayer':
                        print 'LR-FCB-RETURN:', l, l.name
                        fcts = l.get_links('FACT')
                        for fct in fcts:
                            # First pass: gather composicao/action vocabulary.
                            for u in fct.lr.topicos:
                                if 'composicao' in u.dt:
                                    for d in u.sinapses:
                                        for dt in d.nr.dt:
                                            to_Search_C.append(dt)
                                if 'action' in u.dt:
                                    for d in u.sinapses:
                                        for dt in d.nr.dt:
                                            to_Search_A.append(dt)
                            # Second pass: combine with identificador values
                            # into concrete search phrases.
                            for u in fct.lr.topicos:
                                if 'identificador' in u.dt:
                                    for d in u.sinapses:
                                        for dt in d.nr.dt:
                                            if len(to_Search_A) == 0:
                                                for c in to_Search_C:
                                                    dt2 = dt + ' ' + c + ' brasil'
                                                    to_Search.append(dt2)
                                            elif len(to_Search_C) == 0:
                                                for c in to_Search_A:
                                                    dt2 = dt + ' ' + c + ' brasil'
                                                    to_Search.append(dt2)
                                            else:
                                                # Cross product of actions x compositions.
                                                for c2 in to_Search_A:
                                                    for c in to_Search_C:
                                                        dt2 = dt + ' ' + c + ' ' + c2 + ' brasil'
                                                        to_Search.append(dt2)
    import collect_it_fcb
    print 'to_Search:{', to_Search, '}'
    if len(to_Search) == 0:
        return
    collect_it_fcb.clean_marcador()
    collect_it_fcb.entry_Sents(to_Search, usr)
    return
def runc_layer(layer, ex_Data):
    """Extract focus objects from *layer* when it describes a class/definition
    ('defs*', 'interact.*', 'category'/'class' topicos).

    Object names are assembled from 'object' topico sinapses, filtered through
    the *not_in* stop list; newly found names are also appended to *ex_Data*
    (mutated in place). Returns the list of focus layers, or None when no
    class marker was found.
    """
    objects = []
    ref_opcode = ''
    # Qualifier words that must NOT become part of an object name.
    not_in = [
        'alias', 'object3', 'class', 'indicator', 'defin', 'simple-collect',
        'sample', 'equilibrio', 'know', 'mean', 'qtd-', 'time', 'indicative',
        'interact', 'state', 'accept', 'way', 'need', 'intensity', 'grow',
        'order', 'simple-', 'value', 'enum', 'event', 'news', 'referencial',
        'compare', 'number'
    ]
    # Relaction-oper-opcode: original opcode of the qualifiers when not specified
    #=====================================================
    def get_o_ob():
        # NOTE(review): `ob` is not defined in any enclosing scope — dead code.
        for s in objects:
            if s.name == ob:
                return s
        return None
    #=====================================================
    class_fnd = False
    if True:
        print 'RTS(1-S.2):------------', '[', ex_Data, ']'
        r = layer
        for s in r.topicos:
            # Detect a class/definition marker anywhere in the layer.
            for kdata in s.dt:
                if kdata.lower().startswith('defs') or kdata.lower().startswith('interact.'):
                    class_fnd = True
                    break
            if 'category' in s.dt or 'class' in s.dt:
                class_fnd = True
            if 'object' in s.dt:
                for ds in s.sinapses:
                    if 'category' in ds.nr.dt or 'defs' in ds.nr.dt:
                        class_fnd = True
            print 'Topico:', s.dt
            #====================================================
            tp = s
            if 'object' in tp.dt:
                sn_dt = ''
                lst = []
                print 'obj.alias.snp:', tp.sinapses
                # has_spaced: does any sinapse carry a real (non-marker) value?
                has_spaced = False
                analiz = []
                for skl1 in tp.sinapses:
                    for dp in skl1.nr.dt:
                        analiz.append(dp)
                        if umisc.trim(dp) != '' and umisc.trim(dp) != '\n' and umisc.trim(dp) != 'start' and umisc.trim(dp) != '.':
                            has_spaced = True
                print 'has_spaced:', has_spaced, analiz
                if len(tp.sinapses) == 0 or (not has_spaced):
                    # No usable sinapse data: seed the name from ex_Data.
                    for d in ex_Data:
                        sn_dt = ex_Data[0]
                sn_dt = sn_dt.replace('\n', '')
                sn_dt = sn_dt.replace('\t', '')
                sn_dt = sn_dt.replace('.', ' ')
                #sn_dt = filter(lambda x: x in string.printable, sn_dt)
                # Drop pure start/end marker sinapses.
                for sn in tp.sinapses:
                    for a in sn.nr.dt:
                        if a.lower() == 'start' or a.lower() == 'end':
                            tp.sinapses.remove(sn)
                            break
                for sn in tp.sinapses:
                    ah = False
                    for a in sn.nr.dt:
                        for cin in not_in:
                            if cin in a.lower() and a.lower() != 'evento' and a.lower() != 'eventos' and (not a.lower().startswith('interact.')):
                                ah = True
                                continue
                        sn_dt = umisc.trim(sn_dt) + (' ' + a + ' ')
                    if ah:
                        # Stop-listed sinapses are kept aside as topico sources.
                        lst.append(sn.nr)
                        continue
                sn_dt = umisc.trim(sn_dt)
                print 'Collect layer.sn_dt:', sn_dt
                if umisc.trim(sn_dt) != '$$id$$' and umisc.trim(sn_dt) != '':
                    new_k = sn_dt
                    if umisc.trim(new_k) not in ex_Data:
                        ex_Data.append(umisc.trim(new_k))
                        print 'ex:data:', ex_Data
            #====================================================
            # Debug dump, three sinapse levels deep.
            print 'SNS:------------------------------------------'
            for s1 in s.sinapses:
                print ' ', s1.nr.dt
                for s2 in s1.nr.sinapses:
                    print '  ', s2.nr.dt
                    for s3 in s2.nr.sinapses:
                        print '   ', s3.nr.dt
            print '----------------------------------------------'
        print 'RTS(1-S.2)(END):------------'
    #===============================
    print 'need_fnd(2):', class_fnd
    if not class_fnd:
        return
    obj_foco = []
    ex_Data2 = []
    #=
    #print 'N tops:' , len(layer.topicos)
    found_obj_1 = False
    first_t = True
    print 'dump.layer:', layer.name
    layer.dump_layer()
    for t in layer.topicos:
        tp = t
        nr_t = t.dt[0]
        nr_t = nr_t.replace('\n', '')
        if umisc.trim(nr_t) == '' or umisc.trim(nr_t) == '\n':
            continue
        #======================================
        if 'object' in t.dt:
            # Second pass: same name assembly as above, but this time a new
            # focus layer is actually created.
            sn_dt = ''
            lst = []
            print 'obj.alias.snp:', tp.sinapses
            has_spaced = False
            analiz = []
            for skl1 in tp.sinapses:
                for dp in skl1.nr.dt:
                    analiz.append(dp)
                    if umisc.trim(dp) != '' and umisc.trim(dp) != '\n' and umisc.trim(dp) != 'start' and umisc.trim(dp) != '.':
                        has_spaced = True
            print 'has_spaced:', has_spaced, analiz
            if len(tp.sinapses) == 0 or (not has_spaced):
                for d in ex_Data:
                    sn_dt = ex_Data[0]
            sn_dt = sn_dt.replace('\t', '')
            sn_dt = sn_dt.replace('\n', '')
            sn_dt = sn_dt.replace('.', ' ')
            #sn_dt = filter(lambda x: x in string.printable, sn_dt)
            for sn in tp.sinapses:
                ah = False
                for a in sn.nr.dt:
                    if (a.lower().startswith('interact.')):
                        lst.append(sn.nr)
                        continue
                    for cin in not_in:
                        if cin in a.lower() and a.lower() != 'evento' and a.lower() != 'eventos' and a.lower() != 'start' and a.lower() != 'end':
                            ah = True
                            continue
                    sn_dt = umisc.trim(sn_dt) + (' ' + a + ' ')
                if ah:
                    lst.append(sn.nr)
                    continue
            sn_dt = umisc.trim(sn_dt)
            print 'Collect layer.sn_dt(2):', sn_dt
            if umisc.trim(sn_dt) != '$$id$$' and umisc.trim(sn_dt) != '':
                lay = mdNeural.mdLayer()
                new_k = sn_dt
                if umisc.trim(new_k) not in ex_Data:
                    ex_Data.append(umisc.trim(new_k))
                    print 'ex:data:', ex_Data
                lay.name = umisc.trim(new_k)
                # NOTE(review): sn_dt is non-empty inside this branch, so the
                # block below looks unreachable; kept as in the original.
                if umisc.trim(sn_dt) == '':
                    if len(ex_Data) > 0:
                        print 'ex_Data(1)-->', ex_Data
                        for s in ex_Data:
                            if umisc.trim(s.name) == '':
                                lay.name = s.name
                                break
                for topico_rsfd in lst:
                    lay.set_topico_nr(topico_rsfd)
                #====
                #print 'lay-name:',lay.name
                objects.append(lay)
                # check whether the object name has more than 3 words; restrict until ['o','a'] is found
                #lay.name=sep_multiple_data_unkwon(lay.name)
                print 'Collect layer(2):', lay.name
                obj_foco.append(lay)
                ex_Data.append(lay)
                found_obj_1 = True
            first_t = False
            if len(obj_foco) > 0 and (not found_obj_1):
                # No new object this time: attach pending topicos to the last one.
                print 'ex_Data-->', obj_foco
                lay = obj_foco[len(obj_foco) - 1]
                for topico_rsfd in lst:
                    lay.set_topico_nr(topico_rsfd)
            elif len(ex_Data) > 0 and (not found_obj_1):
                print 'ex_Data-->', ex_Data
    rel1 = False
    # check for the mandatory fields (necessary characteristics)
    mdn = False
    for t in layer.topicos:
        topico_rsf = t
        rel1 = False
        #=======================================================
        ref_opcode = t.dt[0]
        #==============================================
        # Copy every non-object topico onto each focus object.
        if ref_opcode not in ['', None, 'object']:
            for ob1 in obj_foco:
                ob1.set_topico_nr(topico_rsf)
    #=============================================
    if not mdn and layer in obj_foco:
        obj_foco.remove(layer)
    # NOTE(review): removing while iterating skips elements; kept as original.
    for o_f in obj_foco:
        #print 'o_f.name:',o_f.name,len(o_f.name)
        if o_f.name == 'undef' or umisc.trim(o_f.name) == '':
            obj_foco.remove(o_f)
    print 'OBS.return:', len(obj_foco)
    return obj_foco
def runc_layer(layer, ex_Data):
    """Event variant: extract focus objects from *layer* when it carries an
    'event' marker (but not 'event-stop').

    Non-structural topicos are buffered in *lst* and attached to the next
    object created from an 'event-link'/'start'/'way.mid' topico. Returns the
    list of focus layers, or None when no event marker was found.
    """
    objects = []
    ref_opcode = ''
    # Relaction-oper-opcode: original opcode of the qualifiers when not specified
    #=====================================================
    def get_o_ob():
        # NOTE(review): `ob` is not defined in any enclosing scope — dead code.
        for s in objects:
            if s.name == ob:
                return s
        return None
    #=====================================================
    class_fnd = False
    if True:
        print 'RTS(1EV-S):------------', '[', ex_Data, ']'
        r = layer
        for s in r.topicos:
            for kdt in s.dt:
                if ('event' in kdt or 'startssssssss' in kdt) and not 'event-stop' in kdt:
                    class_fnd = True
            if 'object' in s.dt:
                for ds in s.sinapses:
                    if ('event' in ds.nr.dt or 'startsssssssssss' in ds.nr.dt) and not 'event-stop' in ds.nr.dt:
                        class_fnd = True
            print 'Topico:', s.dt
            print 'SNS:++++++++++++++++++'
            for s1 in s.sinapses:
                print s1.nr.dt
            print '++++++++++++++++++'
        print 'RTS(1EV-S)(END):------------'
    #===============================
    if not class_fnd:
        return
    obj_foco = []
    #print 'N tops:' , len(layer.topicos)
    found_obj_1 = False
    # NOTE(review): first_t is never set to False anywhere in this function —
    # confirm the (not first_t) branch below is really meant to be dead.
    first_t = True
    lst = []
    way_mid = False
    # Buffer every topico that is not structural (indicador/event-link/object/way.mid).
    for t in layer.topicos:
        if first_t and 'indicador' in t.dt:
            pass
        elif 'event-link' in t.dt:
            pass
        elif 'object' in t.dt:
            pass
        elif 'way.mid' in t.dt:
            #[way.mid,,way.mid]
            way_mid = True
            pass
        else:
            lst.append(t)
    # In way.mid mode, event-links themselves become attachable topicos.
    for t in layer.topicos:
        if 'event-link' in t.dt and way_mid:
            lst.append(t)
    for t in layer.topicos:
        tp = t
        nr_t = t.dt[0]
        if nr_t == '':
            continue
        #======================================
        if (not first_t) and 'indicador' in t.dt:
            # A later indicador is re-tagged as the patient of the last object.
            if len(obj_foco) > 0:
                o = obj_foco[len(obj_foco) - 1]
                t.dt = ['pacient']
                o.set_topico_nr(t)
        #====================================== an indicador at the start of a sentence carries the meaning of agent and object
        if (('event-link' in t.dt or 'start' in t.dt) and not way_mid) or 'way.mid' in t.dt:
            # Qualifier words that must NOT become part of an object name.
            not_in = [
                'alias', 'estimate', 'object3', 'place', 'class', 'indicador',
                'indicator', 'defin', 'simple-collect', 'sample', 'equilibrio',
                'know', 'mean', 'qtd-', 'time', 'indicative', 'interact',
                'state', 'accept', 'way', 'need', 'intensity', 'grow', 'order',
                'simple-', 'value', 'enum', 'event', 'news', 'referencial',
                'compare', 'number', 'ele', 'ela'
            ]
            if way_mid:
                # way.mid mode allows 'indicator' and 'way' words through.
                not_in = [
                    'alias', 'estimate', 'object3', 'place', 'class', 'indicador',
                    'defin', 'simple-collect', 'sample', 'equilibrio', 'know',
                    'mean', 'qtd-', 'time', 'indicative', 'interact', 'state',
                    'accept', 'need', 'intensity', 'grow', 'order', 'simple-',
                    'value', 'enum', 'event', 'news', 'referencial', 'compare',
                    'number', 'ele', 'ela'
                ]
            sn_dt = ''
            for sn in t.sinapses:
                ah = False
                for a in sn.nr.dt:
                    for cin in not_in:
                        if cin in a.lower():
                            ah = True
                            continue
                if ah:
                    continue  # stop-listed sinapse: skip entirely
                email_ad = False
                for a in sn.nr.dt:
                    # NOTE(review): substring test against the literal
                    # 'site-address' — confirm `a in ['site-address']` was not
                    # intended.
                    if a in 'site-address':
                        email_ad = True
                if email_ad:
                    # Addresses are concatenated without separators, one level down.
                    for k in sn.nr.sinapses:
                        for k1 in k.nr.dt:
                            sn_dt += (k1)
                if not email_ad:
                    for a in sn.nr.dt:
                        sn_dt += (a + ' ')
            print 'T.dt:', t.dt, '->[', sn_dt, ']'
            if True:
                lay = mdNeural.mdLayer()
                lay.name = umisc.trim(sn_dt)
                #======================
                # Attach buffered topicos to this object, then reset the buffer.
                for topico_rsfd in lst:
                    lay.set_topico_nr(topico_rsfd)
                lst = []
                #====
                objects.append(lay)
                obj_foco.append(lay)
    return obj_foco
def runc_layer(layer): objects=[] ref_opcode='' # Relaction-oper-opcode opcode original dos qualificadores se nao especificado #===================================================== def get_o_ob(): for s in objects: if s.name==ob: return s return None #===================================================== obj_foco=[] #print 'N tops:' , len(layer.topicos) for t in layer.topicos: tp=t nr_t=t.dt[0] if len(tp.sinapses) > 1 : print '{',t.dt,'....','[',len(t.sinapses),']','}' if len(tp.sinapses) > 100: for ss in tp.sinapses: print ss.nr.dt if 'identificador' in t.dt: for sn in tp.sinapses: sn_dt='' for s1 in sn.nr.dt: sn_dt+=s1 lay=mdNeural.mdLayer() lay.name=sn_dt objects.append(lay) obj_foco.append(lay) if 'realid2' in t.dt: print '{realid2}:',len(tp.sinapses) for sn in tp.sinapses: sn_dt='' for s1 in sn.nr.dt: sn_dt =s1 lay=mdNeural.mdLayer() lay.name=sn_dt print '{{',sn_dt,'}}' lay.set_topico('identificador') lay.set_topico('{realid}') objects.append(lay) obj_foco.append(lay) #print 'obj:',sn_dt rel1=False for t in layer.topicos: topico_rsf=t #======================================================= ref_opcode=t.dt[0] if 'composicao' in t.dt or 'hierarchy' in t.dt : rel1=False #============================================== if ref_opcode in ['quality-good','need','way','guess','class','referencial','action','indicador','choice','decison','option','guess','state']: rel1=True if rel1: for sn in tp.sinapses: sn_dt=None for s1 in sn.nr.dt: sn_dt=s1 break opc=ref_opcode for ob1 in obj_foco: referencia_c=False # referencia,link -> achar objeto e linkar , senao, apeas conecta if ref_opcode in ['refers','links','needs','ways','reference','referencial','referencia'] : referencia_c = True # ==== if not referencia_c: ob1.set_topico_nr(topico_rsf) else : print '{Connect reference...}' obj_k=get_o(sn_dt) if obj_k == None: #criar obj obj_k=mdNeural.mdLayer() obj_k.name=sn_dt ob1.set_link_ds(obj_k,opc,[],[]) #============================================= if 'composicao' in t.dt or 
'hierarchy' in t.dt : sn_dt='' for ob1 in obj_foco: ob1.set_topico_nr(t) return objects
def process_termo(termo, usr, purp, start_c, path_j):
    """Drive the collection pipeline for search term *termo*.

    With start_c == 0 this acts as the master: it spawns one secondary job
    (thread) per 15-page window, waits for them all, cleans up and returns.
    With start_c > 0 it acts as a worker: it processes its own page window,
    posts the resulting ontology layers/links/state history, and finally
    writes the aggregated text into clipping_return and/or INBOX_MSG.
    """
    pages = []
    ths2 = []
    no_start = False
    if start_c == 0:
        # Master mode: fan the page range out to secondary jobs.
        no_start = True
        clean_Trace(usr)
        start_c = 1
        all_pgs = get_pages_len(usr, termo, purp)
        ib = 0
        lb = all_pgs
        print 'Process pages:', (all_pgs)
        w_fnds = True
        while ib < all_pgs:
            if ib < 15:
                # First window is handled implicitly; just advance the cursor.
                start_c += 15
                w_fnds = False
            else:
                #start_job_sec(ib+1,path_j)
                print 'Start collection at:', start_c - 15
                ths2.append(thread_cntl())
                thread.start_new_thread(
                    start_job_sec,
                    ((start_c - 15), usr, path_j, ths2[len(ths2) - 1]))
                w_fnds = True
            ib += 15
        if not w_fnds:
            # Launch the trailing window that the loop did not cover.
            print 'Start collection at:', start_c - 15
            ths2.append(thread_cntl())
            thread.start_new_thread(
                start_job_sec,
                ((start_c - 15), usr, path_j, ths2[len(ths2) - 1]))
            w_fnds = False
    else:
        pass
    #========================================================================================================================================
    #========================================================================================================================================
    if no_start:
        # Poll the secondary jobs every 10s until all report finished.
        ind_col = 0
        while True and len(ths2) > 0:
            print 'wait for secondary process...', len(ths2) - ind_col
            fnds_t = False
            ind_col = 0
            for ths1 in ths2:
                if not ths1.finished:
                    fnds_t = True
                if ths1.finished:
                    ind_col += 1
            if fnds_t:
                time.sleep(10)
                continue
            else:
                break
    if no_start:
        clean_Trace(usr)
        finish_Trace(usr)
        #finish info cache
    #========================================================================================================================================
    if no_start:
        # Master mode ends here; only workers run the processing below.
        return
    #========================================================================================================================================
    job_index = int(start_c / 100)
    job_index += 1
    pages = get_pages(usr, start_c, termo, purp)
    print 'Process pages:(', len(pages), ')'
    #=====================================
    [onto_basis, [allp, allp2]] = mount_node(termo, usr, purp)
    #========================================
    cind2 = 0
    threads_fd = []
    result_onto_tree_er = []  # characteristics, descriptions, etc...
    result_onto_tree_bpm = []  # action flow, sequence of actions
    result_linked = []
    cnt_process = 0
    addresses = []
    all_size_pg = len(pages)
    pg_index = start_c
    d1 = 1
    print '================================================'
    print 'Init Process tree for pages:', len(pages), ' Start at:', start_c
    print '================================================'
    for pagina in pages:
        if pagina.dt1 == None:
            continue
        all_p = []
        endereco = pagina.dt1
        lines_doc = pagina.dt2
        all_p.append([endereco, lines_doc])
        cnt_process += 1
        d1 += 1
        cind2 += 1
        if len(all_p) > 0:
            print 'Process page num:', pg_index
            process_page(all_p, usr, purp, cind2, None, cind2, pg_index,
                         all_size_pg, job_index, addresses,
                         result_onto_tree_er, onto_basis)
            #================
            print 'End page:', pg_index, ' of total:', len(pages)
            #================
        pg_index += 1
    # process the information (find the ractionlines linked by 'CALL')
    rts = []
    ind = 0
    def get_result_links(lr):
        # Return the link group containing *lr*, or an empty list.
        for s in result_linked:
            for s2 in s:
                if s2 == lr:
                    return s
        return []
    if len(pages) <= 0:  # <1>
        return
    print 'Prepare to call rcts:', start_c
    for data_c in result_onto_tree_er:
        runc = result_onto_tree_bpm[ind]  # original rct: the ractionline that processed the information
        #sd_link=result_linked[ind]
        sd_link = get_result_links(data_c)
        runc.process_Call_RCT(conn, data_c, usr, sd_link)
        rts.append(data_c)
        ind += 1
    #========================================================================================================================================
    print 'ER:', len(result_onto_tree_er)
    for e in result_onto_tree_er:
        print '----------------------------------------'
        for t in e.topicos:
            print t.dt
            print '**************************'
            for s in t.sinapses:
                print s.nr.dt
            print '**************************'
        print '----------------------------------------'
        #break
    # retorno:
    ''' ->gravar na ontologia semantic_objects2, os qualificadores de definicao,composicao( achar objectos com destinacao(info-composicao), que contem as informacoes de propriedades ) ->gravar na ontologia semantic_objects2, os qualificadores de estado( achar objectos com destinacao(info-state), que contem as informacoes de estado->links para cada momento das propriedades ) ->gravar na ontologiao links_objetcts os qualificadores definidores de relacao , link ,conexao, etc.. ( achar objectos com destinacao(info-relaction), que contem as informacoes de relacoes ) ->os qualificadores de filtro,foco informados na sentenca do clipping seram utilizados para o retorno no clipping '''
    layers_processed = []
    for e in result_onto_tree_er:
        #================
        topicos_composicao = []  # ->semantic_objects2
        topicos_relaction = []  # ->semantic_relactions2
        topicos_infostate = []  # ->semantic_infostate
        objs_prs = get_objs_purp(e.name, 'COMPOSICAO', usr)
        tops_cmp_prs = get_objectdt_by(objs_prs, usr)
        objs_prs = get_objs_purp(e.name, 'INFO-STATE', usr)
        tops_infos_prs = get_objectdt_by(objs_prs, usr)
        objs_prs = get_objs_purp(e.name, 'RELACTION', usr)
        tops_rel_prs = get_objectdt_by(objs_prs, usr)
        objets_Real_post = []  # REAL-ONTOLOGY-POST => qualifiers that decide whether this joins the primary knowledge ontology -> privileged instruction
        #================
        layer_post = mdNeural.mdLayer()
        layers_processed.append(layer_post)
        foco_h = []  # -> used to focus the elements relevant to the observation, for the history
        foco_h2 = []
        #========================
        for t in e.topicos:
            for d in t.dt:
                if d.upper() == 'FOCO':
                    # NOTE(review): 't.sinases' — elsewhere spelled 'sinapses';
                    # this would raise AttributeError if ever reached. Confirm.
                    for s in t.sinases:
                        foco_h2.append(s)
                        for d2 in s.nr.dt:
                            foco_h.append(d2)
        #========================
        for t in e.topicos:
            for d in t.dt:
                if d.upper() == 'REAL-ONTOLOGY-POST':
                    for s in t.sinases:
                        for d2 in s.nr.dt:
                            #[objecto]
                            objets_Real_post.append([d2])
            #==================================
            fnd = False
            for d in t.dt:
                if not fnd:
                    if d.upper() == 'IDENTIFICADOR':
                        for s in t.sinases:
                            if not fnd:
                                for d2 in s.nr.dt:
                                    obj_nm = d2
                                    layer_post.name = obj_nm
                                    break
            #==================================
            fnd = False
            for d in t.dt:
                if not fnd:
                    for d2 in tops_cmp_prs:
                        if d.upper() == d2.upper():
                            fnd = True
                            break
            if fnd:
                topicos_composicao.append(t)
            #==================================
            fnd = False
            for d in t.dt:
                if not fnd:
                    for d2 in tops_rel_prs:
                        if d.upper() == d2.upper():
                            fnd = True
                            break
            if fnd:
                topicos_relaction.append(t)
            #==================================
            fnd = False
            for d in t.dt:
                if not fnd:
                    for d2 in tops_infos_prs:
                        if d.upper() == d2.upper():
                            fnd = True
                            break
            if fnd:
                topicos_infostate.append(t)
        #=
        for t_com in topicos_composicao:
            for s in t_com.sinapses:
                # NOTE(review): copies t_com.nr once per sinapse — confirm
                # s.nr was not intended.
                layer_post.set_topico_nr(t_com.nr)
        #=====================
        # links
        links = []
        for lnk_s in topicos_relaction:
            dt = []
            # NOTE(review): iterates t_com (leftover from the loop above) and
            # the undefined name `nr` — this block would raise NameError if
            # topicos_relaction is non-empty. Confirm intent before relying on it.
            for s in t_com.sinapses:
                for d in nr.dt:
                    o = get_object_by_data(dt, usr)
                    if o != None:
                        layer_post.set_link(o, s.opcode)  # opcode => link, reason for the connection
        if len(objets_Real_post) == 0:
            post_object_by_data2(layer_post, usr, termo, foco_h2)
        else:
            # if present in objets_Real_post, post as primary ontology
            post_object_by_data(layer_post, usr, termo, foco_h2)
        # history
        for t_hist in topicos_infostate:
            # history: identifier of the referential
            state_type = t_hist.dt
            composicao = []
            rels = []
            #=========
            if len(foco_h) == 0:
                for t_com in topicos_composicao:
                    composicao.append(t_com)
            else:
                for t_com in topicos_composicao:
                    fnd = False
                    for f in foco_h:  # composicao: elements considered
                        if not fnd:
                            for s in t_com.sinapses:
                                if f.upper() in s.dt:
                                    composicao.append(t_com)
                                    fnd = True
                                    break
                        else:
                            break
            #========= lnks
            if len(foco_h) == 0:
                for t_com in topicos_relaction:
                    rels.append(t_com)
            else:
                for t_com in topicos_relaction:
                    fnd = False
                    for f in foco_h:  # rels: links considered
                        if not fnd:
                            for s in t_com.sinapses:
                                if f.upper() in s.dt:
                                    rels.append(t_com)
                                    fnd = True
                                    break
                        else:
                            break
            post_datah_state(state_type, layer_post.name, composicao, rels, usr)
    #==================================
    # filter/focus describing what to extract from the objects posted above
    #==================================
    s_foco = []
    s_restricao = []
    s_filtro = []
    s_m = []  # msg or clipping
    for lay in onto_basis.nodesER:
        nodes2 = lay.get_links('FACT')
        for lay in nodes2:
            lay = lay.lr
            for top in lay.topicos:
                for dts in top.dt:
                    if dts.upper() == "FILTRO":
                        for s in top.sinapses:
                            s_filtro.append(s.nr)
                    if dts.upper() == "FOCO":
                        for s in top.sinapses:
                            s_foco.append(s.nr)
                    if dts.upper() == "RESTRICAO":
                        for s in top.sinapses:
                            s_restricao.append(s.nr)
                    if dts.upper() == "PURPOSE-DST":
                        for s in top.sinapses:
                            for ds1 in s.nr.dt:
                                s_m.append(ds1)
    #==================================
    ''' 1 -setar foco nos objectos 2 -caracteristicas + links 3 -layout-out com interesses 4 -layout-code para extrair informacao 5 -inserir no clipping return '''
    for lr in layers_processed:
        lr_name = lr.name
        #foco
        consid = []
        #============================================
        for f in s_foco:
            for nr2 in lr.topicos:
                if lr.compare_dt_depend(conn, usr, purp, f, nr2, []):
                    consid.append(nr2)
        #============================================
        for f in s_filtro:
            for nr2 in lr.topicos:
                if lr.compare_dt_depend(conn, usr, purp, f, nr2, []):
                    consid.append(nr2)
        #============================================
        for f in s_restricao:
            for nr2 in lr.topicos:
                if lr.compare_dt_depend(conn, usr, purp, f, nr2, []):
                    consid.append(nr2)
        #============================================
        for c in consid:
            lr.put_publish(c)
        #============================================
    #========================================================================================================================================
    #========================================================================================================================================
    data_return = []
    # char layoutcode
    ind_c = 0
    for data_c in result_onto_tree_er:
        #==========
        cmps_nr = layers_processed[ind_c].get_all()
        ind_c += 1
        #==========
        # NOTE(review): indexes with `ind` (left at its final value by the
        # earlier call loop), not ind_c-1 — confirm this is intended.
        runc = result_onto_tree_bpm[ind]  # original rct: the ractionline that processed the information
        addr_url = addresses[ind]
        layout_codes = runc.get_links('LAYOUT-CODE')
        nms = []
        for d in result_onto_tree_bpm:
            nms.append(d.lr.name)
        rts2 = prepare_search_customlayouts(nms, cmps_nr, usr)  #purposes,dts,usr
        data_return.append(rts2)
    #========================================================================================================================================
    #
    #====================================
    all_txt = ''
    for dt in data_return:
        # post to the clipping
        all_txt += (' ' + dt)
    all_txt = umisc.trim(all_txt)
    if all_txt != "":
        #== classification/category
        #class=get_class(purpose) -> returns the purpose label
        class_c = get_class(purp, usr)
        #== post to the clipping
        if len(s_m) <= 0:
            cursor.execute(
                " insert into clipping_return(username,retorno,categoria,href) values ( %s , %s , %s , %s )",
                (usr, all_txt, class_c, ''))
        else:
            has_clipp = False
            has_msg = False
            for d in s_m:
                # decide where to insert
                if d.upper() == "CLIPPING":
                    if not has_clipp:
                        has_clipp = True
                        cursor.execute(
                            " insert into clipping_return(username,retorno,categoria,href) values ( %s , %s , %s , %s )",
                            (usr, all_txt, class_c, ''))
                elif d.upper() == "FEED":
                    if not has_msg:
                        has_msg = True
                        cursor.execute(
                            " insert into INBOX_MSG (username,MSG,SOURCE,REFERENCIAS) values ( %s , %s , %s , %s )",
                            (usr, all_txt, class_c, addr_url))
def runc_layer(layer, usr): #=============================================== def translate_year(mn): if mn == '$this-year': return str(date2.datetime.today().year) elif mn == '$next-year': return str(date2.datetime.today().year + 1) elif mn == '$next-two-year': return str(date2.datetime.today().year + 2) elif mn == '$next-three-year': return str(date2.datetime.today().year + 3) elif mn == '$next-four-year': return str(date2.datetime.today().year + 4) elif mn == '$next-five-year': return str(date2.datetime.today().year + 5) elif mn == '$next-six-year': return str(date2.datetime.today().year + 6) elif mn == '$next-seven-year': return str(date2.datetime.today().year + 7) elif mn == '$next-eigth-year': return str(date2.datetime.today().year + 8) elif mn == '$next-nine-year': return str(date2.datetime.today().year + 9) elif mn == '$next-ten-year': return str(date2.datetime.today().year + 10) else: return mn def translate_month(mn): if mn.lower() == 'janeiro': return 1 elif mn.lower() == 'fevereiro': return 2 elif mn.lower() == 'março' or mn.lower() == 'marco': return 3 elif mn.lower() == 'abril': return 4 elif mn.lower() == 'maio': return 5 elif mn.lower() == 'junho': return 6 elif mn.lower() == 'julho': return 7 elif mn.lower() == 'agosto': return 8 elif mn.lower() == 'setembro': return 9 elif mn.lower() == 'outubro': return 10 elif mn.lower() == 'novembro': return 11 elif mn.lower() == 'dezembro': return 12 else: return mn #=============================================== not_in = [ 'event', 'start', 'number', 'class-past', 'person', 'assunto', 'simple-collect', 'sample', 'equilibrio', 'know', 'mean', 'qtd-', 'time', 'indicative', 'interact', 'state', 'accept', 'way', 'need', 'intensity', 'grow', 'order', 'simple-', 'value', 'enum', 'event', 'news', 'referencial', 'compare', 'number', 'ele', 'ela' ] not_in2 = ['class-past', 'start-past'] not_in3 = ['e', 'do', 'da', 'dos', 'das'] not_in4 = ['state', 'event', 'start'] ref_opcode = '' global_uuid = 0 if True: print 
'RTS(1--2-I)(' + layer.name + '):------------' r = layer r.dump_layer() print 'RTS(1--2-I)(END):------------' obj_foco = [] prop_l = False abs_cnt = 1 #print 'N tops:' , len(layer.topicos) topics = [] reins_tps = [] for t in layer.topicos: if 'object' in t.dt or 'identificador' in t.dt: for sn in t.sinapses: ah = False for a in sn.nr.dt: for cin in not_in: if cin in a.lower(): ah = True continue if ah: reins_tps.append(sn.nr) for t in layer.topicos: if 'tstatic' in t.dt: # se tstatic, avalia apenas os topicos abaixo desse for s1 in t.sinapses: topics.append(s1.nr) else: topics.append(t) found_by_name = False if True: if umisc.trim(layer.name) != '$$id$$' and umisc.trim(layer.name) != '': lay = mdNeural.mdLayer() lay.name = layer.name #=== obj_foco.append(lay) found_by_name = True for t in topics: tp = t nr_t = t.dt[0] if nr_t == '': continue if 'object' in t.dt or 'identificador' in t.dt and not found_by_name: sn_dt = '' for sn in tp.sinapses: for s1 in sn.nr.dt: print 'Object.fnd:', s1, '->>', t.dt sn_dt += (s1 + ' ') rts = layer.s_get_ontology_ponderate(sn_dt, [], [], usr) for [ratting, la] in rts: obj_foco.append(la) break if len(rts) == 0: if umisc.trim(sn_dt) != '$$id$$': lay = mdNeural.mdLayer() lay.name = sn_dt #=== obj_foco.append(lay) #=== if len(obj_foco) == 0: # nao descreve um layer, é o proprio layer obj_foco.append(layer) prop_l = True i_need = [] i_sugest = [] i_guess = [] acts = [] event = [] for t in layer.topicos: reins_tps.append(t) start_data = None end_data = None for t in reins_tps: topico_rsf = t rel1 = False ref_opcode = t.dt[0] dt1 = '' dt2 = '' #============================================== if ref_opcode in ['interact.state.moment.startday']: for s in t.sinapses: for ds in s.nr.dt: d1 = ds if ds != '': d1 += '-' break #============================================== if ref_opcode in ['interact.state.moment.endday']: for s in t.sinapses: for ds in s.nr.dt: d2 = ds if ds != '': d2 += '-' break #============================================== 
if ref_opcode in ['interact.state.moment.month']: for s in t.sinapses: for ds in s.nr.dt: d1 += str(translate_month(ds)) d2 += str(translate_month(ds)) if ds != '': d2 += '-' d1 += '-' break #============================================== if ref_opcode in ['interact.state.moment.year']: for s in t.sinapses: for ds in s.nr.dt: d1 += translate_year(ds) d2 += translate_year(ds) #=========== start_data = d1 end_data = d2 #=========== break #============================================== if ref_opcode in ['interact.state.moment' ] or ref_opcode in ['interact.state.moment.mult']: event.append(t) #============================================== if ref_opcode in ['need'] or ref_opcode in [ 'interact.need' ] or ref_opcode in ['interact.info.value' ] or ref_opcode in ['interact.want']: i_need.append(t) #============================================= if ref_opcode in ['interact.can'] or ref_opcode in [ 'interaction.comunic.ask.can' ] or ref_opcode in ['interact.find' ] or ref_opcode in ['interesting-ask']: i_sugest.append(t) if ref_opcode in ['interact.guess']: i_guess.append(t) if ref_opcode in ['interact.get.action']: acts.append(t) #============================================== #=========================================================================== # need #=========================================================================== cl_fnd = False cn_cl = [] ls_link = [] print 'Process need:', i_need if not cl_fnd: # criar um objeto __abstract__ e implementar com o class recolhido dtnm = '' ah = False ah2 = False to_class_impl = [] for c_i in i_need: for sn in c_i.sinapses: to_class_impl.append(sn.nr) if True: print 'Parse need layers:', to_class_impl if len(to_class_impl) > 0: laycl = mdNeural.mdLayer() laycl.name = '[__need__]:' + str(global_uuid) #====================================================== for cl_impl_nr in to_class_impl: tps = laycl.set_topico('defs') tps.uuid = global_uuid laycl.set_nr_ch_a2(tps, cl_impl_nr, 'Composicao') for ev in event: 
laycl.set_topico_nr(ev) #====================================================== for a in acts: c = laycl.set_topico_nr(a) c.uuid = global_uuid #====================================================== if start_data != None and end_data != None: c = laycl.set_topico('interact.state.moment.start-data') c.uuid = global_uuid laycl.set_nr_ch(c, start_data, 'defs') #============ c = laycl.set_topico('interact.state.moment.end-data') c.uuid = global_uuid laycl.set_nr_ch(c, end_data, 'defs') #======================================================= ls_link.append(laycl) print '[global_uuid]:', global_uuid for la in obj_foco: for laycl in ls_link: laycl.name = la.name + '[__need__]:' + str(global_uuid) print 'Process i_need link :', laycl.name la.set_link_ds(laycl, 'need', '', '') #=========================================================================== # guess #=========================================================================== ls_link = [] cl_fnd = False cn_cl = [] ls_link = [] print 'Process guess:', i_guess if not cl_fnd: # criar um objeto __abstract__ e implementar com o class recolhido dtnm = '' ah = False ah2 = False to_class_impl = [] for c_i in i_guess: for sn in c_i.sinapses: to_class_impl.append(sn.nr) if True: print 'Parse guess layers:', to_class_impl if len(to_class_impl) > 0: laycl = mdNeural.mdLayer() laycl.name = '[__guess__]:' + str(global_uuid) #====================================================== for cl_impl_nr in to_class_impl: tps = laycl.set_topico('defs') tps.uuid = global_uuid laycl.set_nr_ch_a2(tps, cl_impl_nr, 'Composicao') #====================================================== for a in acts: r1 = laycl.set_topico_nr(a) r1.uuid = global_uuid #====================================================== ls_link.append(laycl) for la in obj_foco: for laycl in ls_link: laycl.name = la.name + '[__guess__]:' + str(global_uuid) print 'Process i_guess(' + la.name + ') link :', laycl.name la.set_link_ds(laycl, 'guess', '', '') 
#=========================================================================== # sugest #=========================================================================== cl_fnd = False cn_cl = [] ls_link = [] print 'Process sugest:', i_sugest if not cl_fnd: # criar um objeto __abstract__ e implementar com o class recolhido dtnm = '' ah = False ah2 = False to_class_impl = [] for c_i in i_sugest: for sn in c_i.sinapses: to_class_impl.append(sn.nr) if True: print 'Parse sugest layers:', to_class_impl if len(to_class_impl) > 0: laycl = mdNeural.mdLayer() laycl.name = '[__sugest__]:' + str(global_uuid) #====================================================== for cl_impl_nr in to_class_impl: tps = laycl.set_topico('defs') tps.uuid = global_uuid laycl.set_nr_ch_a2(tps, cl_impl_nr, 'Composicao') #====================================================== for a in acts: c = laycl.set_topico_nr(a) c.uuid = global_uuid #====================================================== ls_link.append(laycl) for la in obj_foco: for laycl in ls_link: laycl.name = la.name + '[__sugest__]:' + str(global_uuid) print 'Process i_sugest(' + la.name + ') link :', laycl.name la.set_link_ds(laycl, 'sugest', '', '') #=========================================================================== for lr_p in obj_foco: if umisc.trim(lr_p.name) != '': print 'Prepare post object:', lr_p.name az = mdTb.Zeus_Mode mdTb.Zeus_Mode = False lr_p.s_post_object_by_data_es(lr_p, usr) mdTb.Zeus_Mode = az return obj_foco
def runc_layer(layer, usr): return ref_opcode = '' ''' if True: print 'RTS(12):------------' r=layer for s in r.topicos: print 'Topico:',s.dt print 'SNS:++++++++++++++++++' for s1 in s.sinapses: print s1.nr.dt print '++++++++++++++++++' print 'RTS(12)(END):------------' ''' obj_foco = [] i_quality = [] prop_l = False #print 'N tops:' , len(layer.topicos) topics = [] for t in layer.topicos: if 'tstatic' in t.dt: # se tstatic, avalia apenas os topicos abaixo desse for s1 in t.sinapses: topics.append(s1.nr) else: topics.append(t) for t in topics: tp = t nr_t = t.dt[0] if nr_t == '': continue if 'object' in t.dt or 'identificador' in t.dt: sn_dt = '' for sn in tp.sinapses: for s1 in sn.nr.dt: print 'Object.fnd:', s1, '->>', t.dt sn_dt += (s1 + ' ') rts = layer.s_get_ontology_ponderate(sn_dt, [], [], usr) for [ratting, la] in rts: obj_foco.append(la) break if len(rts) == 0: if umisc.trim(sn_dt) != '$$id$$': lay = mdNeural.mdLayer() lay.name = sn_dt #=== obj_foco.append(lay) #=== if len(obj_foco) == 0: # nao descreve um layer, é o proprio layer obj_foco.append(layer) prop_l = True ''' ''' i_ev = [] i_ev_data = [] i_state = [] i_data_f = [] i_data_interv = [] i_ev_tps = [] dt_ini = None i_month = None i_year = None i_day = None for t in layer.topicos: topico_rsf = t rel1 = False ref_opcode = t.dt[0] if 'object' in t.dt or 'identificador' in t.dt: continue #============================================== if 'quality' in t.dt: i_quality.append(t) if 'event' in t.dt: i_ev.append(t) elif 'event-data' in t.dt: i_ev_data.append(t) elif 'event-data' in t.dt: i_ev_data.append(t) elif 'event-data-month' in t.dt: i_month = (t) #========================= elif 'event-data-year' in t.dt: i_year = (t) i_data_f.append([i_day, i_month, i_year]) i_day = None i_month = None i_year = None #========================= elif 'event-data-day' in t.dt: i_day = (t) #========================= elif 'state' in t.dt: i_state.append(t) elif 'event-data-ini' in t.dt: dt_ini = t elif 'event-data-end' in 
t.dt: if dt_ini != none: i_data_interv.append([dt_ini, t]) dt_ini = None else: i_ev_tps.append(t) #============================================== #=========================================================================== # mount event #=========================================================================== cl_fnd = False cn_cl = [] ls_link = [] # == for c_i in i_ev: # == if prop_l: layer.topicos.remove(c_i) #==== dt = c_i.dt # por classe e por caracts obs = layer.s_get_ontology_s(dt[0], [], [], usr) if len(obs) > 0: cl_fnd = True for i in obs: ls_link.append(o) else: cn_cl.append(dt[0]) if not cl_fnd: for dt in cn_cl: # procurar as informacoes nas caracts do obj obs = layer.s_get_ontology_s(dt, cn_cl, [], usr) if len(obs) > 0: cl_fnd = True for i in obs: ls_link.append(o) if not cl_fnd: # criar um objeto __abstract__ e implementar com o class recolhido laycl = None if len(i_data_interv): for [ini, fim] in i_data_f: c_ini = '' c_fim = '' if ini != None: for s in ini.sinapses: for d in s.nr.dt: if umisc.trim(c_ini) != '': c_ini += (' ') c_ini += (d) #== if c_fim == None: continue for s in fim.sinapses: for d in s.nr.dt: if umisc.trim(c_fim) != '': c_fim += (' ') c_fim += (d) laycl = mdNeural.mdLayer() laycl.name = c_ini + '|' + c_fim ls_link.append(laycl) if len(i_data_f): for [day, month, year] in i_data_f: c_day = '' c_month = '' c_year = '' if day != None: for s in day.sinapses: for d in s.nr.dt: if umisc.trim(c_day) != '': c_day += (' ') c_day += (d) #== if month != None: for s in month.sinapses: for d in s.nr.dt: if umisc.trim(c_month) != '': c_month += (' ') c_month += (d) #== if year == None: continue for s in year.sinapses: for d in s.nr.dt: if umisc.trim(c_year) != '': c_year += (' ') c_year += (d) laycl = mdNeural.mdLayer() laycl.name = c_day + '|' + c_month + '|' + c_year ls_link.append(laycl) elif len(i_ev): ev = '' for y in i_ev: for s in y.sinapses: for d in s.nr.dt: if umisc.trim(ev) != '': ev += (' ') ev += (d) laycl = mdNeural.mdLayer() 
laycl.name = ev ls_link.append(laycl) elif len(i_state): ev = '' for y in i_state: for s in y.sinapses: for d in s.nr.dt: if umisc.trim(ev) != '': ev += (' ') ev += (d) laycl = mdNeural.mdLayer() laycl.name = ev ls_link.append(laycl) elif len(i_ev_data): ev = '' for y in i_state: for s in y.sinapses: for d in s.nr.dt: if umisc.trim(ev) != '': ev += (' ') ev += (d) laycl = mdNeural.mdLayer() laycl.name = ev ls_link.append(laycl) if laycl != None: for c_i in i_ev_tps: laycl.set_topico_nr(c_i) for la in obj_foco: for laycl in ls_link: if len(i_quality) == 0: la.set_link(laycl, 'history') else: for q in i_quality: la.set_link(laycl, q.dt[0]) for lr_p in obj_foco: if umisc.trim(lr_p.name) != '': print 'Prepare post object:', lr_p.name lr_p.s_post_object_by_data_es(lr_p, usr) return obj_foco
def runc_layer(layer, ex_Data): objects = [] ref_opcode = '' # Relaction-oper-opcode opcode original dos qualificadores se nao especificado #===================================================== def get_o_ob(): for s in objects: if s.name == ob: return s return None #===================================================== class_fnd = False if True: print 'RTS(1-S):------------', '[', ex_Data, ']' r = layer for s in r.topicos: if 'class' in s.dt or 'classe' in s.dt or 'classificacao' in s.dt or 'defs' in s.dt: class_fnd = True if 'object' in s.dt: for ds in s.sinapses: if 'class' in ds.nr.dt or 'classe' in ds.nr.dt or 'classificacao' in ds.nr.dt or 'defs' in ds.nr.dt: class_fnd = True print 'Topico:', s.dt print 'SNS:++++++++++++++++++' for s1 in s.sinapses: print s1.nr.dt print '++++++++++++++++++' print 'RTS(1-S)(END):------------' #=============================== print 'class_fnd:', class_fnd if not class_fnd: return obj_foco = [] ex_Data2 = [] #print 'N tops:' , len(layer.topicos) found_obj_1 = False first_t = True for t in layer.topicos: tp = t nr_t = t.dt[0] if nr_t == '': continue #====================================== if (not first_t) and 'indicator' in t.dt: if len(obj_foco) > 0: o = obj_foco[len(obj_foco) - 1] t.dt = ['pacient'] o.set_topico_nr(t) #====================================== indicador em inicio de sentenca detem o significado de agente e object if first_t and 'indicator' in t.dt: sn_dt = '' lst = [] t.dt = ['agent'] for sn in tp.sinapses: ah = False for a in sn.nr.dt: if 'way' in a: ah = True continue if 'simple-collect' in a: ah = True continue if ah: continue if 'intensity' in sn.nr.dt: lst.append(sn.nr) continue for s1 in sn.nr.dt: if umisc.trim(s1) in [ ',', ';', '.', '?', '!', 'with', 'e', 'E', 'dos', 'das', 'do', 'da' ]: s1 = '' sn_dt += (s1 + ' ') if umisc.trim(sn_dt) != '$$id$$': lay = mdNeural.mdLayer() if umisc.trim(sn_dt) in [ ',', ';', '.', '?', '!', 'with', 'e', 'E', 'dos', 'das', 'do', 'da' ]: sn_dt = '' sn_dt = sn_dt.replace(',', 
'') sn_dt = sn_dt.replace('.', '') sn_dt = sn_dt.replace(';', '') sn_dt = sn_dt.replace('!', '') sn_dt = sn_dt.replace('?', '') sn_dt = sn_dt.replace('"', '') if umisc.trim(sn_dt).lower() == 'e': sn_dt = '' lay.name = umisc.trim(sn_dt) if umisc.trim(sn_dt) == '': if len(ex_Data) > 0: print 'ex_Data(1)-->', ex_Data for s in ex_Data: if umisc.trim(s.name) != '': lay.name = s.name break for topico_rsfd in lst: lay.set_topico_nr(topico_rsfd) lay.set_topico_nr(t) #==== objects.append(lay) obj_foco.append(lay) ex_Data.append(lay) found_obj_1 = True #=================================================== if 'object' in t.dt: sn_dt = '' lst = [] not_in = [ 'alias', 'object3', 'class', 'indicator', 'defin', 'simple-collect', 'sample', 'equilibrio', 'know', 'mean', 'qtd-', 'time', 'indicative', 'interact', 'state', 'accept', 'way', 'need', 'intensity', 'grow', 'order', 'simple-', 'value', 'enum', 'event', 'news', 'referencial', 'compare', 'number' ] for sn in tp.sinapses: ah = False for a in sn.nr.dt: for cin in not_in: if cin in a.lower( ) and a.lower() != 'evento' and a.lower() != 'eventos': ah = True continue if ah: lst.append(sn.nr) continue if 'place' in sn.nr.dt: lst.append(sn.nr) sn_dt = '' for sn2 in sn.nr.sinapses: for a in sn2.nr.dt: sn_dt += (' ' + a) if umisc.trim(sn_dt) != '$$id$$' and len(ex_Data2) == 0: lay = mdNeural.mdLayer() new_k = sn_dt lay.name = umisc.trim(new_k) if umisc.trim(sn_dt) == '': if len(ex_Data) > 0: print 'ex_Data(1.2)-->', ex_Data for s in ex_Data: if umisc.trim(s.name) != '': lay.name = s.name break #==== objects.append(lay) # lay.name = sep_multiple_data_unkwon(lay.name) # obj_foco.append(lay) ex_Data.append(lay) ex_Data2.append(lay) # else: lay = ex_Data2[0] lay.name += (sn_dt) #============================================== for topico_rsfd in lst: lay.set_topico_nr(topico_rsfd) topico_rsfd = [] continue if ('simple-collect-place2' in sn.nr.dt) or ('place-this' in sn.nr.dt) or ('place' in sn.nr.dt): lst.append(sn.nr) continue if 
('intensity' in sn.nr.dt) or ('person' in sn.nr.dt) or ('event' in sn.nr.dt): lst.append(sn.nr) continue for s1 in sn.nr.dt: if umisc.trim(s1) in [',', '"']: s1 = '' sn_dt += (s1 + ' ') if umisc.trim(sn_dt) != '$$id$$': lay = mdNeural.mdLayer() new_k = sn_dt lay.name = umisc.trim(new_k) if umisc.trim(sn_dt) == '': if len(ex_Data) > 0: print 'ex_Data(1)-->', ex_Data for s in ex_Data: if umisc.trim(s.name) != '': lay.name = s.name break for topico_rsfd in lst: lay.set_topico_nr(topico_rsfd) #==== #print 'lay-name:',lay.name objects.append(lay) # verificar se tem mais de 3 words no nome do objeto, restringir ate achar ['o','a'] lay.name = sep_multiple_data_unkwon(lay.name) obj_foco.append(lay) ex_Data.append(lay) found_obj_1 = True first_t = False if len(obj_foco) > 0 and (not found_obj_1): print 'ex_Data-->', obj_foco lay = obj_foco[len(obj_foco) - 1] for topico_rsfd in lst: lay.set_topico_nr(topico_rsfd) elif len(ex_Data) > 0 and (not found_obj_1): print 'ex_Data-->', ex_Data rel1 = False # verificar se tem os campos mandatorios( caracts necessarias ) mdn = False for t in layer.topicos: topico_rsf = t rel1 = False #======================================================= ref_opcode = t.dt[0] #============================================== if ref_opcode not in ['', None, 'object']: for ob1 in obj_foco: ob1.set_topico_nr(topico_rsf) #============================================= if not mdn and layer in obj_foco: obj_foco.remove(layer) ''' print 'DUMP:------------------!!' for ok in obj_foco: for te in ok.topicos: print 'TP:-------------' print te.dt print 'SNS:','-----------' for sg in te.sinapses: print sg.nr.dt print 'TP(END):-------------' print '-----------------------!!' ''' for o_f in obj_foco: #print 'o_f.name:',o_f.name,len(o_f.name) if o_f.name == 'undef' or umisc.trim(o_f.name) == '': obj_foco.remove(o_f) return obj_foco
#EXEC
def __init__(self):
    # Word-interpretation layer: constructed for its side effects only;
    # the local handle is deliberately not kept on the instance.
    word_interp = mdNeural.mdLayer()
    #==
    self.nodesER = []
    self.all_purpouses = []  # (sic) attribute name kept for compatibility
def process_termo(termo,usr,purp,start_c,path_j): pages=[] ths2=[] if start_c == 0: start_c=1 all_pgs=get_pages_len(usr) ib=0 lb=all_pgs print 'Process pages:',all_pgs while ib < all_pgs: if ib < 100: print 'Start collection at:',start_c pages=get_pages(usr,start_c) start_c+=100 else: #start_job_sec(ib+1,path_j) ths2.append(thread_cntl()) thread.start_new_thread(start_job_sec,(ib+1,path_j,ths2[len(ths2)-1]) ) ib+=100 else: print 'Start collection on:',start_c #======================================================================================================================================== #======================================================================================================================================== ind_col=0 while True and len(ths2) > 0: print 'wait for secondary process...',len(ths2)-ind_col fnds_t=False ind_col=0 for ths1 in ths2: if not ths1.finished:fnds_t=True if ths1.finished: ind_col+=1 if fnds_t: time.sleep(10) continue else: break #======================================================================================================================================== #======================================================================================================================================== #===================================== [onto_basis,allp]=mount_node(termo,usr,purp) #======================================== cind2=0 threads_fd=[] result_onto_tree_er=[] # caracteristicas,descricoes,etc... 
result_onto_tree_bpm=[] # fluxo de acoes, sequencia de actions result_linked=[] #print 'Init Process tree for pages:',len(pages) cnt_process=0 all_p=[] addresses=[] for pagina in pages: cind2+=1 if pagina.dt1 == None: continue endereco=pagina.dt1 lines_doc=pagina.dt2 def process_page(all_ps,id,purpose,pgs,finish,th): ln_o='' conn= MySQLdb.connect(host='dbmy0023.whservidor.com', user='******' , passwd='acc159753', db='mindnet') for lines_doc2_ in all_ps: #try: endereco_url=lines_doc2_[0] lines_doc2=lines_doc2_[1] l2=Identify.prepare_layout(conn,id,purpose) if True : for s in lines_doc2: ln_o=s addresses.append(endereco_url) if umisc.trim(ln_o) == '': finish.finished=True return ir=Identify.process_data(conn,l2,ln_o,onto_basis,purpose,id,th) if ir[0] != None : # procura identificador --- fnd_ident=False for es in ir[0].topicos: if ir[0].es_compare_dt(es,'identificador'): fnd_ident=True if not fnd_ident: ind=len(result_onto_tree_er)-1 fond_cs=False while ind >=0 and not fond_cs: for es2 in result_onto_tree_er[ind].topicos: if ir[0].es_compare_dt(es2,'identificador'): ir[0].set_topico_nr(es2) fond_cs=True break ind-=1 # verificar se nao tem somente identificadores(elemento fact invalido) oth=False for es in ir[0].topicos: if ir[0].es_compare_dt(es,'identificador'): pass else: oth=True if not oth: continue result_onto_tree_er.append(ir[0]) # procurar group ind=len(result_onto_tree_er)-1 while ind >=0 : if ir[0] != result_onto_tree_er[ind]: for es2 in result_onto_tree_er[ind].topicos: if ir[0].es_compare_dt(es2,'identificador'): for top in ir[0].topicos: if ir[0].compare_dt_depend(conn,usr,purpose,es2,top,['']): # encontrou referencias do mesmo identificador, incluir nos objetos linkados rt=None fnd_new=False for k1 in result_linked: for k2 in k1: if k1 == ir[0]: fnd_new=True rt=k1 #= if not fnd_new: result_linked.append([ir[0]]) rt= result_linked[len(result_linked)-1 ] #======================================= fnd_new=False for k2 in rt: if k2 == 
result_onto_tree_er[ind]: fnd_new=True if not fnd_new: rt.append(result_onto_tree_er[ind]) ind-=1 #========================== if ir[1] != None: result_onto_tree_bpm.append(ir[1]) #except Exception ,err: # print 'Except:',err # finish.finished=True finish.finished=True conn.close() #print 'Thread ',pgs,' was finished.','Len:',len(ln_o),' process:',start_c/10 if cnt_process >= 10: #print 'Start thread job :',cind2,' process:', threads_fd.append(thread_cntl()) #process_page(all_p,usr,purp,cind2,threads_fd[len(threads_fd)-1],cind2) thread.start_new_thread(process_page, (all_p,usr,purp,cind2,threads_fd[len(threads_fd)-1],cind2) ) all_p=[] cnt_process=0 continue else: all_p.append([endereco,lines_doc]) cnt_process+=1 #-- ind_fs=0 while True: if (len(threads_fd)-ind_fs) == 0: break #print 'Processing...',len(threads_fd)-ind_fs,',ind_fs:',ind_fs ind_fs=0 time.sleep(10) conti=False for d in threads_fd: if not d.finished: conti=True if d.finished: ind_fs+=1 if conti: continue # processar informacoes ( encontrar os ractionlines linkdas por 'CALL' ) rts=[] ind=0 def get_result_links(lr): for s in result_linked: for s2 in s: if s2 == lr: return s return [] conn= MySQLdb.connect(host='dbmy0023.whservidor.com', user='******' , passwd='acc159753', db='mindnet') for data_c in result_onto_tree_er: runc=result_onto_tree_bpm[ind] # rct original, ractionline q processou as informacoes #sd_link=result_linked[ind] sd_link=get_result_links(data_c) runc.process_Call_RCT(conn,data_c,usr,sd_link) rts.append(data_c) ind+=1 #======================================================================================================================================== print 'ER:',len(result_onto_tree_er) for e in result_onto_tree_er: print '----------------------------------------' for t in e.topicos: print t.dt print '**************************' for s in t.sinapses: print s.nr.dt print '**************************' print '----------------------------------------' #break # retorno: ''' ->gravar na 
ontologia semantic_objects2, os qualificadores de definicao,composicao( achar objectos com destinacao(info-composicao), que contem as informacoes de propriedades ) ->gravar na ontologia semantic_objects2, os qualificadores de estado( achar objectos com destinacao(info-state), que contem as informacoes de estado->links para cada momento das propriedades ) ->gravar na ontologiao links_objetcts os qualificadores definidores de relacao , link ,conexao, etc.. ( achar objectos com destinacao(info-relaction), que contem as informacoes de relacoes ) ->os qualificadores de filtro,foco informados na sentenca do clipping seram utilizados para o retorno no clipping ''' layers_processed=[] for e in result_onto_tree_er: #================ topicos_composicao=[] # ->semantic_objects2 topicos_relaction=[] # ->semantic_relactions2 topicos_infostate=[] # ->semantic_infostate objs_prs=get_objs_purp(e.name,'COMPOSICAO',usr,conn) tops_cmp_prs=get_objectdt_by(objs_prs,usr,conn) objs_prs=get_objs_purp(e.name,'INFO-STATE',usr,conn) tops_infos_prs=get_objectdt_by(objs_prs,usr,conn) objs_prs=get_objs_purp(e.name,'RELACTION',usr,conn) tops_rel_prs=get_objectdt_by(objs_prs,usr,conn) objets_Real_post=[] # REAL-ONTOLOGY-POST=> qualificadores que indicam se deve ou nao fazer parte da ontologia primaria(ontologia principal de conhecimento )->instrucao privilegiada #================ layer_post=mdNeural.mdLayer() layers_processed.append(layer_post) foco_h=[] #-> usado para focar os elementos pertinentes Я observacao para o historico foco_h2=[] #======================== for t in e.topicos: for d in t.dt: if d.upper() == 'FOCO': for s in t.sinases: foco_h2.append(s) for d2 in s.nr.dt: foco_h.append(d2) #======================== for t in e.topicos: for d in t.dt: if d.upper() == 'REAL-ONTOLOGY-POST': for s in t.sinases: for d2 in s.nr.dt: #[objecto] objets_Real_post.append([d2]) #================================== fnd=False for d in t.dt: if not fnd: if d.upper() == 'IDENTIFICADOR': for s in t.sinases: 
if not fnd: for d2 in s.nr.dt: obj_nm=d2 layer_post.name=obj_nm break #================================== fnd=False for d in t.dt: if not fnd: for d2 in tops_cmp_prs: if d.upper() == d2.upper(): fnd=True break if fnd: topicos_composicao.append(t) #================================== fnd=False for d in t.dt: if not fnd: for d2 in tops_rel_prs: if d.upper() == d2.upper(): fnd=True break if fnd: topicos_relaction.append(t) #================================== fnd=False for d in t.dt: if not fnd: for d2 in tops_infos_prs: if d.upper() == d2.upper(): fnd=True break if fnd: topicos_infostate.append(t) #= for t_com in topicos_composicao: for s in t_com.sinapses: layer_post.set_topico_nr(t_com.nr) #===================== # links=[] for lnk_s in topicos_relaction: dt=[] for s in t_com.sinapses: for d in nr.dt: o= get_object_by_data(dt,usr,conn) if o != None: layer_post.set_link(o,s.opcode) # opcode=> link, motivo da conexao if len(objets_Real_post)==0: post_object_by_data2(layer_post,usr,conn,termo,foco_h2) else:# se estiver na lista objets_Real_post, postar como ontologia principal post_object_by_data(layer_post,usr,conn,termo,foco_h2) # historico for t_hist in topicos_infostate: # historico, identificador do referencial state_type=t_hist.dt composicao=[] rels=[] #========= if len(foco_h)==0: for t_com in topicos_composicao: composicao.append(t_com) else: for t_com in topicos_composicao: fnd=False for f in foco_h: #composicao, elementos considerados if not fnd: for s in t_com.sinapses: if f.upper() in s.dt : composicao.append(t_com) fnd=True break else: break #========= lnks if len(foco_h)==0: for t_com in topicos_relaction: rels.append(t_com) else: for t_com in topicos_relaction: fnd=False for f in foco_h: #rels, links considerados if not fnd: for s in t_com.sinapses: if f.upper() in s.dt : rels.append(t_com) fnd=True break else: break post_datah_state(state_type,layer_post.name,composicao,rels,usr,conn) #================================== # filtro,foco para indicar o que 
extrair dos objetos acima postados #================================== s_foco=[] s_restricao=[] s_filtro=[] s_m=[] # msg ou clipping for lay in onto_basis.nodesER: nodes2=lay.get_links('FACT') for lay in nodes2: lay=lay.lr for top in lay.topicos: for dts in top.dt: if dts.upper() == "FILTRO": for s in top.sinapses: s_filtro.append(s.nr) if dts.upper() == "FOCO": for s in top.sinapses: s_foco.append(s.nr) if dts.upper() == "RESTRICAO": for s in top.sinapses: s_restricao.append(s.nr) if dts.upper() == "PURPOSE-DST": for s in top.sinapses: for ds1 in s.nr.dt: s_m.append(ds1) #================================== ''' 1 -setar foco nos objectos 2 -caracteristicas + links 3 -layout-out com interesses 4 -layout-code para extrair informacao 5 -inserir no clipping return ''' for lr in layers_processed: lr_name=lr.name #foco consid=[] #============================================ for f in s_foco: for nr2 in lr.topicos: if lr.compare_dt_depend(conn,usr,purp,f,nr2,[]): consid.append(nr2) #============================================ for f in s_filtro: for nr2 in lr.topicos: if lr.compare_dt_depend(conn,usr,purp,f,nr2,[]): consid.append(nr2) #============================================ for f in s_restricao: for nr2 in lr.topicos: if lr.compare_dt_depend(conn,usr,purp,f,nr2,[]): consid.append(nr2) #============================================ for c in consid: lr.put_publish(c) #============================================ #======================================================================================================================================== #======================================================================================================================================== data_return=[] # char layoutcode ind_c=0 for data_c in result_onto_tree_er: #========== cmps_nr=layers_processed[ind_c].get_all() ind_c+=1 #========== runc=result_onto_tree_bpm[ind] # rct original, ractionline q processou as informacoes addr_url=addresses[ind] 
layout_codes=runc.get_links('LAYOUT-CODE') nms=[] for d in result_onto_tree_bpm: nms.append(d.lr.name) rts2=prepare_search_customlayouts(nms,cmps_nr,usr,conn) #purposes,dts,usr data_return.append(rts2) #======================================================================================================================================== # #==================================== all_txt='' for dt in data_return: # post no clipping all_txt+=(' '+dt) all_txt=umisc.trim(all_txt) if all_txt != "": #== classificacao/categoria #class=get_class(purpose) - > retorna o label do purpose class_c = get_class(purp,usr,conn) #== post no clipping if len(s_m) <=0 : cursor.execute (" insert into clipping_return(username,retorno,categoria,href) values ( %s , %s , %s , %s )", (usr, all_txt,class_c,'')) else: has_clipp=False has_msg=False for d in s_m: # verificar onde inserir if d.upper()=="CLIPPING": if not has_clipp: has_clipp=True cursor.execute (" insert into clipping_return(username,retorno,categoria,href) values ( %s , %s , %s , %s )", (usr, all_txt,class_c,'')) elif d.upper()=="FEED": if not has_msg: has_msg=True cursor.execute (" insert into INBOX_MSG (username,MSG,SOURCE,REFERENCIAS) values ( %s , %s , %s , %s )", (usr, all_txt,class_c,addr_url)) #=========================================================================================================================================== conn.close()