Exemplo n.º 1
0
    def find_and_process_new(self, f):
        """Claim the oldest 'new' queue row, run *f* on its IQL, and record
        the outcome in the queue table.

        Returns False when there was no row to claim, True when *f*
        succeeded (row marked 'done'), and None when *f* raised (row
        marked 'failed').
        """
        # Atomically flip exactly one 'new' row to 'running' and fetch it.
        dr = self.db.query("""
         UPDATE %s SET state = 'running'
         WHERE state = 'new' AND id = (SELECT id FROM %s WHERE state = 'new' ORDER BY id ASC LIMIT 1)
         RETURNING id, state, iql;""" % (self.TBL_NAME, self.TBL_NAME))
        dr = dr.dictresult()

        # The subquery is LIMIT 1, so anything other than exactly one row
        # means nothing was claimed.
        if len(dr) != 1:
            return False  # Nothing to do

        item = dr[0]

        try:
            result = f(item['iql'])
        except Exception as error:
            print(error)
            # Previously an always-empty {} was stored, discarding the
            # reason for the failure; keep it for debugging instead.
            failure = {"error": str(error)}
            self.db.query("""
      UPDATE %s SET state = 'failed', result = '%s'::JSON
      WHERE id = '%s';""" % (self.TBL_NAME,
                             escape_string(json.dumps(failure, sort_keys=True)),
                             escape_string(item['id'])))
            return None

        self.db.query(
            """
    UPDATE %s SET state = 'done', result = '%s'::JSON
    WHERE id = '%s';""" %
            (self.TBL_NAME, escape_string(json.dumps(
                result, sort_keys=True)), escape_string(item['id'])))

        return True
Exemplo n.º 2
0
 def fetch_colors(self, database, qfigure, qtype, qid):
     """Return a {label: color} mapping for rows matching the given
     type, id, and figure pattern."""
     query = ("SELECT label, color FROM colors WHERE type = %d "
              "AND id = '%s' AND figure LIKE '%s';"
              % (qtype, pg.escape_string(qid), pg.escape_string(qfigure)))
     rows = database.query(query).getresult()
     return {label: color for label, color in rows}
Exemplo n.º 3
0
 def find_svg(self, database, qfigure, qtype, qid):
     """Return the single figure name matching type/id/figure-pattern.

     Raises Exception when zero or more than one figure matches.
     """
     q = "SELECT DISTINCT figure FROM colors WHERE type = %d AND id = '%s' AND figure LIKE '%s';" % (
         qtype, pg.escape_string(qid), pg.escape_string(qfigure))
     r = database.query(q).getresult()
     if len(r) == 0:
         # Call form of raise is valid on both Python 2 and 3; the old
         # "raise Exception, msg" statement form is Python-2-only.
         raise Exception("No figures matched query")
     elif len(r) > 1:
         raise Exception("More than one figure matches query")
     return r[0][0]
Exemplo n.º 4
0
 def batchInsert(self,table,dataList):
     """Insert every dict in *dataList* as one row of *table*.

     Empty or 'null' values become SQL NULL; everything else is quoted
     and escaped.  Returns True on success, False on bad input or query
     failure (self.Err then holds the reason).
     """
     self.Err = ''
     if not commonutil.isList(dataList):
         self.Err='the format of data is incorrect'
         return False
     sqlList = []
     for dataDict in dataList:
         sql='insert into '+table+' '
         keys=''
         vals=''
         for i in dataDict:
             keys+='"'+str(i)+'",'
             v=str(dataDict[i])
             v=v.strip()
             if v=='' or v.lower()=='null':
                 vals+="null,"
             else:
                 vals+="'"+pg.escape_string(str(dataDict[i]))+"',"
         keys=keys.strip(',')
         vals=vals.strip(',')
         sql=sql+"("+keys+") values("+vals+")"
         sqlList.append(sql)
     try:
         for eachSql in sqlList:
             self.Link.query(eachSql)
     except Exception as e:  # "as" form is valid on Python 2.6+ and 3
         self.Err=e.args[0]
         return False
     return True  # previously fell through returning None on success
Exemplo n.º 5
0
    def insert(self,table,data):
        """Insert *data* (a dict of column -> value) as one row of *table*.

        Empty or 'null' values become SQL NULL; everything else is quoted
        and escaped.  Returns True on success, False on bad input or query
        failure (self.Err then holds the reason).
        """
        self.Err = ''
        if not commonutil.isDict(data):
            self.Err='the format of data is incorrect'
            return False
        sql='insert into '+table+' '
        keys=''
        vals=''
        for i in data:
            keys+='"'+str(i)+'",'
            v=str(data[i])
            v=v.strip()
            if v=='' or v.lower()=='null':
                vals+="null,"
            else:
                vals+="'"+pg.escape_string(str(data[i]))+"',"
        keys=keys.strip(',')
        vals=vals.strip(',')
        sql=sql+"("+keys+") values("+vals+")"

        try:
            self.Link.query(sql)
        except Exception as e:  # "as" form is valid on Python 2.6+ and 3
            self.Err=e.args[0]
            return False
        return True  # previously returned None on success
    def test_pygresql_escape_string(self):
        '''Test pygresql (escape strings)

        A single quote must escape to two quotes, both via the
        module-level pg.escape_string and the per-connection
        escape_string method introduced by the CVE-2009-2940 fix.
        '''

        # Set up a throwaway user and database to connect with.
        self.user = testlib.TestUser()
        self.testuser = self.user.login
        self.testdb = "pygresql_db"
        self._create_user(self.testuser, self.user.password)
        self._create_db(self.testdb, self.testuser)

        import pg
        self.pgcnx = pg.connect(dbname=self.testdb,
                                host='127.0.0.1',
                                user=self.testuser,
                                passwd=self.user.password)

        # Module-level escape: "'" -> "''"
        search = "''"
        warning = 'Could not find "%s"\n' % search
        self.assertTrue(pg.escape_string("'") == search, warning)

        # fix for CVE-2009-2940 added this
        # Connection-level escape_string must exist; its absence means a
        # vulnerable PyGreSQL build.
        search = "''"
        warning = 'Could not find "%s"\n' % search
        try:
            self.assertTrue(self.pgcnx.escape_string("'") == search, warning)
        except AttributeError:
            warning = 'CVE-2009-2940: Could not find required pyobj.escape_string()'
            self.assertTrue(False, warning)
def nyt_parse_descriptors(infile):
    """Load NYT descriptor records from *infile* (UTF-8, tab-separated)
    into the groups table.

    Each non-comment line has the form "descriptor<TAB>type"; lines
    starting with '#' are skipped.
    """
    try:
        f = codecs.open(infile, 'r', 'utf-8')
    except IOError:  # was a bare except, which also hid unrelated errors
        print('Cannot open the file for reading. Either the file is not UTF-8, or check the permissions.')
        return

    # total_recs come from
    # wc -l /scratch/tsagias/acl2010-tracking/descriptors.out
    total_recs = 858689
    try:
        for i, line in enumerate(f):
            out = 'Processing %d / %d\r' % (i, total_recs)
            print(out)  # + '\b' * len(out)

            # Skip lines beginning with #, it's a comment
            if line[0] == '#': continue

            # Split the line at <TAB>
            descriptor, descriptor_type = line.split("\t")

            # Insert the line to the groups_type table
            db.query('''
        INSERT INTO groups
        (group_name, group_type_id)
        VALUES
        ('%s', %d)
        ''' % (pg.escape_string(descriptor.encode('utf-8')), int(descriptor_type)))
    finally:
        # Close the handle even if a malformed line raises mid-loop
        # (previously leaked in that case).
        f.close()
Exemplo n.º 8
0
def escapeString(s):
	"""SQL-escape *s* when it is a str/unicode value.

	Falls back to returning *s* unchanged when the installed PyGreSQL
	lacks escape_string.  NOTE(review): non-string input falls through
	and returns None implicitly — looks unintended, confirm callers
	before changing.
	"""
	if type(s) in [str,unicode]:
		try:
			return pg.escape_string(s)
		except AttributeError:  # old PyGreSQL without escape_string; was a bare except
			#log.warning("There is no escape_string in PyGreSQL. Please update backend.")
			return s
Exemplo n.º 9
0
 def GetJobstderrcontents(self,UserName,JobID,LocalJobID):
     """Return the SQL-escaped contents of a finished job's stderr file.

     Polls the job's log directory for up to ~10 seconds while the
     scheduler flushes the ".e*" file; returns a not-found message
     string when no file appears.
     """
     JobName=self.Options['Torque:jobprefix']+UserName[:4]+'_'+str(LocalJobID)

     path = os.path.join(self.Options['WorkFlowSettings:WorkingDir'], UserName, str(JobID),'log')
     old_dir = os.getcwd()
     os.chdir(path)
     try:
         stderrstring=''
         listoffiles=glob.glob(JobName+".e*")
         logging.info("Searching for File:"+JobName+".e*")
         counter=10
         while (len(listoffiles)==0 and counter>0):
             counter=counter-1
             logging.info('No stderr file found... Will retry for more '+str(counter)+' times')
             time.sleep(1)
             listoffiles=glob.glob(JobName+".e*")

         if len(listoffiles)>0:
             stderrfile=listoffiles[0]
             logging.info('Job Finished With Error, reading error information from:'+stderrfile)
             # Close the handle instead of leaking it.
             LogFile = open(stderrfile, "r")
             try:
                 stderrstring=pg.escape_string(LogFile.read())
             finally:
                 LogFile.close()
         else:
             stderrstring='Cannot find the stderr file:'+JobName  # typo fix: was "Cannot file"
     finally:
         # Always restore the working directory, even when glob/open/read
         # raises (previously left the process chdir'd into the job dir).
         os.chdir(old_dir)
     return stderrstring
Exemplo n.º 10
0
 def fetch_svg(self, database, qfigure):
     """Load the SVG markup for *qfigure* from the figures table into
     self.__svg.  Raises Exception when the figure does not exist."""
     q = "SELECT svg FROM figures WHERE figure = '%s';" % pg.escape_string(
         qfigure)
     r = database.query(q).getresult()
     if len(r) == 0:
         # Call form of raise is valid on both Python 2 and 3; the old
         # "raise Exception, msg" statement form is Python-2-only.
         raise Exception("Error: No figure called %s has been returned" % qfigure)
     self.__svg = r[0][0]
     return
Exemplo n.º 11
0
 def SetJobFinishedWithError(self,JobID,Comment,ExecTime):
     """Mark a job as failed, stamp its completion time from ExecTime,
     and append an 'Error' row to JobHistory."""
     # Truncate BEFORE escaping: cutting the escaped text at 500 chars
     # could split a doubled quote ('' -> ') and yield malformed/unsafe
     # SQL.  NOTE(review): the escaped comment may now exceed 500 chars;
     # confirm the jobstatuscomment column is wide enough.
     if len(Comment)>500:
         Comment=Comment[:500]
     Comment=pg.escape_string(Comment)
     logging.info('Job ('+str(JobID)+') Finished With Error .... '+Comment)
     Updatest="UPDATE Jobs set JobStatus="+str(EnumerationLookup.JobState.Error)+",completedate=insertdate+INTERVAL '"+str(ExecTime)+" seconds',jobstatuscomment='"+Comment+"' where JobID="+str(JobID)+";"
     Updatest=Updatest+"INSERT INTO JobHistory(JobID,NewStatus,Comments) VALUES("+str(JobID)+","+str(EnumerationLookup.JobState.Error)+",'Error');"
     return self.ExecuteNoQuerySQLStatment(Updatest)
Exemplo n.º 12
0
def main():
    """Load an agency's GTFS .txt files into a freshly created
    PostgreSQL schema, then geocode shapes and stops.

    Usage: python load-gtfs.py db hst usr agency schema
    """
    #Note directory structure: ./gtfs/[agency] contains gtfs files, ./schema contains sql to create schema and build indexes
    try:
      db = sys.argv[1]
      hst = sys.argv[2]
      usr = sys.argv[3]
      agency = sys.argv[4]
      schema = sys.argv[5]
    except IndexError:
      print "Usage: python load-gtfs.py db hst usr agency schema"
      sys.exit(1)
    con = pg.connect(dbname=db,host=hst,user=usr)
    
    #set up GTFS schema
    # NOTE(review): schema/usr/db/hst come straight from argv and are
    # interpolated unescaped into SQL and a shell pipeline — only safe
    # for trusted operator input.
    try:
      con.query("DROP SCHEMA %s cascade;" % schema)
    except pg.ProgrammingError:
      pass
    con.query("CREATE SCHEMA %s;" % schema)
    os.system('cat ./schema/gtfs_schema.create.sql | psql -U %s -d %s -h %s' % (usr,db,hst))

    TABLES = ['agency', 'calendar', 'calendar_dates', 'fare_attributes','fare_rules','frequencies', 'routes', 'shapes','stop_times','stops','trips']
    #TABLES = ['agency','calendar','calendar_dates']
    for table in TABLES:
        try:
          # NOTE(review): f is never closed; handles accumulate for the
          # lifetime of the script.
          f = open('gtfs/%s/%s.txt' % (agency,table), 'r')
          print 'processing %s' % table
          reader = csv.reader(f)
          # First CSV row is the GTFS header naming the columns.
          columns = reader.next()
          #print ','.join(columns)
          for row in reader:
              insert_row = []
              for value in row:
                  if value == '':
                      insert_row.append('NULL')
                  elif not is_numeric(value):
                      insert_row.append("'" + pg.escape_string(value) + "'")
                  else:
                      insert_row.append(value)
              #while threading.activeCount() > 10:
              #    pass
              #thread = queryThread(con,table,','.join(columns),','.join(insert_row),agency)
              #thread.start() 
              insert_into_table(con,table,','.join(columns),','.join(insert_row),agency)
        except IOError:
          print 'NOTICE: %s.txt not provided in feed.' % table
    # create new columns, indexes and constraints
    extra_sql = []
    extra_sql.append('update shapes set the_geom = st_setsrid(st_point(shape_pt_lon,shape_pt_lat),4326);')
    extra_sql.append('update stops set the_geom = st_setsrid(st_point(stop_lon,stop_lat),4326);')
    for sql in extra_sql:
      try:
        con.query(sql)
      except pg.ProgrammingError:
        pass
    os.system('cat ./schema/gtfs_schema.index.sql | psql -U %s -d %s -h %s' % (usr,db,hst))
Exemplo n.º 13
0
 def _load(self):
     """Fetch aggregated raw flux counts (teff, corrected eca, meteor
     count, station count) per time bin into self._data.

     The eca sum applies the altitude correction built from
     self._delta and self._gamma; rows below self._min_alt or with
     eca <= 0.50 are excluded.  Sets self._data to [] when the query
     returns nothing.
     """
     if self._stations != "":
         stationcond = " AND UPPER(station) = UPPER('%s') "  % pg.escape_string(self._stations)
     else:
         stationcond = ""
     
     """ SQL Query: fetch the raw counts """
     """
     if self._min_interval > 2:
         time = "time"
     else:
         time = "date_trunc('hour',time)"
     """    
     sql = """SELECT 
                 time, 
                 SUM(teff) AS teff, 
                 -- SUM(eca) AS eca,
                 SUM( eca * (%.7f + (1-%.7f) * (sin(radians(alt))^%.7f) / sin(radians(alt))) ) AS eca,
                 SUM(met) AS met,
                 COUNT(*) AS stations
              FROM metrecflux
              WHERE 
                  time >= '%s'::timestamp 
                  AND time <= '%s'::timestamp
                  AND shower = '%s'
                  AND eca IS NOT NULL
                  AND alt >= %.7f
                  AND eca > 0.50
                  %s
              GROUP BY time 
              ORDER BY time""" % (self._delta, self._delta, self._gamma, \
                                  pg.escape_string(str(self._begin)), \
                                  pg.escape_string(str(self._end)), \
                                  pg.escape_string(self._shower), \
                                  self._min_alt, \
                                  stationcond)
     
     # vmo.sql returns None on failure; normalize to an empty list.
     result = vmo.sql(sql)
     if result != None:
         self._data = result
     else:
         self._data = []
Exemplo n.º 14
0
def put_pdf_files_in(conn):
        """Read the PDF handle returned by get_pdf_files() and store its
        bytes and name in the files table."""
        # Single-argument parenthesized print produces identical output
        # on Python 2 and 3.
        print("------------------------")
        print("-- Putting pdf files in ")
        print("------------------------")
        # INPUT: file and its name
        # data in columns: name of a file AND file
        f = get_pdf_files(conn)
        try:
            conn.execute(
                "INSERT INTO files (file, file_name) VALUES (E'%s', '%s')" %
                (pg.escape_bytea( f.read() ), pg.escape_string( f.name ))
            )
        finally:
            # Don't leak the handle; assumes get_pdf_files returns a
            # file-like object with close() — TODO confirm.
            f.close()
Exemplo n.º 15
0
def api_aquery():
    """Queue an IQL query: validate it, deduplicate by content hash, and
    insert a 'new' row into query_queue.

    Returns a JSON response carrying the query id (existing or new), or
    a 4xx/5xx error payload.
    """
    iql = request.args.get('q')

    if iql is None or iql == '':
        return json400({"error": "Empty query!"})

    try:
        iql = json.loads(iql)
        # Canonical serialization so identical queries hash identically.
        iqls = json.dumps(iql, sort_keys=True)
    except ValueError:  # json.loads failure; was a bare except
        return json400({"error": "Not valid JSON!"})

    try:
        iqlc.convert(iql, get_iql_config())
    except ValueError as error:
        return json400({"iql": iql, "error": str(error)})

    query_hash = sha1_hash(iqls)

    sql = "SELECT * FROM query_queue WHERE id = '%s';" % (
        escape_string(query_hash))

    try:
        dr = get_db().query(sql).dictresult()
    except Exception:
        # BUG FIX: this was "except error:", which referenced the name
        # bound (and unbound again) by the earlier ValueError handler —
        # a NameError waiting to happen on any DB failure.
        return json500({"error": "Internal Server Error"})

    if len(dr) > 0:
        first = dr[0]
        return json200({"query_id": first["id"]})

    sql = "INSERT INTO query_queue(id, iql, result, state) VALUES('%s', '%s'::JSON, NULL, 'new');" % (
        escape_string(query_hash), escape_string(iqls))

    try:
        get_db().query(sql)
    except Exception:  # was a bare except
        return json500({"error": "Internal Server Error"})

    return json200({"query_id": query_hash})
Exemplo n.º 16
0
def api_result():
    """Return the stored query_queue row for ?id=..., or a JSON error
    (400 for a missing id, 404 when unknown, 500 on DB failure)."""
    query_id = request.args.get('id')

    # Validate like api_aquery does: reject a missing/empty id instead
    # of querying with the literal string "None".
    if query_id is None or query_id == '':
        return json400({"error": "Empty id!"})

    sql = "SELECT * FROM query_queue WHERE id = '%s';" % (
        escape_string(query_id))

    try:
        dr = get_db().query(sql).dictresult()
    except Exception:  # was a bare except
        return json500({"error": "Internal Server Error"})

    if len(dr) <= 0:
        return json404({"error": "Not found!"})

    return json200(dr[0])
Exemplo n.º 17
0
 def getObserverTable(self, format="html"):
     """Return an HTML table of per-station observer statistics (teff,
     ECA, shower count, sporadic count) for the configured time window
     and shower, caching the query result in self._stationdata.
     """
     if not hasattr(self, '_stationdata'):
         if self._fluxdata._stations != "":
             stationcond = " AND upper(station) = upper('%s') "  % pg.escape_string(self._fluxdata._stations)
         else:
             stationcond = ""
         
         # Subquery a: per-station totals for the requested shower;
         # subquery b: sporadic ('SPO') counts, LEFT JOINed on station.
         sql = """SELECT a.station, a.observer, a.country, a.teff, a.eca, a.met, b.spo
                     FROM (
                     SELECT 
                         UPPER(station) AS station, 
                         MAX(meta.observer_firstname || ' ' || meta.observer_lastname) AS observer,
                         MAX(meta.site_country) AS country,
                         SUM(teff) AS teff, 
                         SUM(eca) AS eca, 
                         SUM(met) AS met 
                      FROM metrecflux AS x
                      LEFT JOIN metrecflux_meta AS meta ON x.filename = meta.filename
                      WHERE time BETWEEN '%s' AND '%s' 
                      AND shower = '%s'
                  AND eca IS NOT NULL
                  AND eca > 0.00
                      %s
                      GROUP BY UPPER(station)
                      ORDER BY UPPER(station) ) AS a
                      
                     LEFT JOIN (
                         SELECT 
                             UPPER(station) AS station, SUM(met) AS spo
                         FROM 
                             metrecflux
                         WHERE 
                             time BETWEEN '%s' AND '%s' 
                             AND shower= 'SPO'
                         AND eca IS NOT NULL
                     AND eca > 0.00
                         GROUP BY UPPER(station)
                     ) AS b ON a.station = b.station        
                      """ % (pg.escape_string(str(self._begin)), pg.escape_string(str(self._end)), \
                             pg.escape_string(self._shower), stationcond, \
                             pg.escape_string(str(self._begin)), pg.escape_string(str(self._end)))
         self._stationdata = vmo.sql(sql)
     
         if self._stationdata == None:
             return ""
 
     html = u"<table>\n"
     html += u"\t<thead><th style='text-align:left;'>Station<br/> </th><th style='text-align:left;'>Observer<br/> </th><th style='text-align:left;'>Country<br/> </th>"
     html += u"<th>Teff<br/>[h]</th><th>ECA<br/>[10<sup>3</sup>&#183;km<sup>2</sup>&#183;h]</th><th>n%s<br/> </th><th>nSPO<br/> </th></thead>\n" % self._shower
     
     # NOTE(review): row['spo'] comes from the LEFT JOIN and can be
     # None for stations with no sporadic data — the %d format below
     # would raise then; confirm the data guarantees a value.
     for row in self._stationdata:
         html += u"\t<tr><td style='text-align:left;'>%s</td><td style='text-align:left;'>%s</td><td style='text-align:left;'>%s</td>" % (row['station'], row['observer'].decode("utf-8"), row['country'])
         html += u"<td>%.0f</td><td>%.0f</td><td>%d</td><td>%d</td></tr>\n" % (row['teff']/60.0, row['eca']/1000.0, row['met'], row['spo'])
         
     html += u"<table>\n"
     return html
Exemplo n.º 18
0
    def __init__(self, rest):
        """Build the search page: a result table when *rest* carries a
        "query" key, otherwise the search form."""
        HTMLPageCollide.__init__(self)

        if "query" in rest:
            self.head.title = "Search result"
            dictionary = pg.connect(host='localhost',
                                    user='******',
                                    dbname='dictionary')
            # Escape the user's search term; it is used as an ILIKE
            # prefix pattern below.
            search = pg.escape_string(rest["query"])
            names = dictionary.query(
                "SELECT type, id, name FROM preferred WHERE name ILIKE '%s%%' AND type<>-11;"
                % search).getresult()
            if len(names):
                XH1(self.frame.content, "Result for '%s'" % (rest["query"]))
                table = XTable(self.frame.content)
                for type, id, name in names:
                    table.addrow(type, id, name)
            else:
                # NOTE(review): rest["filter"] is read here but never
                # checked for presence — may raise KeyError; confirm the
                # form always posts it.
                XH1(
                    self.frame.content, "Nothing found for '%s' (%s)" %
                    (rest["query"], rest["filter"]))

        else:
            self.head.title = "Search diseases and genes"

            # Render the empty search form.
            form = XTag(self.frame.content, "form")
            form["action"] = "Search"
            form["method"] = "post"
            center = XTag(form, "center")
            p1 = XP(center)
            XH3(p1, "Search for diseases, genes and identifiers")
            XTag(
                p1, "input", {
                    "type": "text",
                    "name": "query",
                    "size": "100%",
                    "value": "ABCA17P"
                })
            submit = XTag(XP(p1), "input", {
                "type": "submit",
                "value": "submit"
            })
Exemplo n.º 19
0
def escape(var):
    """Thin wrapper around pg.escape_string for SQL string escaping."""
    escaped = pg.escape_string(var)
    return escaped
Exemplo n.º 20
0
def nyt_parse_articles(infile):
    """Parse NYT article-to-descriptor/taxonomy mappings from *infile*
    (UTF-8, tab-separated) and rebuild the taxonomy tree in the DB.

    Line format: descriptors<TAB>taxonomy<TAB>article_id<TAB>pubdate,
    with ';'-separated descriptor and taxonomy lists.  The article and
    descriptor insertion passes are currently commented out; only the
    taxonomy pass runs.
    """
    descriptor2article = collections.defaultdict(lambda: list())
    taxonomy2article = collections.defaultdict(lambda: list())
    
    try:
        f = codecs.open(infile, 'r', 'utf-8')
    except:
        print 'Cannot open the file for reading. Either the file is not UTF-8, or check the permissions.'
        return
        
    # total_recs come from
    # wc -l /scratch/tsagias/acl2010-tracking/articles2topics.out
    total_recs = 1859122
    for i, line in enumerate(f):
        out = 'Processing %d / %d\r' % (i, total_recs)
        print out + '\b' * len(out)
        
        """ Skip lines beginning with #, it's a comment"""
        if line[0] == '#': continue
        
        """ Split the line at <TAB>"""
        descriptors, taxonomy, article_id, article_pubdate = line.split("\t")
        
        """ DESCRIPTORS: split them in ;"""
        descriptors = descriptors.split(';')
        
        """ TAXONOMIES: split them in ;"""
        taxonomies = taxonomy.split(';')
        
        """ Assign articles to descriptors and taxonomies """
        for d in descriptors:
            descriptor2article[d.strip()].append(article_id)
            
        for t in taxonomies:
            taxonomy2article[t.strip()].append(article_id)
            
        # """ Add article to the database """
        # db.query('''
        # INSERT INTO articles
        # (article_id, pubdate)
        # VALUES
        # (%d, '%s')
        # ''' % (int(article_id), article_pubdate))
    f.close()
    
    """ Assign articles to descriptors """
    # print 'Sending article<->descriptors associations ..'
    # for k, v in descriptor2article.iteritems():
    #     descriptor_id = db.query('''
    #     SELECT group_id 
    #     FROM groups
    #     WHERE group_name = E'%s'
    #     ''' % pg.escape_string(k.strip().encode('utf-8')))
    #     
    #     if not descriptor_id.ntuples(): 
    #         print '** NOT FOUND Descriptor: %s' % k
    #         continue
    #     else:
    #         descriptor_id = descriptor_id.getresult()[0][0]
    #     
    #     for i in v:
    #         db.query('''
    #         INSERT INTO articles2groups
    #         (article_id, group_id)
    #         VALUES
    #         (%d, %d)
    #         ''' % (int(i), int(descriptor_id)))
    # print 'done'
    
    """ Assign articles to taxonomy """
    print 'Building taxonomy ..'
    for k, v in taxonomy2article.iteritems():
        print 'Processing taxonomy %s (%d articles)' % (k, len(v))
        """ Split the taxonomy by /"""
        t = k.split('/')
        
        # Walk the path segments, inserting each missing node under its
        # parent; parent_id tracks the current node's DB id.
        parent_id = 'NULL'
        for i, rec in enumerate(t):
            sql_taxonomy_name = pg.escape_string(rec.strip().encode('utf-8'))
            
            # if not sql_taxonomy_name:
            #     print '*** Taxonomy name empty. Skipping ..'
            #     continue
                
            # check if we have the taxonomy already
            if parent_id == 'NULL':
                q = '''
                SELECT taxonomy_id
                FROM taxonomy
                WHERE lower(taxonomy_name) = E'%s'
                AND taxonomy_parent_id IS NULL
                ''' % (sql_taxonomy_name.lower())
            else:
                q = '''
                SELECT taxonomy_id
                FROM taxonomy
                WHERE lower(taxonomy_name) = E'%s'
                AND taxonomy_parent_id = %s
                ''' % (sql_taxonomy_name.lower(), str(parent_id))
                
            rtaxonomy_id = db.query(q)
            if rtaxonomy_id.ntuples() > 0:
                parent_id = rtaxonomy_id.getresult()[0][0]
                print '*** NOTICE: Taxonomy %s already inserted as %s' % (sql_taxonomy_name, parent_id)
                continue
            
            # if you don't, insert the new taxonomy
            db.query('''
            INSERT INTO taxonomy
            (taxonomy_name, taxonomy_parent_id)
            VALUES (E'%s', %s)
            ''' % (sql_taxonomy_name, str(parent_id)))
                
            # get taxonomy id
            parent_id = db.query('''
            SELECT currval('taxonomy_taxonomy_id_seq')
            ''').getresult()[0][0]
        
        # parent_id now holds the leaf node's id; link the articles.
        for rec in set(v):
            db.query('''
            INSERT INTO articles2taxonomy
            (article_id, taxonomy_id)
            VALUES
            (%d, %s)
            ''' % (int(rec), int(parent_id)))
    print 'done'
Exemplo n.º 21
0
    def _closest_mb_artists(self,artist,mbart='',mbart_lc_list='',k=5) :
        """Return (matches, mbart, mbart_lc_list) where matches is a list
        of (score, artist_name, artist_gid, artist_id) tuples for the
        k-best MusicBrainz matches of *artist*.

        Matching strategy: exact name match, then case-insensitive
        (ILIKE) match, then fuzzy matching against the full lower-cased
        artist list.  mbart / mbart_lc_list are returned so callers can
        reuse the expensively-built artist list across calls.
        """


        if artist=='' :
            return (list(),mbart,mbart_lc_list)

        vals_by_artist = dict()
        korig=k
        artist = artist.encode('utf-8')
        # Exact-name match first.
        mbartists = self.dbmb.query("SELECT DISTINCT ON (artist.id) artist.name,artist.gid,artist.id FROM artist RIGHT JOIN album ON album.artist = artist.id WHERE artist.name='%s'" % pg.escape_string(artist)).getresult()
        if len(mbartists)>0 :
            idxs=range(len(mbartists))
            # Exact matches all score 1.0.
            artist_terms=ones((len(mbartists)))
            k=len(mbartists)

        else : #try case-insensitve search
            mbartists = self.dbmb.query("SELECT DISTINCT ON (artist.id) artist.name,artist.gid,artist.id FROM artist RIGHT JOIN album on album.artist = artist.id WHERE artist.name ILIKE '%s'" % pg.escape_string(artist)).getresult()

            if len(mbartists)>0 :
                idxs=range(len(mbartists))
                artist_terms=ones((len(mbartists)))
                k=len(mbartists)


            else :  #go a hunting'
                if len(mbart)==0 :
                    #lower case artists names ordered by artist id
                    print 'Building mbart list in _closest_mb_artist'
                    mbart = self.dbmb.query("SELECT DISTINCT ON (artist.id) artist.name,artist.gid,artist.id FROM artist RIGHT JOIN album ON album.artist = artist.id ORDER BY artist.id").getresult()
                    #lower case artists names ordered by artist id
                    mbart_lc_list = map(string.lower,asarray(mbart)[:,0])
                mbartists = mbart
                # Fuzzy match with a 0.8 similarity cutoff.
                (idxs,artist_terms) = self._get_close_matches(artist.lower(),mbart_lc_list,korig,.8)

        l = list()
        for i in range(len(idxs)) :
            (mbartist,mmaid,mbaid) = mbartists[idxs[i]]
            l.append((artist_terms[i],mbartist,mmaid,mbaid))



        return (l,mbart,mbart_lc_list) #return mbart because the query is expensive to build
Exemplo n.º 22
0
def s(s, comma = True):
    """Quote and SQL-escape *s*; append ", " unless comma is False."""
    quoted = "'%s'" % pg.escape_string(s)
    if comma:
        quoted += ", "
    return quoted
Exemplo n.º 23
0
# Scan the (externally built) list `a` of names for near-duplicates via
# pg_trgm similarity, collecting match groups into `b`.
a.sort()

cnt = 0

# NOTE(review): f is opened here but never written to or closed in this
# fragment.
f = open("duplicates.txt", "w")
for i in a:
    # Only consider non-empty, multi-word names.
    if i is not None and len(i) > 0 and len(i.split(" ")) > 1:
        c=[]
        
        
        c.append(i)
        
        
            
        # NOTE(review): cur.execute with a parameter tuple already quotes
        # values — wrapping them in escape_string() double-escapes the
        # search text; confirm before relying on match quality.
        cur.execute("""select distinct name_orig, similarity(name_orig, %s) from dupl_table where name_orig %% %s order by similarity(name_orig, %s) desc""",  (escape_string(i), escape_string(i), escape_string(i)))
        if cur.rowcount > 1: 
        
            d=[]
            for j in cur.fetchall():
                #if i!=j[0] and j[1] > 0.5:
        
                d.append("%s, %f" %( j[0], j[1]))
                
                c.append(d)
            b.append(c)
            # NOTE(review): popping from `a` while iterating over it skips
            # the element following each removal.
            try:
                a.pop(a.index(i))
            except:
                pass
        
Exemplo n.º 24
0
def db_safestr(p):
	"""Return *p* rendered as a quoted, SQL-escaped string literal.

	Non-string values are str()-converted before escaping.
	"""
	if isinstance(p, basestring):
		value = p
	else:
		value = str(p)
	return str("'" + escape_string(value) + "'")
Exemplo n.º 25
0
ws_new['C1'] = 'text'
ws_new['D1'] = 'hashtag_id'
ws_new['E1'] = 'hashtag'
for row in range(0, wslen):
	current_hash_list = ''
	hashtag_id_list = ''
	temp=[]
	cell_text_read = "{col}{row}".format(col='B', row=(row+2))
	cell_text_write = "{col}{row}".format(col='C', row=(row+2))
	cell_hash_id_write = "{col}{row}".format(col='D', row=(row+2))
	cell_hash_write = "{col}{row}".format(col='E', row=(row+2))
	tags = str(ws[cell_text_read].value)
	cell_read = str(ws[cell_text_read].value)
	
	# text in wb_new schreiben
	text = pg.escape_string(cell_read)
	ws_new[cell_text_write] = text
	
	# zelleninhalt teilen und #finden, wie auch entfernen
	temp=[tag.strip("#") for tag in tags.split() if tag.startswith("#")]
	if (len(temp)>0):
		temp[0] = regex.sub('', temp[0])
		if (temp[0] == ''):
			pass
		elif (zaehlerID==0):
			temp[0] = regex.sub('', temp[0])
			hashtag_list = [temp[zaehlerID]]
			current_hash_list += str(temp[zaehlerID])
			hashtag_id_list += str(zaehlerID)
			zaehlerID+=1
		elif (zaehlerID>0):
Exemplo n.º 26
0
print cur.rowcount

cnt = 0

f = open("duplicates.txt", "w")
for i in a:
    if i is not None and len(i) > 0 and len(i.split(" ")) > 1:
        c=[]
        #print i
        #f.write("Строка поиска: %s\n" % i)
        c.append(i)
        
        #print '\n'
            
        cur.execute("""select distinct name_orig, similarity(name_orig, %s) as sim from dupl_table  where name_orig %% %s""",  (escape_string(i), escape_string(i)))#, escape_string(i)))
        #print 'выполняю запрос %s ' % i
        a.pop(a.index(i))
        if cur.rowcount > 1: 
            #print "Строка поиска: %s" % i
            d=[]
            qr=[ "\nЗапрос: %s" % i]
            for j in cur.fetchall():
                if i != j[0] and j[1] > 0.6:
                    qr.append(" Ответ: %s %s " % j)
                    #print " Ответ: %s %s " % j
                #print type(i)
                #print type(j[0])
                    try:
                        a.pop(a.index(j[0]))
                    except:
Exemplo n.º 27
0
def main():
    """Load an agency's GTFS .txt files into a freshly created
    PostgreSQL schema, then geocode shapes and stops.

    Usage: python load-gtfs.py db hst usr agency schema
    """
    #Note directory structure: ./gtfs/[agency] contains gtfs files, ./schema contains sql to create schema and build indexes
    try:
        db = sys.argv[1]
        hst = sys.argv[2]
        usr = sys.argv[3]
        agency = sys.argv[4]
        schema = sys.argv[5]
    except IndexError:
        print "Usage: python load-gtfs.py db hst usr agency schema"
        sys.exit(1)
    con = pg.connect(dbname=db, host=hst, user=usr)

    #set up GTFS schema
    # NOTE(review): schema/usr/db/hst come straight from argv and are
    # interpolated unescaped into SQL and a shell pipeline — only safe
    # for trusted operator input.
    try:
        con.query("DROP SCHEMA %s cascade;" % schema)
    except pg.ProgrammingError:
        pass
    con.query("CREATE SCHEMA %s;" % schema)
    os.system('cat ./schema/gtfs_schema.create.sql | psql -U %s -d %s -h %s' %
              (usr, db, hst))

    TABLES = [
        'agency', 'calendar', 'calendar_dates', 'fare_attributes',
        'fare_rules', 'frequencies', 'routes', 'shapes', 'stop_times', 'stops',
        'trips'
    ]
    #TABLES = ['agency','calendar','calendar_dates']
    for table in TABLES:
        try:
            # NOTE(review): f is never closed; handles accumulate for the
            # lifetime of the script.
            f = open('gtfs/%s/%s.txt' % (agency, table), 'r')
            print 'processing %s' % table
            reader = csv.reader(f)
            # First CSV row is the GTFS header naming the columns.
            columns = reader.next()
            #print ','.join(columns)
            for row in reader:
                insert_row = []
                for value in row:
                    if value == '':
                        insert_row.append('NULL')
                    elif not is_numeric(value):
                        insert_row.append("'" + pg.escape_string(value) + "'")
                    else:
                        insert_row.append(value)
                #while threading.activeCount() > 10:
                #    pass
                #thread = queryThread(con,table,','.join(columns),','.join(insert_row),agency)
                #thread.start()
                insert_into_table(con, table, ','.join(columns),
                                  ','.join(insert_row), agency)
        except IOError:
            print 'NOTICE: %s.txt not provided in feed.' % table
    # create new columns, indexes and constraints
    extra_sql = []
    extra_sql.append(
        'update shapes set the_geom = st_setsrid(st_point(shape_pt_lon,shape_pt_lat),4326);'
    )
    extra_sql.append(
        'update stops set the_geom = st_setsrid(st_point(stop_lon,stop_lat),4326);'
    )
    for sql in extra_sql:
        try:
            con.query(sql)
        except pg.ProgrammingError:
            pass
    os.system('cat ./schema/gtfs_schema.index.sql | psql -U %s -d %s -h %s' %
              (usr, db, hst))
Exemplo n.º 28
0
def load_module_loadone(options, modpath, db, preparse=False):
    """Load one module from *modpath* into the flfiles/flmodules tables of *db*.

    Walks the module directory tree, registering every file with an allowed
    extension: the ``.mod`` descriptor defines the module/area metadata,
    ``.pgsql`` files are delegated to pydba_loadpgsql, ``.mtd`` metadata can
    optionally be pre-parsed, and everything else is stored (escaped) for
    insertion into ``flfiles``.  Per-file mtime/SHA1 data is cached in a
    shelve at /tmp/pydba so unchanged files are skipped on later runs.

    Arguments:
        options  -- parsed command-line options; mutated here (sha_allowed_files,
                    filenames_allowed_files, modules_loaded are updated).
        modpath  -- filesystem path of the module directory to walk.
        db       -- open PyGreSQL connection to the destination database.
        preparse -- when True, parse each .mtd up front and register non-query
                    tables in pydba_mtdparser.Tables.

    Returns 0 when the module cannot be loaded (not registered or locked);
    otherwise returns None after inserting/updating the changed files.
    """
    module = ""
    tables = []
    mtd_files = {}
    filetypes = [
        "xml", "ui", "qry", "kut", "qs", "mtd", "ts", "pgsql", "py", "qs.py"
    ]
    allowed_extensions = [
        "ar", "kut", "mod", "mtd", "qry", "qs", "ts", "ui", "xml", "xpm",
        "qs.py", "py"
    ]
    omitted_exts = []
    unicode_filetypes = ["ui", "ts"]

    files = []
    # Persistent per-file cache keyed by absolute path: mtime + SHA1,
    # used to skip files that have not changed since the last run.
    pd = shelve.open(
        "/tmp/pydba")  # open -- file may get suffix added by low-level lib

    for root, dirs, walk_files in os.walk(modpath):
        for name in walk_files:
            # Skip files with extensions we do not handle (record each
            # omitted extension once, for potential diagnostics).
            if not name.endswith(".ts") and f_ext(
                    name) not in allowed_extensions:
                if f_ext(name) not in omitted_exts:
                    omitted_exts.append(f_ext(name))
                    #print "Extension %r omitida: %r" % (f_ext(name) , name)

                continue
            fname = os.path.join(root, name)
            mtime = os.stat(fname)[ST_MTIME]
            # loadFile is False only when the cache says the file is unchanged.
            loadFile = True
            if pd.has_key(fname):
                if pd[fname]["mtime"] == mtime:
                    loadFile = False

            # The .mod descriptor names the module and its area; its icon
            # file is loaded too so it can be stored with the module row.
            if f_ext(name) == "mod":
                module = name[:-4]
                file_module = loadfile_inutf8(root, name)
                module_parse = XMLParser(name)
                module_parse.parseText(file_module)
                d_module = {
                    'name': str(module_parse.root.module.name),
                    'alias': str(module_parse.root.module.alias),
                    'area': str(module_parse.root.module.area),
                    'areaname': str(module_parse.root.module.areaname),
                    'version': str(module_parse.root.module.version),
                    'icon': str(module_parse.root.module.icon),
                }
                if loadFile:
                    d_module['icon_data'] = loadfile_inutf8(
                        root, d_module['icon'])

            # Read and escape the file contents; refresh the cache entry
            # (mtime + SHA1) when the file changed.
            contents = ""
            contents_1 = ""
            if name.endswith(".ts") or f_ext(name) in filetypes:
                contents_1 = loadfile_inutf8(root, name)
                contents = my_escape_string(contents_1)
                if not loadFile:
                    try:
                        if pd[fname]["sha"] is None:
                            loadFile = True  # Some bug in database can cause sha is None.
                    except:
                        pass

                if loadFile:
                    sha = SHA1(contents_1)
                    pd[fname] = {
                        "mtime": mtime,
                        "sha": sha,
                        'root': root,
                        'name': name
                    }
                options.sha_allowed_files |= set([pd[fname]["sha"]])
                options.filenames_allowed_files |= set([pd[fname]["name"]])

            # Optionally run the external QS script parser on changed .qs files.
            if f_ext(
                    name
            ) == "qs" and loadFile == True and options.flscriptparser == True:
                flscriptparser(root, name)

            # .pgsql files are loaded through pydba_loadpgsql; the middle
            # component of the filename (name.type.pgsql) selects the type.
            if f_ext(name) == "pgsql":
                array_name = name.split(".")
                # name.ext1.pgsql
                # array_name = [ "nombre" , "view" , "pgsql" ]
                fullfilename = os.path.join(root, name)
                if options.verbose: print "Cargando PGSQL: ", fullfilename
                ret = None
                if len(array_name) == 2:
                    ret = pydba_loadpgsql.loadpgsqlfile(
                        fullfilename=fullfilename,
                        options=options,
                        database=db,
                        pgname=array_name[0],
                        pgtype="sql1",
                        pgtext=contents_1)
                elif len(array_name) != 3:
                    print "ERROR: Al cargar un .pgsql Se esperaban 2 o 3 elementos al estilo nombre.view.pgsql o nombre.pgsql y se encontró %s " % name
                    continue
                else:
                    ret = pydba_loadpgsql.loadpgsqlfile(
                        fullfilename=fullfilename,
                        options=options,
                        database=db,
                        pgname=array_name[0],
                        pgtype=array_name[1],
                        pgtext=contents_1)

                if ret != True:
                    print "ERROR: Errores al cargar el fichero ", root, name

            # Track .mtd table-metadata files; optionally pre-parse them so
            # non-query tables are registered for later structure handling.
            if f_ext(name) == "mtd":
                table = name[:-4]
                # print "### Table: " + table
                tables += [table]
                mtd_files[table] = contents_1
                if preparse:
                    xml = XMLParser(name)
                    xml.parseText(contents_1)
                    if xml.root == None:
                        #print "ERROR: Failed to parse xml %s" %  (name)
                        xml = None
                    else:
                        import pydba_mtdparser

                        mtd = xml.root.tmd

                        mparser = pydba_mtdparser.MTDParser()
                        mparser.parse_mtd(mtd)
                        mtdquery = getattr(mtd, "query", None)
                        if not mtdquery:
                            # Only real tables (not query-backed ones) are kept.
                            pydba_mtdparser.Tables[table] = mparser

            # Queue the file for insertion, refusing duplicates by basename
            # (flfiles is keyed by file name across all modules).
            if contents and f_ext(name) in filetypes:
                file = {}
                for key, val in pd[fname].iteritems():
                    file[key] = val
                file["contents"] = contents
                if (options.modules_loaded.has_key(name)):
                    print "ERROR: %s file was already loaded." % name
                    print "--> this file was found at %s" % root
                    print "--> previous file found at %s" % options.modules_loaded[
                        name]['root']
                    print "* skipping file"
                else:
                    options.modules_loaded[name] = file
                    files += [file]

    pd.close()
    # Make the cache world-writable so other users can reuse/update it.
    try:
        os.chmod("/tmp/pydba",
                 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)
    except:
        pass

    # Ensure the module's area exists in flareas, creating it if missing.
    qry_areas = db.query("SELECT descripcion, bloqueo, idarea"
                         " FROM flareas WHERE idarea='%s'" % d_module['area'])
    tareas = qry_areas.dictresult()
    if len(tareas) == 0:
        print "Creando Area %s - %s " % (d_module['area'],
                                         d_module['areaname'])
        db.query("INSERT INTO flareas (descripcion, bloqueo, idarea)"
                 "VALUES('%s','t','%s')" %
                 (d_module['areaname'], d_module['area']))

    # Check whether the module is already registered; with --updatemodules
    # the existing row is dropped so it gets recreated below.
    habilitar_carga = False
    qry_modulo = db.query(
        "SELECT idmodulo, version, descripcion, bloqueo, idarea"
        " FROM flmodules WHERE idmodulo='%s'" % module)
    tmodulo = qry_modulo.dictresult()
    cargado = False
    for pmodulo in tmodulo:
        cargado = True
        # if pmodulo['bloqueo']=='t':    # TRUE means it is NOT locked; the flag is inverted.
        habilitar_carga = True
    if cargado and options.updatemodules:
        cargado = False
        idmodulo = pg.escape_string(d_module['name'])
        sql = ("DELETE FROM flmodules WHERE idmodulo = '%s'" % (idmodulo))
        db.query(sql)

    if not cargado:
        print "Se procede a crear el módulo nuevo %s" % module

        idmodulo = pg.escape_string(d_module['name'])
        idarea = pg.escape_string(d_module['area'])
        version = pg.escape_string(d_module['version'])
        bloqueo = "t"
        descripcion = pg.escape_string(d_module['alias'])
        icono = pg.escape_string(d_module['icon_data'])

        sql = (
            "INSERT INTO flmodules (idmodulo, idarea, version, bloqueo, descripcion,icono) "
            "VALUES('%s','%s','%s','%s','%s','%s')" %
            (idmodulo, idarea, version, bloqueo, descripcion, icono))
        db.query(sql)
        habilitar_carga = True

    if not habilitar_carga:
        print "Error when trying to update the module '%s': non-loaded or locked module" % module
        return 0

    # Map file name -> existing flfiles row so unchanged files (same SHA)
    # can be skipped in the insert loop below.
    qry_modulos = db.query(
        "SELECT nombre,sha FROM flfiles WHERE idmodulo='%s' " % (module))
    tuplas_modulos = qry_modulos.dictresult()
    dmodulos = {}
    for modulo in tuplas_modulos:
        dmodulos[modulo['nombre']] = modulo

    loaded = []

    for file in files:

        update = True
        if (dmodulos.has_key(file['name'])):
            dm = dmodulos[file['name']]
            if (dm['sha'] == file['sha']):
                update = False

        # --loadbaselec restricts the load to baselec.mtd only.
        if options.loadbaselec:
            update = (file['name'] == 'baselec.mtd')

        if (update):
            loaded += [file['name']]
            if (options.verbose):
                print "* Loading file '%s' => '%s'..." % (file['name'],
                                                          file['sha'])
            # Delete-then-insert keeps flfiles unique per file name.
            sql = "DELETE FROM flfiles WHERE nombre='%s';\n" % file['name']
            db.query(sql)

            file['module'] = module

            sql = (
                "INSERT INTO flfiles (contenido, bloqueo, sha, idmodulo, nombre) "
                "VALUES(E'%(contents)s', 't', '%(sha)s','%(module)s', '%(name)s')"
                % file)
            try:
                db.query(sql)
            except Exception, e:
                print e.__class__.__name__, e
                print sql

                raise
Exemplo n.º 29
0
def repair_db(options,ddb=None,mode=0,odb=None):
    """Repair/synchronize the destination database against the origin one.

    Copies flfiles rows missing in the destination (ddb) from the origin
    (odb), removes leftover destination rows, recomputes SHA1 signatures,
    reloads .mtd table metadata through load_mtd(), regenerates duplicated
    primary keys, uninstalls orphan modules/areas (with --cleanfiles) and
    finally refreshes the flserial checksum.

    Arguments:
        options -- parsed command-line options (full, modules, verbose,
                   transactions, cleanfiles, rebuildtables, ...); may be
                   mutated (options.seqsync is set when replication applies).
        ddb     -- destination DB connection; opened via dbconnect() if None.
        mode    -- unused here; kept for interface compatibility.
        odb     -- origin DB connection; opened via odbconnect() if None.

    Returns 0 when a required connection is unavailable; otherwise None.
    """
    if (options.verbose):
        print "-> RepairDB"
    # Resolve the destination connection, opening it from options if needed.
    if (not ddb):
        if (not options.ddb):
            print "RepairDB requiere una base de datos de destino y no proporcionó ninguna."
            return 0
        ddb=dbconnect(options)
        if (not ddb): 
            return 0
    
    # Resolve the origin connection, opening it from options if needed.
    if (not odb):
        if (not options.odb):
            print "RepairDB requiere una base de datos de origen y no proporcionó ninguna."
            return 0
        odb=odbconnect(options)
        if (not odb): 
            return 0
        
    # Build the SQL filter restricting which flfiles rows are considered
    # when a full repair was not requested (--full).
    where=""
    if not options.full :
        #if len(options.files_loaded)>0:
        where+=" AND (nombre IN ('" + "','".join(options.files_loaded) + "' ) OR nombre LIKE '%.mtd')"
          
        if len(options.modules)>0:
            where+=" AND ( 0=1 "
            for modname,module in options.modules.iteritems():
                # Split each module's table map into explicit include/exclude lists.
                noloadtable=[]
                loadtable=[]
                for tablename, value in module.iteritems():
                    if value==False:
                        noloadtable.append(tablename + ".mtd")
                    else:
                        loadtable.append(tablename + ".mtd")
                
                try:
                    default=module['_default_']
                except:
                    default=False
                
                if default==True:
                    where+=" OR ( idmodulo = '%s' AND nombre NOT IN ('" % modname + "','".join(noloadtable) + "'))" 
                else:
                    where+=" OR ( idmodulo = '%s' AND nombre IN ('" % modname + "','".join(loadtable) + "'))" 
            where+=" OR nombre LIKE '%.mtd' )"
                       
        # When origin and destination differ, restrict the repair to metadata.
        if options.odb!=options.ddb:
            where+=" AND nombre LIKE '%.mtd'"
        
    if (options.verbose):
        print "Inicializando reparación de la base de datos '%s'..." % options.ddb
        print " * Calcular firmas SHA1 de files y metadata"
    
    # Detect an optional master-master replica server table; when present
    # and consistent, enable sequence synchronization via options.seqsync.
    if options.debug:
        print "* Revisando si existe una tabla de servidores réplica ..."
    tabla = None
    qry1 =ddb.query("SELECT relname FROM pg_class WHERE relname ='servidoresreplica'");
    for row in qry1.dictresult():
        tabla = row['relname']
    
    if tabla:
        if options.debug:
            print "Encontrada una tabla %s." % tabla
        qry1 =ddb.query("SELECT codservidor, activo, numero  FROM %s" % tabla);
        servsize = len(qry1.dictresult())
        servnumber = -1
        servname = None
        for row in qry1.dictresult():
            if row['activo'] == 't':
                try:
                    # Exactly one server may be active at a time.
                    assert(servnumber == -1)
                except AssertionError:
                    print "PANIC: Hay mas de un servidor activo! El valor anterior era %s el nuevo %s. (activo vale %s)" % (
                        repr(servnumber),
                        repr(row['numero']),
                        repr(row['activo'])
                        )
                    raise
                servnumber = row['numero']
                servname = row['codservidor']
        activar_servrep = True
        if servsize < 1 or servsize > 10:
            activar_servrep = False
            print "WARN: No se activa la sincronización de secuencia para replicación master-master por error en la cantidad de servidores (%d)" % servsize
            
        if servnumber < 0 or servnumber >= servsize:
            activar_servrep = False
            print "WARN: No se activa la sincronización de secuencia para replicación master-master por error el numero de este servidor (%d/%d)" % (servnumber,servsize)
        
        if activar_servrep:
            print "INFO: Activando sincro. de secuencias para %s: %d/%d" % (repr(servname), servnumber,servsize)
            options.seqsync = (servnumber,servsize)
        
    else:
        if options.debug:
            print "No se encontró tabla de servidores de replica"
            
        
    
    
    # When running transactionally, lock the key tables up front so no other
    # user can write while the repair is in progress.
    if options.transactions: 
        odb.query("BEGIN;");
        try:
            lltables = "flfiles,flserial,flmetadata".split(",")
            for ltable in lltables:
                sql = "LOCK %s NOWAIT;" % ltable
                if (options.verbose): print sql
                odb.query(sql);
                if (options.verbose): print "done."
        except:
            print "Error al bloquear la tabla %s , ¡algun otro usuario está conectado!" % ltable
            odb.query("ROLLBACK;");
            raise
            
    tables_notrebuilt = []
    pydba_loadpgsql.process_drop(options,ddb)
    

    # Diff the SHA sets on both sides: after the loop below, `ofiles` holds
    # SHAs present only in the origin and `dfiles` those only in the destination.
    qry_omodulos=odb.query("SELECT sha " +
            "FROM flfiles WHERE sha!='' AND nombre NOT LIKE '%%alteredtable%%.mtd' ORDER BY sha");
    ofiles=[]
    for row in qry_omodulos.dictresult():
        ofiles.append(row['sha'])
    
    qry_dmodulos=ddb.query("SELECT sha " +
            "FROM flfiles WHERE sha!='' AND nombre NOT LIKE '%%alteredtable%%.mtd' ORDER BY sha");
    dfiles=[]
    for row in qry_dmodulos.dictresult():
        if row['sha'] in ofiles:
            ofiles.remove(row['sha'])
        else:
            dfiles.append(row['sha'])
    
    # Remove the leftover files.
    qry_dmodulos=ddb.query("DELETE FROM flfiles WHERE sha IN ('" + "','".join(dfiles) + "')")
    
    # Fetch the new files.
    qry_omodulos=odb.query("SELECT * FROM flfiles WHERE sha IN ('" + "','".join(ofiles) + "')")
    
    # Insert them into the new DB.
    for row in qry_omodulos.dictresult():
        fields=row.keys()
        values=[]
        for field in fields:
            campo=row[field]
            if (campo is not None):  # non-NULL values are escaped; NULL stays literal
                values.append("(E'" + pg.escape_string(str(campo)) + "')")
            else:
                values.append("NULL")
        try:
            qry_dmodulos=ddb.query("DELETE FROM flfiles WHERE nombre ='" + row['nombre'] + "'")
            
            sql="INSERT INTO flfiles (" + ",".join(fields) + ") VALUES(" + ",".join(values) + ")"
            ddb.query(sql)
        except:
            print sql
            raise
        
        
    
    # Recompute SHA1 signatures for the selected files and refresh flmetadata
    # for every .mtd, reloading table structure through load_mtd() when needed.
    sqlModulos = ("SELECT idmodulo, nombre, contenido, sha " +
                    "FROM flfiles WHERE sha!='' AND nombre NOT LIKE '%%alteredtable%%.mtd' "
                    + where + " ORDER BY idmodulo, nombre")
    # print sqlModulos
    qry_modulos=ddb.query(sqlModulos);
                                
    modulos=qry_modulos.dictresult() 
    # print "%d resultados." % len(modulos)
    sql=""
    resha1="";  # rolling SHA1 over all file SHAs; ends up in flserial
    xmlfiles=("xml","ui","qry","kut","mtd","ts")
    ficheros_actualizados=0
    for modulo in modulos:
        if options.loadbaselec:
            if modulo['nombre'] != 'baselec.mtd': 
                continue
        xml=None
        if options.full and modulo.has_key('contenido'):
            sha1=SHA1(modulo['contenido'])
        else:            
            sha1=modulo['sha']
        
        if (sha1==None):
            print "ERROR: Carácteres no ISO en %s.%s (se omite SHA1)" % (modulo['idmodulo'],modulo['nombre'])
            sha1=modulo['sha']
        
        if f_ext(modulo['nombre']) in xmlfiles:
            xml=XMLParser("%s.%s" % (modulo['idmodulo'],modulo['nombre']))
            xml.parseText(modulo['contenido'])
            if xml.root==None:
                xml=None
            
        
        resha1=SHA1(resha1+sha1)
        if (modulo['sha']!=sha1):
            ficheros_actualizados+=1
            print "Updating " + modulo['nombre'] + " => " + sha1 + " ..."
            sql+="UPDATE flfiles SET sha='%s' WHERE nombre='%s';\n" %    (sha1,modulo['nombre'])
        elif (options.debug):
            print modulo['nombre'] + " is ok."
        
        if (f_ext(modulo['nombre'])=="mtd"):
            tabla=modulo['nombre'][:-4]
            
            qry_modulos=ddb.query("SELECT xml FROM flmetadata WHERE tabla='%s'" % tabla);
            tablas=qry_modulos.dictresult() 
            TablaCargada=False
            sql_update_metadata = ""
            for txml in tablas:
                TablaCargada=True
                if txml['xml']!=sha1:
                    sql_update_metadata="UPDATE flmetadata SET xml='%s' WHERE tabla='%s';\n" % (sha1,tabla)
            if not TablaCargada:
                    print "Cargando tabla nueva %s ..." % tabla
                    sql_update_metadata=("INSERT INTO flmetadata (tabla,bloqueo,seq,xml)"
                        " VALUES('%s','f','0','%s');\n" % (tabla,sha1) )
            if xml:
                if sql_update_metadata or options.full or options.loadbaselec:    
                    updatedTableStructure = load_mtd(options,odb,ddb,tabla,xml)
                    if updatedTableStructure or not TablaCargada:
                        if options.verbose:
                            print "Actualizando metadata para %s" % tabla
                        if sql_update_metadata:
                            ddb.query(sql_update_metadata)
                    elif sql_update_metadata:
                        if options.rebuildtables or options.forgottables:
                            print "Actualizando metadatos para tabla %s" % tabla
                            ddb.query(sql_update_metadata)
                        else:
                            tables_notrebuilt.append(modulo['nombre'])
                
        # Flush the accumulated UPDATE statements in batches.
        if (len(sql)>1024):
            ddb.query(sql)
            sql=""
    
    if (len(sql)>0):    
        ddb.query(sql)
        sql=""
    
    # Regenerate primary keys whose constraint name carries a '_pkey_NN'
    # suffix (duplicated/renamed constraints): drop both and re-add the PK.
    qry_d_pkeys=ddb.query("SELECT table_name, column_name,constraint_name FROM information_schema.constraint_column_usage WHERE constraint_name LIKE '%_pkey_%';")
    for row in qry_d_pkeys.dictresult():
        qry_d_pkeys2=ddb.query("""
        SELECT table_name, column_name,constraint_name 
        FROM information_schema.constraint_column_usage 
        WHERE constraint_name = '%(table_name)s_pkey';
        """ % row)
        for row2 in qry_d_pkeys2.dictresult():
            sql = """
            ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s;
            """ % row2
            try: 
                ddb.query(sql)
                print "Borrado pkey de la tabla %(table_name)s" % row2
            except:
                print "Error en query corrigiendo pkey:", row2
                print traceback.format_exc()
                print "SQL:"
                print sql
        
        sql = """
        ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s;
        ALTER TABLE %(table_name)s ADD PRIMARY KEY (%(column_name)s);
        """ % row
        try: 
            ddb.query(sql)
            print "PK Regenerado: %(constraint_name)s" % row
            try:
                ddb.query("ALTER INDEX %(table_name)s_pkey SET (fillfactor = 80);" % row)
            except:
                pass
        except:
            print "Error en query corrigiendo pkey:", row
            print traceback.format_exc()
            print "SQL:"
            print sql

    # !---- Check which modules are actually installed (have files)
    qry_dmodulos=ddb.query("SELECT DISTINCT idmodulo FROM flfiles");
    modules_installed = set([])
    modules_to_uninstall = set([])
    for (idmodulo, ) in qry_dmodulos.getresult():
        modules_installed.add(idmodulo)
    
    if modules_installed:
        # !--- Examine the modules pending uninstall
        qry_dmodulos=ddb.query("SELECT idmodulo, descripcion FROM flmodules WHERE bloqueo = TRUE AND idmodulo NOT IN ('" + "', '".join(modules_installed) + "')");
        for idmodulo, descripcion in qry_dmodulos.getresult():
            modules_to_uninstall.add((idmodulo, descripcion))
    
    if modules_to_uninstall:
        if options.cleanfiles:
            print "INFO: Los siguientes modulos ya no se usan y serán desinstalados:"
            for idmodulo, nombre in modules_to_uninstall:
                print " - %s : %s " % (idmodulo, nombre)
                ddb.query("DELETE FROM flmodules WHERE idmodulo = '%s'" % idmodulo);
            print ":: Finalizado."
        else:
            print "Existen %d modulos pendientes de desinstalar:" % (len(modules_to_uninstall)),
            for idmodulo, nombre in modules_to_uninstall:
                print idmodulo, 
            print
            print "Use --cleanfiles para desinstalarlos"
    # !---- Check which areas are actually installed (have modules)
    qry_dmodulos=ddb.query("SELECT DISTINCT idarea FROM flmodules");
    areas_installed = set([])
    areas_to_uninstall = set([])
    for (idarea, ) in qry_dmodulos.getresult():
        areas_installed.add(idarea)
    
    if areas_installed:
        # !--- Examine the areas pending uninstall
        qry_dmodulos=ddb.query("SELECT idarea, descripcion FROM flareas WHERE bloqueo = TRUE AND idarea NOT IN ('" + "', '".join(areas_installed) + "')");
        for idarea, descripcion in qry_dmodulos.getresult():
            areas_to_uninstall.add((idarea, descripcion))
    
    if areas_to_uninstall:
        if options.cleanfiles:
            print "INFO: Las siguientes areas ya no se usan y serán desinstaladas:"
            for idarea, nombre in areas_to_uninstall:
                print " - %s : %s " % (idarea, nombre)
                ddb.query("DELETE FROM flareas WHERE idarea = '%s'" % idarea);
            print ":: Finalizado."
        else:
            print "Existen %d areas pendientes de desinstalar:" % (len(areas_to_uninstall)),
            for idarea, nombre in areas_to_uninstall:
                print idarea, 
            print
            print "Use --cleanfiles para desinstalarlos"
            
    # Refresh flserial with the new rolling SHA when any file changed.
    qry_serial=ddb.query("SELECT sha FROM flserial");
    serials=qry_serial.dictresult() 
    for serial in serials:
        if (serial['sha']==resha1):
            resha1=False
                
    if resha1 and ficheros_actualizados>0:
        if len(serials)>0 :
            ddb.query("UPDATE flserial SET sha='%s';" % (resha1))
            print "Updated flserial => %s." % (resha1)     
        else:        
            ddb.query("INSERT INTO flserial (serie,sha) VALUES(1,'%s')" % (resha1))
            print "Created flserial => %s." % (resha1)     
    
    pydba_loadpgsql.process_create(options,ddb)
    if options.transactions: odb.query("COMMIT;");
    if tables_notrebuilt:
        print "Tables pending rebuild:", ", ".join(tables_notrebuilt)
Exemplo n.º 30
0
def escape(var):
    """Return *var* escaped for safe embedding in a SQL string literal."""
    escaped = pg.escape_string(var)
    return escaped
Exemplo n.º 31
0
# --- Migration: copy legacy job_status rows into the new job_info table ---
# NOTE(review): message, base_url and client_dn are run through
# pg.escape_string(), but client_ip and service_name are interpolated raw
# into the SQL -- confirm these columns can never contain quote characters.
import time, datetime  # hoisted: the original re-imported these inside the loop on every row

counter = 0
for entry in old_job_status.dictresult():
    # let's get all the data we need
    job_id = entry["job_id"]
    #job_outputs = db_old.query("select * from job_output where job_id = \'%s\'" % job_id)
    #output_files = db_old.query("select * from output_file where job_id = \'%s\'" % job_id)

    # Legacy timestamps are stored as e.g. "Jan 02, 2010 3:04:05 PM";
    # parse start_time and last_update into datetime objects.
    start_time = datetime.datetime(*time.strptime(entry["start_time"], "%b %d, %Y %I:%M:%S %p")[0:6])
    last_update = datetime.datetime(*time.strptime(entry["last_update"], "%b %d, %Y %I:%M:%S %p")[0:6])

    # We have everything we need; load the entry in the new DB,
    # starting with the job_info table.
    #job_id code message base_url handle 
    result = db_new.query("insert into job_info values (\'"+job_id+"\', "+str(entry["code"])+", \'"+pg.escape_string(entry["message"])+"\', \'"+pg.escape_string(entry['base_url'])+"\', \'null\'," + \
        #start_time_date start_time_time activation_time_date activation_time_time completion_time_date completion_time_time 
        "\'"+str(start_time.date())+"\', \'"+str(start_time.time())+"\', NULL, NULL, NULL, NULL, " + \
        #last_update_date last_update_time client_dn client_ip service_name 
        "\'"+str(last_update.date())+"\', \'"+str(last_update.time())+"\', \'"+pg.escape_string(entry["client_dn"])+"\', \'"+entry["client_ip"]+"\', \'"+entry["service_name"]+"\') ")
    #if result != 1:
        #print "insert failed! Aborting"
        #sys.exit(-1)
    #second we load the job_outputs table 
    #if you are migrating you probably don't need to bring this in the new DB
    #for output in job_outputs.dictresult():
        #db_new.query("insert into job_output (id, job_id, std_out, std_err) values ("+str(startoutputID)+", \'"+job_id+"\', \'"+output['std_out']+\
        #    "\', \'"+output['std_err']+"\')")
        #startoutputID=startoutputID+1
    #for file in output_files.dictresult():
        #db_new.query("insert into output_file (id, url, name, job_id) values ("+str(startoutfilesID)+", \'"+file['url']+"\', \'"+file['name']+"\', \'"+job_id+"\')")
Exemplo n.º 32
0
 def e(self, s):
     """Shorthand: escape *s* for use inside a SQL string literal."""
     return pg.escape_string(s)
Exemplo n.º 33
0
def repair_db(options,ddb=None,mode=0,odb=None):
    """Repair/synchronise the destination database's file and metadata tables.

    Steps performed, in order:
      1. Remove flfiles rows whose SHA exists only in the destination and
         copy over rows that exist only in the origin DB.
      2. Recompute SHA1 signatures of every selected file, updating flfiles
         and flmetadata, and (re)building table structures via load_mtd().
      3. Regenerate malformed primary keys found in information_schema.
      4. Report/uninstall modules and areas no longer referenced by flfiles.
      5. Refresh the global chained signature stored in flserial.

    options -- parsed command-line options (verbose, debug, full, modules,
               files_loaded, transactions, cleanfiles, rebuildtables, ...)
    ddb     -- destination DB connection; opened via dbconnect() when None
    mode    -- unused in this routine; kept for call compatibility
    odb     -- origin DB connection; opened via odbconnect() when None

    Returns 0 when a required connection is missing; otherwise returns None.
    """
    if (options.verbose):
        print "-> RepairDB"
    # Open the destination connection if the caller did not supply one.
    if (not ddb):
        if (not options.ddb):
            print "RepairDB requiere una base de datos de destino y no proporcionó ninguna."
            return 0
        ddb=dbconnect(options)
        if (not ddb): 
            return 0
    
    # Open the origin connection if the caller did not supply one.
    if (not odb):
        if (not options.odb):
            print "RepairDB requiere una base de datos de origen y no proporcionó ninguna."
            return 0
        odb=odbconnect(options)
        if (not odb): 
            return 0
        
    # Build the WHERE filter that restricts which flfiles rows get processed
    # (unless --full was given, which processes everything).
    where=""
    if not options.full :
        #if len(options.files_loaded)>0:
        where+=" AND (nombre IN ('" + "','".join(options.files_loaded) + "' ) OR nombre LIKE '%.mtd')"
          
        if len(options.modules)>0:
            where+=" AND ( 0=1 "
            for modname,module in options.modules.iteritems():
                noloadtable=[]
                loadtable=[]
                for tablename, value in module.iteritems():
                    if value==False:
                        noloadtable.append(tablename + ".mtd")
                    else:
                        loadtable.append(tablename + ".mtd")
                
                # '_default_' decides whether unlisted tables of the module
                # are included (True) or excluded (False/absent).
                try:
                    default=module['_default_']
                except:
                    default=False
                
                if default==True:
                    where+=" OR ( idmodulo = '%s' AND nombre NOT IN ('" % modname + "','".join(noloadtable) + "'))" 
                else:
                    where+=" OR ( idmodulo = '%s' AND nombre IN ('" % modname + "','".join(loadtable) + "'))" 
            where+=" OR nombre LIKE '%.mtd' )"
                       
        # Cross-database repair only looks at metadata (.mtd) files.
        if options.odb!=options.ddb:
            where+=" AND nombre LIKE '%.mtd'"
        
    if (options.verbose):
        print "Inicializando reparación de la base de datos '%s'..." % options.ddb
        print " * Calcular firmas SHA1 de files y metadata"
    
    # Look for a master-master replica server table; when present and sane,
    # enable sequence synchronisation via options.seqsync.
    if options.debug:
        print "* Revisando si existe una tabla de servidores réplica ..."
    tabla = None
    qry1 =ddb.query("SELECT relname FROM pg_class WHERE relname ='servidoresreplica'");
    for row in qry1.dictresult():
        tabla = row['relname']
    
    if tabla:
        if options.debug:
            print "Encontrada una tabla %s." % tabla
        qry1 =ddb.query("SELECT codservidor, activo, numero  FROM %s" % tabla);
        servsize = len(qry1.dictresult())
        servnumber = -1
        servname = None
        for row in qry1.dictresult():
            if row['activo'] == 't':
                # At most one server may be flagged active; abort otherwise.
                try:
                    assert(servnumber == -1)
                except AssertionError:
                    print "PANIC: Hay mas de un servidor activo! El valor anterior era %s el nuevo %s. (activo vale %s)" % (
                        repr(servnumber),
                        repr(row['numero']),
                        repr(row['activo'])
                        )
                    raise
                servnumber = row['numero']
                servname = row['codservidor']
        # Enable replication sequence sync only with a plausible server
        # count (1..10) and a server index within range.
        activar_servrep = True
        if servsize < 1 or servsize > 10:
            activar_servrep = False
            print "WARN: No se activa la sincronización de secuencia para replicación master-master por error en la cantidad de servidores (%d)" % servsize
            
        if servnumber < 0 or servnumber >= servsize:
            activar_servrep = False
            print "WARN: No se activa la sincronización de secuencia para replicación master-master por error el numero de este servidor (%d/%d)" % (servnumber,servsize)
        
        if activar_servrep:
            print "INFO: Activando sincro. de secuencias para %s: %d/%d" % (repr(servname), servnumber,servsize)
            options.seqsync = (servnumber,servsize)
        
    else:
        if options.debug:
            print "No se encontró tabla de servidores de replica"
            
        
    
    
    # Lock the core tables up front; NOWAIT makes this fail immediately if
    # another user is connected, in which case we roll back and re-raise.
    if options.transactions: 
        odb.query("BEGIN;");
        try:
            lltables = "flfiles,flserial,flmetadata".split(",")
            for ltable in lltables:
                sql = "LOCK %s NOWAIT;" % ltable
                if (options.verbose): print sql
                odb.query(sql);
                if (options.verbose): print "done."
        except:
            print "Error al bloquear la tabla %s , ¡algun otro usuario está conectado!" % ltable
            odb.query("ROLLBACK;");
            raise
            
    tables_notrebuilt = []
    pydba_loadpgsql.process_drop(options,ddb)
    

    # Collect SHAs present in the origin DB (excluding alteredtable mtds).
    qry_omodulos=odb.query("SELECT sha " +
            "FROM flfiles WHERE sha!='' AND nombre NOT LIKE '%%alteredtable%%.mtd' ORDER BY sha");
    ofiles=[]
    for row in qry_omodulos.dictresult():
        ofiles.append(row['sha'])
    
    # Diff against the destination: ofiles keeps SHAs only in the origin,
    # dfiles collects SHAs only in the destination.
    qry_dmodulos=ddb.query("SELECT sha " +
            "FROM flfiles WHERE sha!='' AND nombre NOT LIKE '%%alteredtable%%.mtd' ORDER BY sha");
    dfiles=[]
    for row in qry_dmodulos.dictresult():
        if row['sha'] in ofiles:
            ofiles.remove(row['sha'])
        else:
            dfiles.append(row['sha'])
    
    # Remove the leftover files from the destination.
    qry_dmodulos=ddb.query("DELETE FROM flfiles WHERE sha IN ('" + "','".join(dfiles) + "')")
    
    # Fetch the files that only exist in the origin DB.
    qry_omodulos=odb.query("SELECT * FROM flfiles WHERE sha IN ('" + "','".join(ofiles) + "')")
    
    # Insert them into the destination DB.
    for row in qry_omodulos.dictresult():
        fields=row.keys()
        values=[]
        for field in fields:
            campo=row[field]
            if (campo is not None):# non-NULL value: escape and quote it
                values.append("(E'" + pg.escape_string(str(campo)) + "')")
            else:
                values.append("NULL")
        try:
            # Delete any same-named row first so the INSERT cannot collide.
            qry_dmodulos=ddb.query("DELETE FROM flfiles WHERE nombre ='" + row['nombre'] + "'")
            
            sql="INSERT INTO flfiles (" + ",".join(fields) + ") VALUES(" + ",".join(values) + ")"
            ddb.query(sql)
        except:
            print sql
            raise
        
        
    
    sqlModulos = ("SELECT idmodulo, nombre, contenido, sha " +
                    "FROM flfiles WHERE sha!='' AND nombre NOT LIKE '%%alteredtable%%.mtd' "
                    + where + " ORDER BY idmodulo, nombre")
    # print sqlModulos
    qry_modulos=ddb.query(sqlModulos);
                                
    modulos=qry_modulos.dictresult() 
    # print "%d resultados." % len(modulos)
    # Walk every selected file: recompute its SHA1, chain it into resha1
    # (the global signature), and refresh flfiles/flmetadata as needed.
    sql=""
    resha1="";
    xmlfiles=("xml","ui","qry","kut","mtd","ts")
    ficheros_actualizados=0
    for modulo in modulos:
        if options.loadbaselec:
            if modulo['nombre'] != 'baselec.mtd': 
                continue
        xml=None
        if options.full and modulo.has_key('contenido'):
            sha1=SHA1(modulo['contenido'])
        else:            
            sha1=modulo['sha']
        
        # SHA1() returning None indicates non-ISO characters in the content.
        if (sha1==None):
            print "ERROR: Carácteres no ISO en %s.%s (se omite SHA1)" % (modulo['idmodulo'],modulo['nombre'])
            sha1=modulo['sha']
        
        if f_ext(modulo['nombre']) in xmlfiles:
            xml=XMLParser("%s.%s" % (modulo['idmodulo'],modulo['nombre']))
            xml.parseText(modulo['contenido'])
            if xml.root==None:
                xml=None
            
        
        resha1=SHA1(resha1+sha1)
        if (modulo['sha']!=sha1):
            ficheros_actualizados+=1
            print "Updating " + modulo['nombre'] + " => " + sha1 + " ..."
            sql+="UPDATE flfiles SET sha='%s' WHERE nombre='%s';\n" %    (sha1,modulo['nombre'])
        elif (options.debug):
            print modulo['nombre'] + " is ok."
        
        # Metadata files additionally drive flmetadata and table rebuilds.
        if (f_ext(modulo['nombre'])=="mtd"):
            tabla=modulo['nombre'][:-4]
            
            qry_modulos=ddb.query("SELECT xml FROM flmetadata WHERE tabla='%s'" % tabla);
            tablas=qry_modulos.dictresult() 
            TablaCargada=False
            sql_update_metadata = ""
            for txml in tablas:
                TablaCargada=True
                if txml['xml']!=sha1:
                    sql_update_metadata="UPDATE flmetadata SET xml='%s' WHERE tabla='%s';\n" % (sha1,tabla)
            if not TablaCargada:
                    print "Cargando tabla nueva %s ..." % tabla
                    sql_update_metadata=("INSERT INTO flmetadata (tabla,bloqueo,seq,xml)"
                        " VALUES('%s','f','0','%s');\n" % (tabla,sha1) )
            if xml:
                if sql_update_metadata or options.full or options.loadbaselec:    
                    # load_mtd() returns whether the table structure changed.
                    updatedTableStructure = load_mtd(options,odb,ddb,tabla,xml)
                    if updatedTableStructure or not TablaCargada:
                        if options.verbose:
                            print "Actualizando metadata para %s" % tabla
                        if sql_update_metadata:
                            ddb.query(sql_update_metadata)
                    elif sql_update_metadata:
                        if options.rebuildtables or options.forgottables:
                            print "Actualizando metadatos para tabla %s" % tabla
                            ddb.query(sql_update_metadata)
                        else:
                            tables_notrebuilt.append(modulo['nombre'])
                
        # Flush the accumulated UPDATE statements in batches.
        if (len(sql)>1024):
            ddb.query(sql)
            sql=""
    
    if (len(sql)>0):    
        ddb.query(sql)
        sql=""
    
    # Regenerate primary keys whose constraint name carries a '_pkey_'
    # suffix (a sign of an earlier failed rename/rebuild).
    qry_d_pkeys=ddb.query("SELECT table_name, column_name,constraint_name FROM information_schema.constraint_column_usage WHERE constraint_name LIKE '%_pkey_%';")
    for row in qry_d_pkeys.dictresult():
        # Drop any properly-named pkey first so ADD PRIMARY KEY can succeed.
        qry_d_pkeys2=ddb.query("""
        SELECT table_name, column_name,constraint_name 
        FROM information_schema.constraint_column_usage 
        WHERE constraint_name = '%(table_name)s_pkey';
        """ % row)
        for row2 in qry_d_pkeys2.dictresult():
            sql = """
            ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s;
            """ % row2
            try: 
                ddb.query(sql)
                print "Borrado pkey de la tabla %(table_name)s" % row2
            except:
                print "Error en query corrigiendo pkey:", row2
                print traceback.format_exc()
                print "SQL:"
                print sql
        
        sql = """
        ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s;
        ALTER TABLE %(table_name)s ADD PRIMARY KEY (%(column_name)s);
        """ % row
        try: 
            ddb.query(sql)
            print "PK Regenerado: %(constraint_name)s" % row
            # fillfactor tuning is best-effort; ignore failures.
            try:
                ddb.query("ALTER INDEX %(table_name)s_pkey SET (fillfactor = 80);" % row)
            except:
                pass
        except:
            print "Error en query corrigiendo pkey:", row
            print traceback.format_exc()
            print "SQL:"
            print sql

    # !---- Check which modules are still installed (referenced by flfiles)
    qry_dmodulos=ddb.query("SELECT DISTINCT idmodulo FROM flfiles");
    modules_installed = set([])
    modules_to_uninstall = set([])
    for (idmodulo, ) in qry_dmodulos.getresult():
        modules_installed.add(idmodulo)
    
    if modules_installed:
        # !--- Inspect the modules pending uninstallation
        qry_dmodulos=ddb.query("SELECT idmodulo, descripcion FROM flmodules WHERE bloqueo = TRUE AND idmodulo NOT IN ('" + "', '".join(modules_installed) + "')");
        for idmodulo, descripcion in qry_dmodulos.getresult():
            modules_to_uninstall.add((idmodulo, descripcion))
    
    if modules_to_uninstall:
        if options.cleanfiles:
            print "INFO: Los siguientes modulos ya no se usan y serán desinstalados:"
            for idmodulo, nombre in modules_to_uninstall:
                print " - %s : %s " % (idmodulo, nombre)
                ddb.query("DELETE FROM flmodules WHERE idmodulo = '%s'" % idmodulo);
            print ":: Finalizado."
        else:
            print "Existen %d modulos pendientes de desinstalar:" % (len(modules_to_uninstall)),
            for idmodulo, nombre in modules_to_uninstall:
                print idmodulo, 
            print
            print "Use --cleanfiles para desinstalarlos"
    # !---- Check which areas are still installed
    qry_dmodulos=ddb.query("SELECT DISTINCT idarea FROM flmodules");
    areas_installed = set([])
    areas_to_uninstall = set([])
    for (idarea, ) in qry_dmodulos.getresult():
        areas_installed.add(idarea)
    
    if areas_installed:
        # !--- Inspect the areas pending uninstallation
        qry_dmodulos=ddb.query("SELECT idarea, descripcion FROM flareas WHERE bloqueo = TRUE AND idarea NOT IN ('" + "', '".join(areas_installed) + "')");
        for idarea, descripcion in qry_dmodulos.getresult():
            areas_to_uninstall.add((idarea, descripcion))
    
    if areas_to_uninstall:
        if options.cleanfiles:
            print "INFO: Las siguientes areas ya no se usan y serán desinstaladas:"
            for idarea, nombre in areas_to_uninstall:
                print " - %s : %s " % (idarea, nombre)
                ddb.query("DELETE FROM flareas WHERE idarea = '%s'" % idarea);
            print ":: Finalizado."
        else:
            print "Existen %d areas pendientes de desinstalar:" % (len(areas_to_uninstall)),
            for idarea, nombre in areas_to_uninstall:
                print idarea, 
            print
            print "Use --cleanfiles para desinstalarlos"
            
    # flserial holds the chained signature of all files; refresh it only
    # when it differs from the recomputed value and files actually changed.
    qry_serial=ddb.query("SELECT sha FROM flserial");
    serials=qry_serial.dictresult() 
    for serial in serials:
        if (serial['sha']==resha1):
            resha1=False
                
    if resha1 and ficheros_actualizados>0:
        if len(serials)>0 :
            ddb.query("UPDATE flserial SET sha='%s';" % (resha1))
            print "Updated flserial => %s." % (resha1)     
        else:        
            ddb.query("INSERT INTO flserial (serie,sha) VALUES(1,'%s')" % (resha1))
            print "Created flserial => %s." % (resha1)     
    
    pydba_loadpgsql.process_create(options,ddb)
    if options.transactions: odb.query("COMMIT;");
    if tables_notrebuilt:
        print "Tables pending rebuild:", ", ".join(tables_notrebuilt)
Exemplo n.º 34
0
passwd = "andreygon"

#f = open("publ_loc.txt", "r")
conn = db.connect("dbname='%s' user='******' host='%s'  password='******'" % (dbname, user, host, passwd))
cur = conn.cursor()

cur.execute("select type from articles where type is not null")
a = cur.fetchall()
b = []
for i in a:
    if len(i[0]) > 0:
#        nm = i[0].split(",")
#        for j in nm:
#            k = j.split(", ")
#            for u in k:
#                if u.startswith(" "):
#                        u = u[1:]
#        
        b.append(i[0])#.replace(".", ""))

c = set(b)
c = list(c)
c.sort()


for i in c:#f.readlines():
    #f.write("%s\n" % escape_string(i))
    cur.execute("insert into type (publ_type) values ('%s')" % escape_string(i.replace("\n", "")))
    print i#.replace("\n", "")
    conn.commit()
Exemplo n.º 35
0
def load_module_loadone(options,modpath,db, preparse=False):
    """Load a single module directory tree into the database.

    Walks *modpath*, reads the module descriptor (*.mod), caches per-file
    mtime/SHA1 in a shelve at /tmp/pydba to skip unchanged files, loads
    .pgsql files through pydba_loadpgsql, optionally pre-parses .mtd
    metadata, then upserts the module row (flmodules / flareas) and every
    changed file into flfiles.

    options  -- parsed command-line options; also mutated: sha_allowed_files,
                filenames_allowed_files and modules_loaded are updated
    modpath  -- root directory of the module (must contain one .mod file;
                presumably exactly one -- d_module is otherwise undefined)
    db       -- open PyGreSQL connection
    preparse -- when True, parse .mtd files into pydba_mtdparser.Tables

    Returns 0 when the module cannot be loaded (locked/not present).
    """
    module=""
    tables=[]
    mtd_files={}
    filetypes=["xml","ui","qry","kut","qs","mtd","ts", "pgsql"]
    unicode_filetypes=["ui","ts"]
    
    files=[]
    pd = shelve.open("/tmp/pydba") # open -- file may get suffix added by low-level lib
                          
    
    for root, dirs, walk_files in os.walk(modpath):
            for name in walk_files:
                fname = os.path.join(root, name)
                mtime = os.stat(fname)[ST_MTIME]
                # Skip re-hashing files whose mtime matches the shelve cache.
                loadFile = True
                if pd.has_key(fname):
                    if pd[fname]["mtime"]==mtime:
                        loadFile=False
                
                # Module descriptor: extract name/alias/area/version/icon.
                if f_ext(name)=="mod":
                    module=name[:-4]
                    file_module=loadfile_inutf8(root, name)
                    module_parse=XMLParser(name)
                    module_parse.parseText(file_module)
                    d_module={
                        'name' :        str(module_parse.root.module.name),
                        'alias' :     str(module_parse.root.module.alias),
                        'area' :        str(module_parse.root.module.area),
                        'areaname' :        str(module_parse.root.module.areaname),
                        'version' :        str(module_parse.root.module.version),
                        'icon' :        str(module_parse.root.module.icon),
                        }
                    if loadFile:
                        d_module['icon_data']=loadfile_inutf8(root, d_module['icon'])
                
                contents=""
                contents_1=""
                if f_ext(name) in filetypes:
                    contents_1=loadfile_inutf8(root,name)
                    contents=my_escape_string(contents_1)
                    if not loadFile:
                        try:
                            if pd[fname]["sha"] is None: loadFile = True # Some bug in database can cause sha is None.
                        except:
                            pass
                            
                    if loadFile:                    
                        sha=SHA1(contents_1)
                        pd[fname]={"mtime":mtime, "sha":sha, 'root' : root,'name' : name}
                    # Record this file as allowed for later pruning passes.
                    options.sha_allowed_files |= set([pd[fname]["sha"]])    
                    options.filenames_allowed_files |= set([pd[fname]["name"]])    
                    
                # Optionally run the QS syntax checker on changed scripts.
                if f_ext(name)=="qs" and loadFile==True and options.flscriptparser==True:
                    flscriptparser(root,name)              
                    
                
                if f_ext(name)=="pgsql":
                    array_name=name.split(".")
                    # name.ext1.pgsql
                    # array_name = [ "nombre" , "view" , "pgsql" ]
                    fullfilename = os.path.join(root, name)
                    if options.verbose: print "Cargando PGSQL: ", fullfilename
                    ret = None
                    if len(array_name)==2:
                        ret = pydba_loadpgsql.loadpgsqlfile(fullfilename = fullfilename, options = options, database = db, pgname = array_name[0], pgtype = "sql1", pgtext = contents_1)
                    elif len(array_name)!=3:
                        print "ERROR: Al cargar un .pgsql Se esperaban 2 o 3 elementos al estilo nombre.view.pgsql o nombre.pgsql y se encontró %s " % name
                        continue
                    else:
                        ret = pydba_loadpgsql.loadpgsqlfile(fullfilename = fullfilename, options = options, database = db, pgname = array_name[0], pgtype = array_name[1], pgtext = contents_1)
                    
                    if ret != True:
                        print "ERROR: Errores al cargar el fichero ", root, name
                        
                        
                        
                        
                    
                
                    
                
                # Table metadata: remember raw contents; optionally pre-parse.
                if f_ext(name)=="mtd":
                    table=name[:-4]
                    # print "### Table: " + table
                    tables+=[table]
                    mtd_files[table]=contents_1
                    if preparse:
                        xml=XMLParser(name)
                        xml.parseText(contents_1)
                        if xml.root==None:
                            #print "ERROR: Failed to parse xml %s" %  (name)
                            xml=None
                        else:
                            import pydba_mtdparser
                            
                            mtd=xml.root.tmd
                            
                            mparser=pydba_mtdparser.MTDParser()
                            mparser.parse_mtd(mtd)
                            # Query-backed mtds are views, not real tables.
                            mtdquery = getattr(mtd, "query", None)
                            if not mtdquery:
                                pydba_mtdparser.Tables[table]=mparser
                            
                                

                        
                    
                
                # Queue the file for DB loading, refusing duplicates found
                # in different directories.
                if contents and f_ext(name) in filetypes:
                    file={}
                    for key,val in pd[fname].iteritems():
                        file[key]=val
                    file["contents"]=contents
                    if (options.modules_loaded.has_key(name)):
                        print "ERROR: %s file was already loaded." % name
                        print "--> this file was found at %s" % root
                        print "--> previous file found at %s" % options.modules_loaded[name]['root']
                        print "* skipping file"
                    else:
                        options.modules_loaded[name]=file
                        files+=[file]
    
    pd.close()
    # Make the cache world-writable so other users can share it; best-effort.
    try:
        os.chmod("/tmp/pydba", S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)
    except:
        pass

    # Ensure the module's area exists.
    qry_areas=db.query("SELECT descripcion, bloqueo, idarea"
                                                " FROM flareas WHERE idarea='%s'" % d_module['area'])
    tareas=qry_areas.dictresult()                        
    if len(tareas)==0:
        print "Creando Area %s - %s " %(d_module['area'],d_module['areaname'])
        db.query("INSERT INTO flareas (descripcion, bloqueo, idarea)"
                "VALUES('%s','t','%s')" % (d_module['areaname'],d_module['area']))
                    
                                                    
    
    # Ensure the module row exists; with --updatemodules it is recreated.
    habilitar_carga=False
    qry_modulo=db.query("SELECT idmodulo, version, descripcion, bloqueo, idarea"
                            " FROM flmodules WHERE idmodulo='%s'" % module);
    tmodulo=qry_modulo.dictresult() 
    cargado=False
    for pmodulo in tmodulo:
        cargado=True
        # if pmodulo['bloqueo']=='t':    # NOTE: TRUE means NOT locked -- the flag is inverted.
        habilitar_carga=True
    if cargado and options.updatemodules:
        cargado = False
        idmodulo        = pg.escape_string(d_module['name']) 
        sql=("DELETE FROM flmodules WHERE idmodulo = '%s'"  % 
                            (idmodulo))
        db.query(sql)     
        
    if not cargado:
        print "Se procede a crear el módulo nuevo %s" % module
        
        idmodulo        = pg.escape_string(d_module['name']) 
        idarea            = pg.escape_string(d_module['area'])
        version         = pg.escape_string(d_module['version'])
        bloqueo         = "t"
        descripcion = pg.escape_string(d_module['alias'])
        icono             = pg.escape_string(d_module['icon_data'])
        
        sql=("INSERT INTO flmodules (idmodulo, idarea, version, bloqueo, descripcion,icono) "    
                    "VALUES('%s','%s','%s','%s','%s','%s')" % 
                            (idmodulo, idarea, version, bloqueo, descripcion,icono))
        db.query(sql)     
        habilitar_carga=True     

    if not habilitar_carga:
        print "Error when trying to update the module '%s': non-loaded or locked module" % module
        return 0
    
    # Index the SHAs already stored for this module to skip unchanged files.
    qry_modulos=db.query("SELECT nombre,sha FROM flfiles WHERE idmodulo='%s' " % (module));
    tuplas_modulos=qry_modulos.dictresult() 
    dmodulos={}
    for modulo in tuplas_modulos:
        dmodulos[modulo['nombre']]=modulo;
        
    loaded=[]
    
    for file in files:
        
        update=True        
        if (dmodulos.has_key(file['name'])):
            dm=dmodulos[file['name']]
            if (dm['sha']==file['sha']):
                update=False
        
        # --loadbaselec restricts loading to the baselec.mtd file only.
        if options.loadbaselec:
            update=(file['name']=='baselec.mtd')
              
        if (update):
            loaded+=[file['name']]
            if (options.verbose):
                print "* Loading file '%s' => '%s'..." % (file['name'],file['sha'])
            # Delete-then-insert acts as an upsert on flfiles.
            sql="DELETE FROM flfiles WHERE nombre='%s';\n" % file['name']
            db.query(sql)
        
            file['module']=module
            
            sql=("INSERT INTO flfiles (contenido, bloqueo, sha, idmodulo, nombre) "    
                    "VALUES(E'%(contents)s', 't', '%(sha)s','%(module)s', '%(name)s')" % file)
            try:
                db.query(sql)
            except Exception, e:
                print e.__class__.__name__, e
                print sql
                
                raise
Exemplo n.º 36
0
def db_safestr(p):
	"""Return *p* as a quoted, escaped SQL string literal.

	Non-string values are first converted with str().
	"""
	text = p if isinstance(p, basestring) else str(p)
	return str("'" + escape_string(text) + "'")