Ejemplo n.º 1
0
def archiveFilesPath(bContinual):
  """Move aged upload zips into the fanout archive tree.

  bContinual -- True selects the continual upload/archive directory pair,
  False the regular pair.  Every *.zip in the upload dir whose trigger
  time (parsed from the file name by getFanoutDirFromZip) is non-zero and
  at or before ARCHIVE_TIME is moved into a fanout subdirectory of the
  archive dir; everything else is left untouched.
  """
  global ARCHIVE_TIME
  if bContinual:
    dirOrig = UPLOAD_CONTINUAL_WEB_DIR
    dirArchive = ARCHIVE_CONTINUAL_DIR
  else:
    dirOrig = UPLOAD_WEB_DIR
    dirArchive = ARCHIVE_DIR

  for f in os.listdir(dirOrig):
    if not f.endswith(".zip"):
      continue

    fname = os.path.join(dirOrig, f)
    # dbin is the fanout bin name, dtime the trigger time encoded in the
    # zip file name
    (dbin, dtime) = getFanoutDirFromZip(f)
    if int(dtime) > 0 and int(dtime) <= ARCHIVE_TIME:
      # just move the file to the archive dir, but under its fanout dir name
      fullarchivedir = makeFanoutDir(dirArchive, dbin)
      fullarchivepath = os.path.join(fullarchivedir, f)
      try:
        shutil.move(fname, fullarchivepath)
        print("Successfully archived " + fname + " to " + fullarchivepath)
      except Exception:  # fix: was a bare except (also caught SystemExit)
        print("Error in moving archive zip file: " + f + " to  " + fullarchivepath)
        traceback.print_exc()
        print("")
Ejemplo n.º 2
0
def archiveFilesPath(bContinual):
    """Move aged upload zips into the fanout archive tree.

    bContinual -- True selects the continual upload/archive directory pair,
    False the regular pair.  Every *.zip in the upload dir whose trigger
    time (parsed from the file name by getFanoutDirFromZip) is non-zero and
    at or before ARCHIVE_TIME is moved into a fanout subdirectory of the
    archive dir; everything else is left untouched.
    """
    global ARCHIVE_TIME
    if bContinual:
        dirOrig = UPLOAD_CONTINUAL_WEB_DIR
        dirArchive = ARCHIVE_CONTINUAL_DIR
    else:
        dirOrig = UPLOAD_WEB_DIR
        dirArchive = ARCHIVE_DIR

    for f in os.listdir(dirOrig):
        if not f.endswith(".zip"):
            continue

        fname = os.path.join(dirOrig, f)
        # dbin is the fanout bin name, dtime the trigger time encoded in the
        # zip file name
        (dbin, dtime) = getFanoutDirFromZip(f)
        if int(dtime) > 0 and int(dtime) <= ARCHIVE_TIME:
            # just move the file to the archive dir, but under its fanout dir name
            fullarchivedir = makeFanoutDir(dirArchive, dbin)
            fullarchivepath = os.path.join(fullarchivedir, f)
            try:
                shutil.move(fname, fullarchivepath)
                print("Successfully archived " + fname + " to " + fullarchivepath)
            except Exception:  # fix: was a bare except (also caught SystemExit)
                print("Error in moving archive zip file: " + f + " to  " + fullarchivepath)
                traceback.print_exc()
                print("")
Ejemplo n.º 3
0
def procDownloadRequest(dbconn, outfilename, url, jobid, userid, trigidlist):
    """Build a download zip of trigger data files for the given trigger ids.

    dbconn      -- open DB connection
    outfilename -- name of the zip to create in DOWNLOAD_WEB_DIR
    url, jobid, userid -- accepted for interface compatibility; unused here
    trigidlist  -- SQL "(id,id,...)" string spliced directly into the query.
                   NOTE(review): string-built SQL; ids must come from a
                   trusted source or be parameterized.

    Returns the byte size of the zip on success, 0 on failure or when
    trigidlist is empty.
    """
    global DBNAME, DBNAME_ARCHIVE
    # nothing to do for an empty set of trigger ids
    if trigidlist == "()":
        return 0

    tmpdir = tempfile.mkdtemp()
    myCursor = dbconn.cursor()
    # union the live table with the archive table; the is_archive column
    # (0/1) tells us which directory tree holds each file
    query = "SELECT t.id,t.hostid,t.latitude,t.longitude,t.levelvalue,t.levelid,t.file, " +\
               "t.qcn_quakeid, q.time_utc quake_time, q.depth_km quake_depth_km, " +\
               "q.latitude quake_lat, q.longitude quake_lon, q.magnitude quake_mag, 0 is_archive, t.time_trigger " +\
               "FROM " + DBNAME + ".qcn_trigger t " +\
               "LEFT OUTER JOIN sensor.qcn_quake q ON q.id = t.qcn_quakeid " +\
               "WHERE t.received_file=100 AND t.id IN " + trigidlist + " UNION " +\
            "SELECT t.id,t.hostid,t.latitude,t.longitude,t.levelvalue,t.levelid,t.file, " +\
               "t.qcn_quakeid, q.time_utc quake_time, q.depth_km quake_depth_km, " +\
               "q.latitude quake_lat, q.longitude quake_lon, q.magnitude quake_mag, 1 is_archive, t.time_trigger " +\
               "FROM " + DBNAME_ARCHIVE + ".qcn_trigger t " +\
               "LEFT OUTER JOIN sensor.qcn_quake q ON q.id = t.qcn_quakeid " +\
               "WHERE t.received_file=100 AND t.id IN " + trigidlist

    try:
        myCursor.execute(query)
    except Exception:  # fix: was a bare except
        print("Error in query")
        print(query)
        traceback.print_exc()
        shutil.rmtree(tmpdir)  # fix: tmpdir used to leak on query failure
        sys.exit(3)

    zipoutpath = os.path.join(DOWNLOAD_WEB_DIR, outfilename)
    zipinpath = ""

    # get the resultset as a tuple
    result = myCursor.fetchall()
    numbyte = 0
    myzipout = None
    errlevel = 0  # breadcrumb reported in the error messages below

    try:
        errlevel = 1
        # ZIP_STORED (members are already zips, no point recompressing) and
        # allowZip64=True so archives may exceed 2GB
        myzipout = zipfile.ZipFile(zipoutpath, "w", ZIP_STORED, True)

        # work inside the temp dir so zip member names carry no path component
        curdir = os.getcwd()
        os.chdir(tmpdir)
        for rec in result:
            if rec[13] == 1:  # archived: fanout dir is derived from file name rec[6]
                fandir, dtime = getFanoutDirFromZip(rec[6])
                zipinpath = os.path.join(os.path.join(ARCHIVE_WEB_DIR, fandir), rec[6])
            else:
                zipinpath = os.path.join(UPLOAD_WEB_DIR, rec[6])

            errlevel = 2
            # host id is rec[1], time_trigger is rec[14]
            try:
                # fix: test existence BEFORE opening -- the original opened the
                # zip first, so the "missing" message was unreachable
                if os.path.isfile(zipinpath):
                    myzipin = zipfile.ZipFile(zipinpath, "r")
                    if myzipin.testzip() == None:
                        errlevel = 3
                        # valid zip: unpack it into the temp dir
                        zipinlist = myzipin.namelist()
                        myzipin.extractall(tmpdir)
                        myzipin.close()
                        for zipinname in zipinlist:
                            errlevel = 4
                            # inject trigger/quake metadata into the unpacked SAC file
                            getSACMetadata(zipinname, rec[1], rec[2], rec[3], rec[4], rec[5],
                                           rec[7], rec[8], rec[9], rec[10], rec[11], rec[12])
                            # prepend host id & trigger time so members sort sensibly
                            nicefilename = "%09d_%d_%s" % (rec[1], rec[14], zipinname)
                            os.rename(zipinname, nicefilename)
                            myzipout.write(nicefilename)
                            os.remove(nicefilename)
                    else:
                        myzipin.close()  # fix: corrupt-zip handle used to be leaked
                        print("Invalid or missing file " + zipinpath)
                else:
                    print("Invalid or missing file " + zipinpath)

            except Exception:  # fix: was a bare except
                print("Error " + str(errlevel) + " in myzipin " + zipinpath)
                continue

        os.chdir(curdir)  # go back so tmpdir can be erased
        myzipout.close()
        numbyte = os.path.getsize(zipoutpath)
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        return numbyte

    except zipfile.error:
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath +
              " is an invalid zip file (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0
    except Exception:  # fix: was a bare except
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath + " (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0
Ejemplo n.º 4
0
def procDownloadRequest(dbconn, outfilename, url, jobid, userid, trigidlist):
    """Build a download zip of trigger data files for the given trigger ids.

    dbconn      -- open DB connection
    outfilename -- name of the zip to create in DOWNLOAD_WEB_DIR
    url, jobid, userid -- accepted for interface compatibility; unused here
    trigidlist  -- SQL "(id,id,...)" string spliced directly into the query.
                   NOTE(review): string-built SQL; ids must come from a
                   trusted source or be parameterized.

    Returns the byte size of the zip on success, 0 on failure or when
    trigidlist is empty.
    """
    global DBNAME, DBNAME_ARCHIVE
    # nothing to do for an empty set of trigger ids
    if trigidlist == "()":
        return 0

    tmpdir = tempfile.mkdtemp()
    myCursor = dbconn.cursor()
    # union the live table with the archive table; the is_archive column
    # (0/1) tells us which directory tree holds each file
    query = "SELECT t.id,t.hostid,t.latitude,t.longitude,t.levelvalue,t.levelid,t.file, " +\
               "t.qcn_quakeid, q.time_utc quake_time, q.depth_km quake_depth_km, " +\
               "q.latitude quake_lat, q.longitude quake_lon, q.magnitude quake_mag, 0 is_archive, t.time_trigger " +\
               "FROM " + DBNAME + ".qcn_trigger t " +\
               "LEFT OUTER JOIN sensor.qcn_quake q ON q.id = t.qcn_quakeid " +\
               "WHERE t.received_file=100 AND t.id IN " + trigidlist + " UNION " +\
            "SELECT t.id,t.hostid,t.latitude,t.longitude,t.levelvalue,t.levelid,t.file, " +\
               "t.qcn_quakeid, q.time_utc quake_time, q.depth_km quake_depth_km, " +\
               "q.latitude quake_lat, q.longitude quake_lon, q.magnitude quake_mag, 1 is_archive, t.time_trigger " +\
               "FROM " + DBNAME_ARCHIVE + ".qcn_trigger t " +\
               "LEFT OUTER JOIN sensor.qcn_quake q ON q.id = t.qcn_quakeid " +\
               "WHERE t.received_file=100 AND t.id IN " + trigidlist

    try:
        myCursor.execute(query)
    except Exception:  # fix: was a bare except
        print("Error in query")
        print(query)
        traceback.print_exc()
        shutil.rmtree(tmpdir)  # fix: tmpdir used to leak on query failure
        sys.exit(3)

    zipoutpath = os.path.join(DOWNLOAD_WEB_DIR, outfilename)
    zipinpath = ""

    # get the resultset as a tuple
    result = myCursor.fetchall()
    numbyte = 0
    myzipout = None
    errlevel = 0  # breadcrumb reported in the error messages below

    try:
        errlevel = 1
        # ZIP_STORED (members are already zips, no point recompressing) and
        # allowZip64=True so archives may exceed 2GB
        myzipout = zipfile.ZipFile(zipoutpath, "w", ZIP_STORED, True)

        # work inside the temp dir so zip member names carry no path component
        curdir = os.getcwd()
        os.chdir(tmpdir)
        for rec in result:
            if rec[13] == 1:  # archived: fanout dir is derived from file name rec[6]
                fandir, dtime = getFanoutDirFromZip(rec[6])
                zipinpath = os.path.join(os.path.join(ARCHIVE_WEB_DIR, fandir), rec[6])
            else:
                zipinpath = os.path.join(UPLOAD_WEB_DIR, rec[6])

            errlevel = 2
            # host id is rec[1], time_trigger is rec[14]
            try:
                # fix: test existence BEFORE opening -- the original opened the
                # zip first, so the "missing" message was unreachable
                if os.path.isfile(zipinpath):
                    myzipin = zipfile.ZipFile(zipinpath, "r")
                    if myzipin.testzip() == None:
                        errlevel = 3
                        # valid zip: unpack it into the temp dir
                        zipinlist = myzipin.namelist()
                        myzipin.extractall(tmpdir)
                        myzipin.close()
                        for zipinname in zipinlist:
                            errlevel = 4
                            # inject trigger/quake metadata into the unpacked SAC file
                            getSACMetadata(zipinname, rec[1], rec[2], rec[3], rec[4], rec[5],
                                           rec[7], rec[8], rec[9], rec[10], rec[11], rec[12])
                            # prepend host id & trigger time so members sort sensibly
                            nicefilename = "%09d_%d_%s" % (rec[1], rec[14], zipinname)
                            os.rename(zipinname, nicefilename)
                            myzipout.write(nicefilename)
                            os.remove(nicefilename)
                    else:
                        myzipin.close()  # fix: corrupt-zip handle used to be leaked
                        print("Invalid or missing file " + zipinpath)
                else:
                    print("Invalid or missing file " + zipinpath)

            except Exception:  # fix: was a bare except
                print("Error " + str(errlevel) + " in myzipin " + zipinpath)
                continue

        os.chdir(curdir)  # go back so tmpdir can be erased
        myzipout.close()
        numbyte = os.path.getsize(zipoutpath)
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        return numbyte

    except zipfile.error:
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath +
              " is an invalid zip file (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0
    except Exception:  # fix: was a bare except
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath + " (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0
Ejemplo n.º 5
0
def procRequest(dbconn):
    """Export triggers in the configured date/lat/lng window as a download zip.

    Runs a UNION query over the normal (DBNAME) and continual
    (DBNAME_CONTINUAL) trigger tables, then builds FILE_BASE.zip in
    DOWNLOAD_WEB_DIR containing each trigger's unpacked data files (with
    metadata injected via getSACMetadata) plus a FILE_BASE.csv and
    FILE_BASE.sql dump of the result rows.

    Returns the size in bytes of the zip on success, 0 on failure.
    """
    global FILE_BASE

    sqlQuery = """select
    m.mydb, m.id, m.hostid, 
    from_unixtime(m.time_trigger) time_trig, FLOOR(ROUND((m.time_trigger-FLOOR(m.time_trigger)), 6) * 1e6) utime_trig, from_unixtime(m.time_received) time_rec, 
    from_unixtime(m.time_sync) time_synchronized, m.sync_offset, m.magnitude,  
    m.significance,  m.latitude, m.longitude, 
    m.file, m.numreset, m.dt,
    case m.varietyid when 0 then 'N' when 1 then 'P' when 2 then 'C' end trigtype, m.hostipaddrid,m.geoipaddrid,
    s.description sensor, IFNULL(a.description,'') alignment,IFNULL(m.levelvalue,'') level, 
    IFNULL(l.description,'') level_type, from_unixtime(q.time_utc) quake_time, q.depth_km quake_depth_km, 
    q.latitude quake_lat, q.longitude quake_lon, q.magnitude, 
    q.id, q.description quake_desc, q.guid
from
(
select 'Q' mydb, 
t.id, t.qcn_quakeid, t.hostid, t.time_trigger, t.time_received, t.time_sync, t.sync_offset, t.magnitude, t.significance, t.latitude, t.longitude,
t.file, t.numreset, t.alignid, t.levelid, t.levelvalue, t.qcn_sensorid,
t.dt, t.varietyid,t.hostipaddrid,t.geoipaddrid
from %s.qcn_trigger t
where time_trigger between %d and %d
and time_sync>0
and varietyid in (0,2)
and received_file=100
and latitude between %f and %f and longitude between %f and %f
UNION
select 'C' mydb, 
tt.id, tt.qcn_quakeid, tt.hostid, tt.time_trigger, tt.time_received, tt.time_sync, tt.sync_offset, tt.magnitude, tt.significance, tt.latitude, tt.longitude,
tt.file, tt.numreset, tt.alignid, tt.levelid, tt.levelvalue, tt.qcn_sensorid,
tt.dt, tt.varietyid,tt.hostipaddrid,tt.geoipaddrid
from %s.qcn_trigger tt
where time_trigger between %d and %d
and time_sync>0
and varietyid in (0,2)
and received_file=100
and latitude between %f and %f and longitude between %f and %f
) m
LEFT JOIN sensor.qcn_sensor s ON m.qcn_sensorid = s.id
LEFT OUTER JOIN sensor.qcn_align a ON m.alignid = a.id
LEFT OUTER JOIN sensor.qcn_level l ON m.levelid = l.id
LEFT OUTER JOIN sensor.qcn_quake q ON q.id = m.qcn_quakeid
where m.qcn_sensorid=s.id
order by time_trigger,hostid""" \
        % (
            DBNAME, DATE_MIN, DATE_MAX, LAT_MIN, LAT_MAX, LNG_MIN, LNG_MAX,
            DBNAME_CONTINUAL, DATE_MIN, DATE_MAX, LAT_MIN, LAT_MAX, LNG_MIN, LNG_MAX
        )

    strHeader = "db, triggerid, hostid, time_trig_utc, time_trig_micros, time_received, time_sync, sync_offset, magnitude, significance, latitude, longitude, file, " +\
       "numreset, dt, trig type, hostipaddrid, geoipaddrid, sensor, alignment, level_value, level_type, usgs_quake_time, quake_depth_km, quake_lat, quake_lon, quake_mag, quake_id, quake_desc, quake_guid\n"

    tmpdir = tempfile.mkdtemp()
    myCursor = dbconn.cursor()

    myCursor.execute(sqlQuery)

    zipoutpath = os.path.join(DOWNLOAD_WEB_DIR, FILE_BASE + ".zip")
    zipinpath = ""

    strCSVFile = os.path.join(DOWNLOAD_WEB_DIR, FILE_BASE + ".csv")
    strSQLFile = os.path.join(DOWNLOAD_WEB_DIR, FILE_BASE + ".sql")
    fileCSV = open(strCSVFile, "w")
    fileSQL = open(strSQLFile, "w")
    # first CSV line records the query parameters that produced this export
    fileCSV.write("Query Used: Date Range: " + DATE_MIN_ORIG + " - " +
                  DATE_MAX_ORIG + "   LatMin = " + str(LAT_MIN) +
                  "  LatMax = " + str(LAT_MAX) + "  LngMin = " + str(LNG_MIN) +
                  "  LngMax = " + str(LNG_MAX) + "\n")
    fileCSV.write(strHeader)

    # the SQL dump starts with a CREATE TABLE matching the column list above
    fileSQL.write("CREATE TABLE IF NOT EXISTS " + FILE_BASE + " (\n" +\
      " db varchar(10),\n" +\
      " triggerid int(11),\n" +\
      " hostid int(11),\n" +\
      " time_trig_utc datetime,\n" +\
      " time_trig_micros int(7),\n" +\
      " time_received datetime,\n" +\
      " time_sync datetime, sync_offset float,\n" +\
      " magnitude float, significance float, latitude float, longitude float,\n" +\
      " file varchar(255),\n" +\
      " numreset int(7), dt float, trigger_type varchar(2), hostipaddrid int(11), geoipaddrid int(11),\n" +\
      " sensor varchar(128), alignment varchar(64),\n" +\
      " level_value float, level_type varchar(64), usgs_quake_time datetime,\n" +\
      " quake_depth_km float, quake_lat float, quake_lon float, quake_mag float, quake_id int(11), quake_desc varchar(256), quake_guid varchar(256)\n" +\
      ");\n\n")

    # get the resultset as a tuple
    result = myCursor.fetchall()
    numbyte = 0
    myzipout = None
    errlevel = 0  # breadcrumb reported in the error messages below

    try:
        errlevel = 1
        # ZIP_STORED (no recompression) and allowZip64=True for >2GB archives
        myzipout = zipfile.ZipFile(zipoutpath, "w", ZIP_STORED, True)

        # work inside the temp dir so zip member names carry no path component
        curdir = os.getcwd()
        os.chdir(tmpdir)
        for rec in result:
            # rec layout: 0 mydb, 1 id, 2 hostid, 3 time_trig, 4 utime_trig,
            # 5 time_rec, 6 time_synchronized, 7 sync_offset, 8 magnitude,
            # 9 significance, 10 latitude, 11 longitude, 12 file, 13 numreset,
            # 14 dt, 15 trigtype, 16 hostipaddrid, 17 geoipaddrid, 18 sensor,
            # 19 alignment, 20 level, 21 level_type, 22 quake_time,
            # 23 quake_depth_km, 24 quake_lat, 25 quake_lon,
            # 26 quake_magnitude, 27 quake_id, 28 quake_description,
            # 29 quake_guid
            errlevel = 2
            try:
                dbzipfile = rec[12]
                if IS_ARCHIVE:
                    # fanout layout -- NOTE(review): both branches read from the
                    # upload trees, not ARCHIVE_WEB_DIR; confirm that is intended
                    fandir, dtime = getFanoutDirFromZip(dbzipfile)
                    if rec[0] == "Q":
                        zipinpath = os.path.join(os.path.join(UPLOAD_WEB_DIR, fandir), dbzipfile)
                    else:
                        zipinpath = os.path.join(os.path.join(UPLOAD_WEB_DIR_CONTINUAL, fandir), dbzipfile)
                else:
                    if rec[0] == "Q":
                        zipinpath = os.path.join(UPLOAD_WEB_DIR, dbzipfile)
                    else:
                        zipinpath = os.path.join(UPLOAD_WEB_DIR_CONTINUAL, dbzipfile)

                myzipin = zipfile.ZipFile(zipinpath, "r")
                # isfile() is redundant after a successful open; kept for parity
                if os.path.isfile(zipinpath) and myzipin.testzip() == None:
                    errlevel = 3
                    # valid zip: unpack it into the temp dir
                    zipinlist = myzipin.namelist()
                    myzipin.extractall(tmpdir)
                    myzipin.close()

                    for zipinname in zipinlist:
                        errlevel = 4
                        # inject trigger/quake metadata into the unpacked file;
                        # getSACMetadata(zipinname, hostid, latTrig, lonTrig,
                        #   lvlTrig, lvlType, idQuake, timeQuake, depthKmQuake,
                        #   latQuake, lonQuake, magQuake)
                        getSACMetadata(zipinname, rec[2], rec[10], rec[11],
                                       rec[20], rec[21], rec[27], rec[22],
                                       rec[23], rec[24], rec[25], rec[26])
                        myzipout.write(zipinname)
                        os.remove(zipinname)

                    # valid file - emit the CSV row and a matching SQL insert
                    # (NOTE(review): values are not escaped; embedded quotes
                    # in fields would break the CSV/SQL)
                    fileSQL.write("INSERT INTO " + FILE_BASE + " VALUES (")
                    for x in range(0, 30):
                        strPrint = str(rec[x]) if rec[x] != None else ""
                        fileCSV.write("\"" + strPrint + "\"")
                        fileSQL.write("\"" + strPrint + "\"")
                        if x < 29:
                            fileCSV.write(",")
                            fileSQL.write(",")
                    fileCSV.write("\n")
                    fileSQL.write(");\n")
                else:
                    myzipin.close()  # fix: corrupt-zip handle used to be leaked

            except Exception:  # fix: was a bare except
                print("Error " + str(errlevel) + " in myzipin " + zipinpath)
                traceback.print_exc()
                continue

        fileCSV.close()
        fileSQL.close()
        os.chdir(DOWNLOAD_WEB_DIR)  # zip up the csv & sql alongside the data
        myzipout.write(FILE_BASE + ".csv")
        myzipout.write(FILE_BASE + ".sql")
        os.chdir(curdir)  # go back so tmpdir can be erased
        myzipout.close()
        numbyte = os.path.getsize(zipoutpath)
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        return numbyte

    except zipfile.error:
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath +
              " is an invalid zip file (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        fileCSV.close()  # fix: csv/sql handles used to be leaked on errors
        fileSQL.close()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0
    except Exception:  # fix: was a bare except
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath + " (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        fileCSV.close()  # fix: csv/sql handles used to be leaked on errors
        fileSQL.close()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0
Ejemplo n.º 6
0
def procRequest(dbconn):
    """Export triggers in the configured date/lat/lng window as a download zip.

    Runs a UNION query over the normal (DBNAME) and continual
    (DBNAME_CONTINUAL) trigger tables, then builds FILE_BASE.zip in
    DOWNLOAD_WEB_DIR containing each trigger's unpacked data files (with
    metadata injected via getSACMetadata) plus a FILE_BASE.csv and
    FILE_BASE.sql dump of the result rows.

    Returns the size in bytes of the zip on success, 0 on failure.
    """
    global FILE_BASE

    sqlQuery = """select
    m.mydb, m.id, m.hostid, 
    from_unixtime(m.time_trigger) time_trig, FLOOR(ROUND((m.time_trigger-FLOOR(m.time_trigger)), 6) * 1e6) utime_trig, from_unixtime(m.time_received) time_rec, 
    from_unixtime(m.time_sync) time_synchronized, m.sync_offset, m.magnitude,  
    m.significance,  m.latitude, m.longitude, 
    m.file, m.numreset, m.dt,
    case m.varietyid when 0 then 'N' when 1 then 'P' when 2 then 'C' end trigtype, m.hostipaddrid,m.geoipaddrid,
    s.description sensor, IFNULL(a.description,'') alignment,IFNULL(m.levelvalue,'') level, 
    IFNULL(l.description,'') level_type, from_unixtime(q.time_utc) quake_time, q.depth_km quake_depth_km, 
    q.latitude quake_lat, q.longitude quake_lon, q.magnitude, 
    q.id, q.description quake_desc, q.guid
from
(
select 'Q' mydb, 
t.id, t.qcn_quakeid, t.hostid, t.time_trigger, t.time_received, t.time_sync, t.sync_offset, t.magnitude, t.significance, t.latitude, t.longitude,
t.file, t.numreset, t.alignid, t.levelid, t.levelvalue, t.qcn_sensorid,
t.dt, t.varietyid,t.hostipaddrid,t.geoipaddrid
from %s.qcn_trigger t
where time_trigger between %d and %d
and time_sync>0
and varietyid in (0,2)
and received_file=100
and latitude between %f and %f and longitude between %f and %f
UNION
select 'C' mydb, 
tt.id, tt.qcn_quakeid, tt.hostid, tt.time_trigger, tt.time_received, tt.time_sync, tt.sync_offset, tt.magnitude, tt.significance, tt.latitude, tt.longitude,
tt.file, tt.numreset, tt.alignid, tt.levelid, tt.levelvalue, tt.qcn_sensorid,
tt.dt, tt.varietyid,tt.hostipaddrid,tt.geoipaddrid
from %s.qcn_trigger tt
where time_trigger between %d and %d
and time_sync>0
and varietyid in (0,2)
and received_file=100
and latitude between %f and %f and longitude between %f and %f
) m
LEFT JOIN sensor.qcn_sensor s ON m.qcn_sensorid = s.id
LEFT OUTER JOIN sensor.qcn_align a ON m.alignid = a.id
LEFT OUTER JOIN sensor.qcn_level l ON m.levelid = l.id
LEFT OUTER JOIN sensor.qcn_quake q ON q.id = m.qcn_quakeid
where m.qcn_sensorid=s.id
order by time_trigger,hostid""" \
        % (
            DBNAME, DATE_MIN, DATE_MAX, LAT_MIN, LAT_MAX, LNG_MIN, LNG_MAX,
            DBNAME_CONTINUAL, DATE_MIN, DATE_MAX, LAT_MIN, LAT_MAX, LNG_MIN, LNG_MAX
        )

    strHeader = "db, triggerid, hostid, time_trig_utc, time_trig_micros, time_received, time_sync, sync_offset, magnitude, significance, latitude, longitude, file, " +\
       "numreset, dt, trig type, hostipaddrid, geoipaddrid, sensor, alignment, level_value, level_type, usgs_quake_time, quake_depth_km, quake_lat, quake_lon, quake_mag, quake_id, quake_desc, quake_guid\n"

    tmpdir = tempfile.mkdtemp()
    myCursor = dbconn.cursor()

    myCursor.execute(sqlQuery)

    zipoutpath = os.path.join(DOWNLOAD_WEB_DIR, FILE_BASE + ".zip")
    zipinpath = ""

    strCSVFile = os.path.join(DOWNLOAD_WEB_DIR, FILE_BASE + ".csv")
    strSQLFile = os.path.join(DOWNLOAD_WEB_DIR, FILE_BASE + ".sql")
    fileCSV = open(strCSVFile, "w")
    fileSQL = open(strSQLFile, "w")
    # first CSV line records the query parameters that produced this export
    fileCSV.write("Query Used: Date Range: " + DATE_MIN_ORIG + " - " +
                  DATE_MAX_ORIG + "   LatMin = " + str(LAT_MIN) +
                  "  LatMax = " + str(LAT_MAX) + "  LngMin = " + str(LNG_MIN) +
                  "  LngMax = " + str(LNG_MAX) + "\n")
    fileCSV.write(strHeader)

    # the SQL dump starts with a CREATE TABLE matching the column list above
    fileSQL.write("CREATE TABLE IF NOT EXISTS " + FILE_BASE + " (\n" +\
      " db varchar(10),\n" +\
      " triggerid int(11),\n" +\
      " hostid int(11),\n" +\
      " time_trig_utc datetime,\n" +\
      " time_trig_micros int(7),\n" +\
      " time_received datetime,\n" +\
      " time_sync datetime, sync_offset float,\n" +\
      " magnitude float, significance float, latitude float, longitude float,\n" +\
      " file varchar(255),\n" +\
      " numreset int(7), dt float, trigger_type varchar(2), hostipaddrid int(11), geoipaddrid int(11),\n" +\
      " sensor varchar(128), alignment varchar(64),\n" +\
      " level_value float, level_type varchar(64), usgs_quake_time datetime,\n" +\
      " quake_depth_km float, quake_lat float, quake_lon float, quake_mag float, quake_id int(11), quake_desc varchar(256), quake_guid varchar(256)\n" +\
      ");\n\n")

    # get the resultset as a tuple
    result = myCursor.fetchall()
    numbyte = 0
    myzipout = None
    errlevel = 0  # breadcrumb reported in the error messages below

    try:
        errlevel = 1
        # ZIP_STORED (no recompression) and allowZip64=True for >2GB archives
        myzipout = zipfile.ZipFile(zipoutpath, "w", ZIP_STORED, True)

        # work inside the temp dir so zip member names carry no path component
        curdir = os.getcwd()
        os.chdir(tmpdir)
        for rec in result:
            # rec layout: 0 mydb, 1 id, 2 hostid, 3 time_trig, 4 utime_trig,
            # 5 time_rec, 6 time_synchronized, 7 sync_offset, 8 magnitude,
            # 9 significance, 10 latitude, 11 longitude, 12 file, 13 numreset,
            # 14 dt, 15 trigtype, 16 hostipaddrid, 17 geoipaddrid, 18 sensor,
            # 19 alignment, 20 level, 21 level_type, 22 quake_time,
            # 23 quake_depth_km, 24 quake_lat, 25 quake_lon,
            # 26 quake_magnitude, 27 quake_id, 28 quake_description,
            # 29 quake_guid
            errlevel = 2
            try:
                dbzipfile = rec[12]
                if IS_ARCHIVE:
                    # fanout layout -- NOTE(review): both branches read from the
                    # upload trees, not ARCHIVE_WEB_DIR; confirm that is intended
                    fandir, dtime = getFanoutDirFromZip(dbzipfile)
                    if rec[0] == "Q":
                        zipinpath = os.path.join(os.path.join(UPLOAD_WEB_DIR, fandir), dbzipfile)
                    else:
                        zipinpath = os.path.join(os.path.join(UPLOAD_WEB_DIR_CONTINUAL, fandir), dbzipfile)
                else:
                    if rec[0] == "Q":
                        zipinpath = os.path.join(UPLOAD_WEB_DIR, dbzipfile)
                    else:
                        zipinpath = os.path.join(UPLOAD_WEB_DIR_CONTINUAL, dbzipfile)

                myzipin = zipfile.ZipFile(zipinpath, "r")
                # isfile() is redundant after a successful open; kept for parity
                if os.path.isfile(zipinpath) and myzipin.testzip() == None:
                    errlevel = 3
                    # valid zip: unpack it into the temp dir
                    zipinlist = myzipin.namelist()
                    myzipin.extractall(tmpdir)
                    myzipin.close()

                    for zipinname in zipinlist:
                        errlevel = 4
                        # inject trigger/quake metadata into the unpacked file;
                        # getSACMetadata(zipinname, hostid, latTrig, lonTrig,
                        #   lvlTrig, lvlType, idQuake, timeQuake, depthKmQuake,
                        #   latQuake, lonQuake, magQuake)
                        getSACMetadata(zipinname, rec[2], rec[10], rec[11],
                                       rec[20], rec[21], rec[27], rec[22],
                                       rec[23], rec[24], rec[25], rec[26])
                        myzipout.write(zipinname)
                        os.remove(zipinname)

                    # valid file - emit the CSV row and a matching SQL insert
                    # (NOTE(review): values are not escaped; embedded quotes
                    # in fields would break the CSV/SQL)
                    fileSQL.write("INSERT INTO " + FILE_BASE + " VALUES (")
                    for x in range(0, 30):
                        strPrint = str(rec[x]) if rec[x] != None else ""
                        fileCSV.write("\"" + strPrint + "\"")
                        fileSQL.write("\"" + strPrint + "\"")
                        if x < 29:
                            fileCSV.write(",")
                            fileSQL.write(",")
                    fileCSV.write("\n")
                    fileSQL.write(");\n")
                else:
                    myzipin.close()  # fix: corrupt-zip handle used to be leaked

            except Exception:  # fix: was a bare except
                print("Error " + str(errlevel) + " in myzipin " + zipinpath)
                traceback.print_exc()
                continue

        fileCSV.close()
        fileSQL.close()
        os.chdir(DOWNLOAD_WEB_DIR)  # zip up the csv & sql alongside the data
        myzipout.write(FILE_BASE + ".csv")
        myzipout.write(FILE_BASE + ".sql")
        os.chdir(curdir)  # go back so tmpdir can be erased
        myzipout.close()
        numbyte = os.path.getsize(zipoutpath)
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        return numbyte

    except zipfile.error:
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath +
              " is an invalid zip file (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        fileCSV.close()  # fix: csv/sql handles used to be leaked on errors
        fileSQL.close()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0
    except Exception:  # fix: was a bare except
        print("Error " + str(errlevel) + " in " + zipoutpath + " or " + zipinpath + " (tmpdir=" + tmpdir + ")")
        traceback.print_exc()
        fileCSV.close()  # fix: csv/sql handles used to be leaked on errors
        fileSQL.close()
        shutil.rmtree(tmpdir)  # remove temp directory
        myCursor.close()
        if myzipout != None:
            myzipout.close()
            os.remove(zipoutpath)  # discard the partial output zip
        return 0