def write_origin(origin, output):
    """Write an origin in a given output format.

    Arguments:
    origin - A core_tools.Origin object to be written out.
    output - A datascope database pointer to an open output database.

    Return Values:
    0 - Success
    -1 - Failure

    Behaviour:
    This method does NOT open or close the database passed in.

    Additional Comments:
    This method assumes that the database being written out is the same
    as the input database (ie. NO arrival rows are created, they are
    assumed to already exist).
    """
    from time import time
    tbl_origin = output.schema_tables['origin']
    # Allocate a fresh orid and map any None attributes to the schema's
    # declared null values before writing the row.
    origin.orid = output.nextid('orid')
    origin = map_null_values(tbl_origin, origin)
    tbl_origin.record = tbl_origin.addnull()
    tbl_origin.putv(('lat', origin.lat),
                    ('lon', origin.lon),
                    ('depth', origin.depth),
                    ('time', origin.time),
                    ('orid', origin.orid),
                    ('evid', origin.evid),
                    ('auth', origin.auth),
                    ('jdate', origin.jdate),
                    ('nass', origin.nass),
                    ('ndef', origin.ndef),
                    ('ndp', origin.ndp),
                    ('grn', origin.grn),
                    ('srn', origin.srn),
                    ('etype', origin.etype),
                    ('review', origin.review),
                    ('depdp', origin.depdp),
                    ('dtype', origin.dtype),
                    ('mb', origin.mb),
                    ('mbid', origin.mbid),
                    ('ms', origin.ms),
                    ('msid', origin.msid),
                    ('ml', origin.ml),
                    ('mlid', origin.mlid),
                    ('algorithm', origin.algorithm),
                    ('commid', origin.commid))
    # Point the event row at the newly written origin.
    tbl_event = output.schema_tables['event']
    tbl_event.record = tbl_event.find('evid == %d' % origin.evid)
    tbl_event.putv(('prefor', origin.orid))
    tbl_assoc = output.schema_tables['assoc']
    tbl_predarr = output.schema_tables['predarr']
    tbl_site = output.schema_tables['site']
    for arrival in origin.arrivals:
        # Hoist the clock read so both epoch substitutions agree.
        now = time()
        view = tbl_site.subset('sta =~ /%s/ && ondate < _%f_ && '
                               '(offdate == -1 || offdate > _%f_)'
                               % (arrival.sta, now, now))
        view.record = 0
        stalat, stalon = view.getv('lat', 'lon')
        seaz = antpy.azimuth(stalat, stalon, origin.lat, origin.lon)
        esaz = antpy.azimuth(origin.lat, origin.lon, stalat, stalon)
        delta = antpy.distance(stalat, stalon, origin.lat, origin.lon)
        tbl_assoc.record = tbl_assoc.addnull()
        # A predicted arrival may be absent; fall back to the CSS3.0 null
        # value for timeres rather than crashing on None arithmetic.
        timeres = -999.000 if arrival.predarr is None \
                else (arrival.time - arrival.predarr)
        tbl_assoc.putv(('arid', arrival.arid),
                       ('orid', origin.orid),
                       ('sta', arrival.sta),
                       ('phase', arrival.phase),
                       ('delta', delta),
                       ('seaz', seaz),
                       ('esaz', esaz),
                       ('timeres', timeres),
                       ('vmodel', 'PyLocEQ'))
        # Only write a predarr row when a predicted arrival time exists;
        # the slowness calculation requires it.
        if arrival.predarr is not None:
            tbl_predarr.record = tbl_predarr.addnull()
            tbl_predarr.putv(('arid', arrival.arid),
                             ('orid', origin.orid),
                             ('time', arrival.predarr),
                             ('slow', delta / (arrival.predarr - origin.time)),
                             ('seaz', seaz),
                             ('esaz', esaz))
    return 0
def write_origin(origin, dbout):
    """Write an anfseistools.core.Origin object to an output database.

    Arguments:
    origin - An anfseistools.core.Origin object to be written out.
    dbout - A datascope database pointer to an open output database.

    Returns:
    0 - Success
    -1 - Failure

    Behaviour:
    This method does NOT open or close the database passed in.

    Caveats:
    This method assumes that the database being written out is the same
    as the input database (ie. NO arrival rows are created, they are
    assumed to already exist).

    Example:
    In [1]: import sys
    In [2]: import os
    In [3]: sys.path.append('%s/data/python' % os.environ['ANTELOPE'])
    In [4]: from antelope.datascope import closing, dbopen
    In [5]: from anfseistools.core import Origin, Arrival
    In [6]: from anfseistools.ant import write_origin
    In [7]: arrivals = []
    In [8]: arrivals += [Arrival('SCAR', 597649500.000, 'P',
                                 chan='BHZ', deltim=0.250, arid=1001)]
    In [9]: arrivals += [Arrival('SAN', 1398876096.594, 'P',
                                 chan='HHZ', deltim=0.175, arid=1002)]
    In [10]: origin = Origin(48.4222, -123.3657, 35.0, 1267390800.000,
                             'white', arrivals=arrivals, orid=1001,
                             evid=1001, nass=2, ndef=2)
    In [11]: with closing(dbopen('/Users/mcwhite/staging/dbs/June2010/June2010',
                                 'r+')) as db:
       ....:     write_origin(origin, db)
       ....:
    Out[11]: 0
    """
    from time import time
    tbl_origin = dbout.schema_tables['origin']
    # Allocate a fresh orid and map any None attributes to the schema's
    # declared null values before writing the row.
    origin.orid = dbout.nextid('orid')
    origin = map_null_values(tbl_origin, origin)
    tbl_origin.record = tbl_origin.addnull()
    tbl_origin.putv(('lat', origin.lat),
                    ('lon', origin.lon),
                    ('depth', origin.depth),
                    ('time', origin.time),
                    ('orid', origin.orid),
                    ('evid', origin.evid),
                    ('auth', origin.auth),
                    ('jdate', origin.jdate),
                    ('nass', origin.nass),
                    ('ndef', origin.ndef),
                    ('ndp', origin.ndp),
                    ('grn', origin.grn),
                    ('srn', origin.srn),
                    ('etype', origin.etype),
                    ('review', origin.review),
                    ('depdp', origin.depdp),
                    ('dtype', origin.dtype),
                    ('mb', origin.mb),
                    ('mbid', origin.mbid),
                    ('ms', origin.ms),
                    ('msid', origin.msid),
                    ('ml', origin.ml),
                    ('mlid', origin.mlid),
                    ('algorithm', origin.algorithm),
                    ('commid', origin.commid))
    # Point the event row at the newly written origin.
    tbl_event = dbout.schema_tables['event']
    tbl_event.record = tbl_event.find('evid == %d' % origin.evid)
    tbl_event.putv(('prefor', origin.orid))
    tbl_assoc = dbout.schema_tables['assoc']
    tbl_predarr = dbout.schema_tables['predarr']
    tbl_site = dbout.schema_tables['site']
    for arrival in origin.arrivals:
        # Hoist the clock read so both epoch substitutions agree.
        now = time()
        view = tbl_site.subset('sta =~ /%s/ && ondate < _%f_ && '
                               '(offdate == -1 || offdate > _%f_)'
                               % (arrival.sta, now, now))
        view.record = 0
        stalat, stalon = view.getv('lat', 'lon')
        seaz = antpy.azimuth(stalat, stalon, origin.lat, origin.lon)
        esaz = antpy.azimuth(origin.lat, origin.lon, stalat, stalon)
        delta = antpy.distance(stalat, stalon, origin.lat, origin.lon)
        tbl_assoc.record = tbl_assoc.addnull()
        # A predicted arrival may be absent; fall back to the CSS3.0 null
        # value for timeres in that case.
        timeres = -999.000 if arrival.predarr is None \
                else (arrival.time - arrival.predarr)
        tbl_assoc.putv(('arid', arrival.arid),
                       ('orid', origin.orid),
                       ('sta', arrival.sta),
                       ('phase', arrival.phase),
                       ('delta', delta),
                       ('seaz', seaz),
                       ('esaz', esaz),
                       ('timeres', timeres),
                       ('vmodel', 'PyLocEQ'))
        # Only write a predarr row when a predicted arrival time exists;
        # the slowness calculation requires it.
        if arrival.predarr is not None:
            tbl_predarr.record = tbl_predarr.addnull()
            tbl_predarr.putv(('arid', arrival.arid),
                             ('orid', origin.orid),
                             ('time', arrival.predarr),
                             ('slow', delta / (arrival.predarr - origin.time)),
                             ('seaz', seaz),
                             ('esaz', esaz))
    return 0