Example #1
def oldmain():
    from gprf.seismic.seismic_util import load_events
    from sigvisa.treegp.util import mkdir_p
    mkdir_p("scraped_events")

    s = load_events(basedir="/home/dmoore/mkar_stuff")

    outfile = open("fakescraped.txt", 'w')
    for i, (ev, (w, srate1)) in enumerate(s):
        try:
            # lon, lat, smaj, smin, strike, depth, depth_err = scrape_isc(ev)
            lon, lat, smaj, smin, strike, depth, depth_err = fakescrape(ev)
        except Exception as e:
            # fall back to the catalog location with a nominal error ellipse if scraping fails
            print(e)
            lon, lat, smaj, smin, strike, depth, depth_err = \
                ev.lon, ev.lat, 20.0, 20.0, 0, ev.depth, 0.05 * ev.depth + 1.0
        st = "%d, %d, %.4f, %.4f, %.1f, %.1f, %d, %.1f, %.1f" % (
            i, ev.evid, lon, lat, smaj, smin, strike, depth, depth_err)
        print(st)
        outfile.write(st + "\n")
        outfile.flush()
    outfile.close()
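
For reference, here is a minimal sketch of how the rows written to fakescraped.txt above could be read back. The column order simply mirrors the format string in oldmain(); the helper name load_fakescraped is hypothetical and not part of the original source.

import numpy as np

def load_fakescraped(path="fakescraped.txt"):
    # Hypothetical reader for the file written by oldmain(); columns are
    # idx, evid, lon, lat, smaj, smin, strike, depth, depth_err.
    rows = []
    with open(path) as f:
        for line in f:
            rows.append([float(x) for x in line.strip().split(",")])
    return np.array(rows)

# usage (after oldmain() has produced the file):
# locs = load_fakescraped()
# print(locs.shape)  # (n_events, 9)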
Example #2
        print(c)
        if c > best_c:
            best_c = c
            best_widxs = widxs
        if max_s is not None:
            # stop the search once the time budget max_s (seconds) is exceeded
            t1 = time.time()
            if t1 - t0 > max_s:
                break

    return best_c, best_widxs

fd = load_seismic_locations()
lls = fd[:, [COL_LON, COL_LAT]]  # event (lon, lat) coordinates


s = load_events(sta="mkar", basedir="/home/dmoore/mkar_stuff")

from sklearn.cluster import KMeans
import numpy as np

np.random.seed(0)
# cluster the event locations into 4000 spatial groups with k-means
n_clusters = 4000
km = KMeans(n_clusters=n_clusters, init='k-means++', n_init=2, max_iter=300,
            tol=0.0001, precompute_distances='auto', verbose=1,
            random_state=None, copy_x=True, n_jobs=1)
r = km.fit(lls)
clusters = []
for i in range(n_clusters):
    # collect the rows of fd assigned to cluster i
    idx = km.labels_ == i
    lli = lls[idx, :]
    clusters.append(fd[idx, :])

    if len(lli) > 100:
        print("cluster %d size %d" % (i, len(lli)))
Example #3
        print(c)
        if c > best_c:
            best_c = c
            best_widxs = widxs
        if max_s is not None:
            # stop the search once the time budget max_s (seconds) is exceeded
            t1 = time.time()
            if t1 - t0 > max_s:
                break

    return best_c, best_widxs


fd = load_seismic_locations()
lls = fd[:, [COL_LON, COL_LAT]]

s = load_events(sta="mkar", basedir="/home/dmoore/mkar_stuff")

from sklearn.cluster import KMeans
import numpy as np

np.random.seed(0)
n_clusters = 4000
km = KMeans(n_clusters=n_clusters,
            init='k-means++',
            n_init=2,
            max_iter=300,
            tol=0.0001,
            precompute_distances='auto',
            verbose=1,
            random_state=None,
            copy_x=True,
            n_jobs=1)
r = km.fit(lls)
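
Once the fit completes, the KMeans object exposes the learned centroids and can assign new points to clusters. Below is a short sketch under the same setup; the query coordinates are made up purely for illustration and do not come from the original data.

import numpy as np

# (lon, lat) centroid of each cluster, shape (n_clusters, 2).
centers = km.cluster_centers_

# Assign an arbitrary query location to its nearest centroid
# (these coordinates are illustrative only).
query = np.array([[82.3, 46.8]])
label = km.predict(query)[0]
print("nearest cluster %d, center %s" % (label, centers[label]))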