Example #1
def saveas():
    global filename

    # Need to determine what our file extension is.
    filename = tkFileDialog.asksaveasfilename(defaultextension=fextension,
                                              filetypes=(("Wave Combiner files", fextension),
                                                         ("All Files", "*")))

    if filename == "":
        return
    
    saveload.save(wavlst, filename)
    root.title("Wave Combiner - " + filename)
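Examples #1 and #13 call into a saveload module whose implementation is not shown. The sketch below is a minimal pickle-based stand-in under that assumption; the module name and the save(data, filename) signature come from the calls above, everything else is hypothetical.

# Hypothetical saveload module assumed by Examples #1 and #13.
# Only the call saveload.save(wavlst, filename) is visible above;
# a pickle-based implementation is an assumption, not the project's code.
import pickle

def save(data, filename):
    # Serialize the wave list (or any picklable object) to the chosen file.
    with open(filename, "wb") as f:
        pickle.dump(data, f)

def load(filename):
    # Read the saved object back.
    with open(filename, "rb") as f:
        return pickle.load(f)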
Example #2
def finishWindow(class_respect, teachers, grades):
    sg.theme('Purple')

    if class_respect < -10 and grades < -5 and teachers < -5:
        result_name = 'Жмыра! Работаешь в кфс, хуле'
    elif class_respect < -5 and grades > 10 and teachers > 10:
        result_name = 'Жыд! Пошёл нахуй'
    elif class_respect > 10 and grades > 10 and teachers > 10:
        result_name = 'Соня Б! Респект, хуле'
    else:
        result_name = 'та хуй знает кто пока что'

    if year == 11:
        finish_layout = [[sg.Text('Игра закончена!!')],
                         [sg.Text('Ваш результат:')],
                         [sg.Text('Респект однокл - ' + str(class_respect))],
                         [sg.Text('Отношения с учителями - ' + str(teachers))],
                         [sg.Text('Успеваемость - ' + str(grades))],
                         [sg.Text('Ты ' + result_name)], [sg.Button('Back')]]
    else:
        finish_layout = [[sg.Text('Год ' + str(year) + ' завершён!!')],
                         [sg.Text('Ваш результат:')],
                         [sg.Text('Респект однокл - ' + str(class_respect))],
                         [sg.Text('Отношения с учителями - ' + str(teachers))],
                         [sg.Text('Успеваемость - ' + str(grades))],
                         [sg.Text('Пока что ты ' + result_name)],
                         [sg.Button('Save')], [sg.Button('Continue')],
                         [sg.Button('Back')]]

    finish_window = sg.Window('Finish window', finish_layout, size=(600, 400))

    while True:
        event, values = finish_window.read()
        if event == sg.WIN_CLOSED or event == 'Back':
            break
        if event == 'Save':
            svld.save(class_respect,
                      teachers,
                      grades,
                      year + 1,
                      situation='1)')
        if event == 'Continue':
            finish_window.close()
            return (class_respect, teachers, grades, year + 1, 1
                    )  # last element is the situation

    finish_window.close()
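A minimal usage sketch for the window above, assuming the module-level year that finishWindow reads and the starting stats shown here, which are illustrative only.

# Hypothetical caller for finishWindow (Example #2). `year` is assumed to be
# a module-level global read inside the function.
year = 9

state = finishWindow(class_respect=3, teachers=7, grades=12)
if state is not None:
    # Player pressed 'Continue': carry the stats into the next year.
    class_respect, teachers, grades, year, situation = state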
Example #3
print "L<U: %s" % (L < U).sum()
print "L=U: %s" % (L == U).sum()
print "L>U: %s" % (L > U).sum()
print "average U-L %s" % ((U - L).sum() / np.prod(U.shape))
print np.histogram((U - L) / (24 * 3600))

print 'Saving results'
# Result
U = np.transpose(np.reshape(U - time_scale_num[0], fxlon.shape))
L = np.transpose(np.reshape(L - time_scale_num[0], fxlon.shape))
T = np.transpose(np.reshape(T - time_scale_num[0], fxlon.shape))

print 'U L T are shifted so that zero there is time_scale_num[0] = %s' % time_scale_num[
    0]
sl.save((U, L, T), 'result')

if pen:
    result = {
        'U': U,
        'L': L,
        'T': T,
        'fxlon': fxlon,
        'fxlat': fxlat,
        'time_num': time_num,
        'time_scale_num': time_scale_num,
        'time_num_granules': tt,
        'UP': UP,
        'LP': LP
    }
else:
Example #4
# cannot get starting time from wrfout
time_iso = ("2011-06-25T00:00:00Z", "2011-07-04T00:00:00Z") # tuple, not array

data=retrieve_af_data(bbox,time_iso)

print 'writing CSV and KML with detections'

keys=['latitude','longitude','brightness','scan','track','acq_date','acq_time','satellite','instrument','confidence','bright_t31','frp','scan_angle']
dkeys=['lat_fire','lon_fire','brig_fire','scan_fire','track_fire','acq_date','acq_time','sat_fire','instrument','conf_fire','t31_fire','frp_fire','scan_angle_fire']
N=[len(data[d]['lat_fire']) for d in data]
json=data2json(data,keys,dkeys,N)
write_csv(json,bbox)
prods={'AF':'Active Fires','FRP':'Fire Radiative Power'}
json2kml(json,'fire_detections.kml',bbox,prods)

print 'writing KML with ground'

keys=['latitude','longitude','scan','track','acq_date','acq_time','satellite','instrument','scan_angle']
dkeys=['lat_nofire','lon_nofire','scan_nofire','track_nofire','acq_date','acq_time','sat_fire','instrument','scan_angle_nofire']
N=[len(data[d]['lat_nofire']) for d in data]
json=data2json(data,keys,dkeys,N)
prods={'NF':'No Fire'}
json2kml(json,'nofire.kml',bbox,prods)

print 'saving data'

sl.save((data,fxlon,fxlat,map(time_iso2num,time_iso)),'data')

print 'run setup next'
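The call map(time_iso2num, time_iso) converts the ISO-8601 strings to numeric times, but the helper itself is not shown. Below is a minimal sketch assuming epoch seconds; the real project may use a different time origin.

# Hypothetical stand-in for time_iso2num (the real helper is not shown above).
# Parses an ISO-8601 UTC timestamp such as '2011-06-25T00:00:00Z' and returns
# seconds since the Unix epoch.
import calendar
import time

def time_iso2num(time_iso):
    t = time.strptime(time_iso, '%Y-%m-%dT%H:%M:%SZ')
    return calendar.timegm(t)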
Example #5
def tutorial():
    clear()
    saveload.save()
    typing("Welcome to the LABYRINTH OF DOOM, where nothing is weird.\n")
    typing(
        "You will go around the labyrinth, fighting monsters and getting loot.\n"
    )
    typing("Before we start, what is your name?\n")
    name = input("> ")
    saveload.data.name = name
    saveload.save()
    typing("Well hello there, " + saveload.data.name + "!\n")
    typing(
        "Now that we know your name, we need to choose your class. This will influence your starting armor and tools.\n"
    )
    text.classes()
    classes = ""
    while classes not in ["1", "2", "3", "4"]:
        classes = input("> ")
    if classes == "1":
        clear()
        saveload.data.Class = "Tank"
        saveload.data.armor = items.iron_armor
        saveload.data.secondary = items.wooden_shield
        typing("You have chosen the TANK class!\n")
        saveload.save()
    if classes == "2":
        clear()
        saveload.data.Class = "Warrior"
        saveload.data.armor = items.leather_cap
        saveload.data.primary = items.stone_sword
        typing("You have chosen the WARRIOR class!\n")
        saveload.save()
    if classes == "3":
        clear()
        saveload.data.Class = "Moneybags"
        saveload.data.coins = 50
        typing("You have chosen the MONEYBAGS class!\n")
        saveload.save()
    if classes == "4":
        clear()
        saveload.data.Class = "Engineer"
        saveload.data.secondary = items.explosives
        typing("You have chose the ENGINEER class!\n")
        saveload.save()
    typing("Before we begin, do you want to look at your inventory? (y/n)\n")
    y_n1 = ""
    while y_n1 not in ["y", "n"]:
        y_n1 = input("> ")
    if y_n1 == "y":
        clear()
        typing("Ok! Here is your inventory!\n")
        time.sleep(1)
        inventory()
        input("Please press enter to continue.")
        level_1()
    elif y_n1 == "n":
        clear()
        typing("Let's start with our game.\n")
        level_1()
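The tutorial relies on a typing() helper for a typewriter effect; it is not shown above, so the following is a minimal sketch with an arbitrary per-character delay.

# Hypothetical typing() helper assumed by Example #5: prints text one
# character at a time, typewriter style.
import sys
import time

def typing(text, delay=0.03):
    for ch in text:
        sys.stdout.write(ch)
        sys.stdout.flush()
        time.sleep(delay)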
Example #6
def frontier(clf,
             xx,
             yy,
             zz,
             bal=.5,
             plot_decision=False,
             plot_poly=False,
             using_weights=False,
             save_decision=False):
    """
    Compute the surface decision frontier for a classifier.

    :param clf: a classifier
    :param xx: meshgrid ndarray
    :param yy: meshgrid ndarray
    :param zz: meshgrid ndarray
    :param bal: number between 0 and 1, balance between lower and upper bounds in decision function (in case not level 0)
    :param plot_decision: boolean of plotting decision volume
    :param plot_poly: boolean of plotting polynomial approximation
    :return F: 2D meshes with xx, yy coordinates and the hyperplane z which gives decision function 0

    Developed in Python 2.7.15 :: Anaconda 4.5.10, on MACINTOSH.
    Angel Farguell ([email protected]), 2019-02-20
    Modified version of:
    https://www.semipol.de/2015/10/29/SVM-separating-hyperplane-3d-matplotlib.html
    """

    # Creating the 3D grid
    XX = np.c_[np.ravel(xx), np.ravel(yy), np.ravel(zz)]

    # Evaluating the decision function
    print '>> Evaluating the decision function...'
    sys.stdout.flush()
    t_1 = time()
    if using_weights:
        from libsvm_weights.python.svmutil import svm_predict
        _, _, p_vals = svm_predict([], XX, clf, '-q')
        ZZ = np.array([p[0] for p in p_vals])
    else:
        ZZ = clf.decision_function(XX)
    t_2 = time()
    print 'elapsed time: %ss.' % str(abs(t_2 - t_1))
    hist = np.histogram(ZZ)
    print 'counts: ', hist[0]
    print 'values: ', hist[1]
    print 'decision function range: ', ZZ.min(), '~', ZZ.max()

    # Reshaping decision function volume
    Z = ZZ.reshape(xx.shape)
    print 'decision function shape: ', Z.shape
    if save_decision:
        sl.save((xx, yy, zz, Z), 'decision')

    if plot_decision:
        try:
            from skimage import measure
            from shiftcmap import shiftedColorMap
            verts, faces, normals, values = measure.marching_cubes_lewiner(
                Z, level=0, allow_degenerate=False)
            # Scale and transform to actual size of the interesting volume
            h = np.divide([
                xx.max() - xx.min(),
                yy.max() - yy.min(),
                zz.max() - zz.min()
            ],
                          np.array(xx.shape) - 1)
            verts = verts * h
            verts = verts + [xx.min(), yy.min(), zz.min()]
            mesh = Poly3DCollection(verts[faces], facecolor='orange', alpha=.9)
            fig = plt.figure()
            ax = fig.gca(projection='3d')
            fig.suptitle("Decision volume")
            col = [(0, .5, 0), (.5, .5, .5), (.5, 0, 0)]
            cm = colors.LinearSegmentedColormap.from_list('GrRdD', col, N=50)
            midpoint = 1 - ZZ.max() / (ZZ.max() + abs(ZZ.min()))
            shiftedcmap = shiftedColorMap(cm,
                                          midpoint=midpoint,
                                          name='shifted')
            kk = 1 + np.divide(xx.shape, 50)
            X = np.ravel(xx[::kk[0], ::kk[1], ::kk[2]])
            Y = np.ravel(yy[::kk[0], ::kk[1], ::kk[2]])
            T = np.ravel(zz[::kk[0], ::kk[1], ::kk[2]])
            CC = np.ravel(Z[::kk[0], ::kk[1], ::kk[2]])
            p = ax.scatter(X, Y, T, c=CC, s=.1, alpha=.5, cmap=shiftedcmap)
            cbar = fig.colorbar(p)
            cbar.set_label('decision function value',
                           rotation=270,
                           labelpad=20)
            ax.add_collection3d(mesh)
            ax.set_xlim([xx.min(), xx.max()])
            ax.set_ylim([yy.min(), yy.max()])
            ax.set_zlim([zz.min(), zz.max()])
            ax.set_xlabel("Longitude normalized")
            ax.set_ylabel("Latitude normalized")
            ax.set_zlabel("Time normalized")
            plt.savefig('decision.png')
        except Exception as e:
            print 'Warning: something went wrong when plotting...'
            print e

    # xx 2-dimensional array
    Fx = xx[:, :, 0]
    # yy 2-dimensional array
    Fy = yy[:, :, 0]
    # zz 1-dimensional array
    zr = zz[0, 0]
    # find roots
    Fz = find_roots(Fx, Fy, zr, Z, plot_poly=plot_poly)

    print 'elapsed time: %ss.' % str(abs(t_2 - t_1))
    F = [Fx, Fy, Fz]

    return F
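A minimal usage sketch for frontier(), assuming a fitted scikit-learn classifier with a decision_function; the toy data, grid bounds and resolution are illustrative and not taken from the original project.

# Hypothetical usage of frontier() with a fitted scikit-learn SVM.
import numpy as np
from sklearn import svm

X = np.random.rand(200, 3)             # normalized lon, lat, time samples
y = (X[:, 2] > 0.5).astype(int)        # toy labels
clf = svm.SVC(gamma='scale').fit(X, y)

xx, yy, zz = np.meshgrid(np.linspace(0, 1, 20),
                         np.linspace(0, 1, 20),
                         np.linspace(0, 1, 20), indexing='ij')
Fx, Fy, Fz = frontier(clf, xx, yy, zz)  # hyperplane z where the decision function is 0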
Example #7
def pipeline(frame, vocabname, model_type):
    vocab = gs.models.KeyedVectors.load_word2vec_format('models/' + vocabname + '.bin', binary=True)
    vocabsize = 300


    timesteps = 32
    batch_size = 64
    n_epochs = 200
    n_parts = 5


    model = None
    model_name = None
    if model_type==1:
        model = simple_lstm(timesteps, vocabsize)
        model_name = 'lstm_no_seq'
    elif model_type==2:
        model = lstm_with_mlp(timesteps, vocabsize)
        model_name = 'lstm_mlp'
    else:
        raise ValueError('unknown model_type: expected 1 or 2')


    frame = shuffle(frame)
    matrices, tones = to_matrix(frame, vocabsize, vocab)
    tones = (np.array(tones) + 1) // 2
    for i in tqdm(range(len(matrices))):
        matrices[i] = np.vstack(
            (matrices[i], np.random.normal(scale=0.005, size=(timesteps - matrices[i].shape[0], vocabsize))))
    matrices = np.array(matrices)


    tones = np.array(tones, ndmin=2).T
    train_matrices = matrices[0:matrices.shape[0] * 4 // 5]
    train_tones = tones[0:matrices.shape[0] * 4 // 5]
    test_matrices = matrices[matrices.shape[0] * 4 // 5:]
    test_tones = tones[matrices.shape[0] * 4 // 5:]


    times = []
    scores = []
    start_time = time.time()


    for i in range(n_epochs):
        print('Epoch {}/{}'.format(i+1, n_epochs))
        for j in range(n_parts):
            matrices_heap = train_matrices[
                            j * train_matrices.shape[0] // n_parts:(j + 1) * train_matrices.shape[0] // n_parts]
            tones_heap = train_tones[
                         j * train_matrices.shape[0] // n_parts:(j + 1) * train_matrices.shape[0] // n_parts]
            model.fit(matrices_heap, tones_heap, batch_size=batch_size, epochs=1, verbose=2)
        if (i + 1) % 10 == 0:
            score = model.evaluate(test_matrices, test_tones, batch_size=batch_size, verbose=1)
            scores.append(score)
            times.append(time.time() - start_time)


    path = os.path.abspath(os.getcwd()) + "/" + model_name + "_" + vocabname
    if not os.path.isdir(path):
        os.makedirs(path)
    model.save(path + "/model")
    save(scores, path, "/scores")
    save(times, path, "/times")
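The last two lines use a standalone save(obj, path, name) helper that is not defined in the snippet; the sketch below is one plausible pickle-based implementation under that assumed signature.

# Hypothetical save() helper matching save(scores, path, "/scores") above;
# the pickle format and the .pkl suffix are assumptions.
import pickle

def save(obj, path, name):
    with open(path + name + ".pkl", "wb") as f:
        pickle.dump(obj, f)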
Example #8
import shutil
import glob

MyList = wt.searchStation('OJP')
station_name = MyList[8]
MyTS = wt.searchTimeseries('', station_name)

# Select a date range to work with:
Start = '1900-08-01 00:00:00'
End = '2060-02-08 00:00:00'

# NW VWC data
indexes = np.arange(21, 33, 2)
TS_Selection = MyTS[indexes]
df = wt.getTimeseries(TS_Selection, station_name, Start, End)
sl.save(df, 'OJP_NW_VWC')

# SW VWC data
indexes = np.arange(22, 33, 2)
TS_Selection = MyTS[indexes]
df = wt.getTimeseries(TS_Selection, station_name, Start, End)
sl.save(df, 'OJP_SW_VWC')

# NW Temperature data
indexes = np.arange(0, 12, 2)
TS_Selection = MyTS[indexes]
df = wt.getTimeseries(TS_Selection, station_name, Start, End)
sl.save(df, 'OJP_NW_T')

# SW Temperature data
indexes = np.arange(1, 12, 2)
Example #9
def process_detections(data,
                       fxlon,
                       fxlat,
                       time_num,
                       bounds=None,
                       maxsize=500,
                       confl=0.):
    """
	Process detections to obtain upper and lower bounds

	:param data: data obtained from JPSSD
	:param fxlon: longitude coordinates of the fire mesh (from wrfout)
	:param fxlat: latitude coordinates of the fire mesh (from wrfout)
	:param time_num: numerical value of the starting and ending time
	:param bounds: optional, spatial bounds to consider (lon_min,lon_max,lat_min,lat_max)
	:param maxsize: optional, maxsize of the mesh in both directions
	:param confl: optional, minimum confidence level for the pixels
	:return result: upper and lower bounds with some parameters

	Developed in Python 2.7.15 :: Anaconda 4.5.10, on MACINTOSH.
	Angel Farguell ([email protected]), 2019-04-01
	"""

    # process satellite settings
    ut = 1  # Upper bound technique, ut=1: Center of the pixel -- ut=2: Ellipse inscribed in the pixel
    lt = 1  # Lower bound technique, lt=1: Center of the pixel -- lt=2: Ellipse inscribed in the pixel (very slow)
    mt = 2  # Mask technique, mt=1: Ball -- mt=2: Pixel -- mt=3: Ellipse
    dist = 8  # If mt=1 (ball neighbours), radius of the balls is R=sqrt(2*dist^2)
    mm = 5  # If mt=3 (ellipse neighbours), larger ellipses constant: (x/a)^2+(x/b)^2<=mm
    confa = False  # Histogram plot of the confidence level distribution
    confm = True  # Store confidence of each fire and ground detection
    conf_nofire = 70.  # In absence of nofire confidence, value for nofire confidence (satellite data)
    burn = False  # Using or not the burned scar product

    ofxlon = np.copy(fxlon)
    ofxlat = np.copy(fxlat)
    print 'mesh shape %s %s' % fxlon.shape
    coarsening = np.int(1 + np.max(fxlon.shape) / maxsize)
    print 'maximum size is %s, coarsening %s' % (maxsize, coarsening)
    fxlon = fxlon[0::coarsening, 0::coarsening]
    fxlat = fxlat[0::coarsening, 0::coarsening]
    print 'coarsened  %s %s' % fxlon.shape

    if not bounds:
        bounds = [fxlon.min(), fxlon.max(), fxlat.min(), fxlat.max()]
    vfxlon = np.ravel(fxlon)
    vfxlat = np.ravel(fxlat)
    vfgrid = np.column_stack((vfxlon, vfxlat))
    print 'Setting up interpolation'
    stree = spatial.cKDTree(vfgrid)
    vfind = np.array(
        list(
            itertools.product(np.array(range(fxlon.shape[0])),
                              np.array(range(fxlon.shape[1])))))
    itree = spatial.cKDTree(vfind)

    # Sort dictionary by time_num into an array of tuples (key, dictionary of values)
    print 'Sort the granules by dates'
    sdata = sort_dates(data)
    tt = [dd[1]['time_num'] for dd in sdata]  # array of times
    print 'Sorted?'
    stt = sorted(tt)
    print tt == stt

    # Max and min time_num
    maxt = time_num[1]
    mint = time_num[0]
    # time_scale_num = time_num
    time_scale_num = [mint - 0.5 * (maxt - mint), maxt + 2 * (maxt - mint)]

    # Creating the resulting arrays
    DD = np.prod(fxlon.shape)
    U = np.empty(DD)
    U[:] = time_scale_num[1]
    L = np.empty(DD)
    L[:] = time_scale_num[0]
    T = np.empty(DD)
    T[:] = time_scale_num[1]
    if confm:
        C = np.zeros(DD)
        Cg = np.zeros(DD)

    if confa:
        # Confidence analysis
        confanalysis = Dict({
            'f7': np.array([]),
            'f8': np.array([]),
            'f9': np.array([])
        })

    # For granules in order increasing in time
    GG = len(sdata)
    for gran in range(GG):
        t_init = time.time()
        print 'Loading data of granule %d/%d' % (gran + 1, GG)
        print 'Granule name: %s' % sdata[gran][0]
        # Load granule lon, lat, fire arrays and time number
        slon = sdata[gran][1]['lon']
        slat = sdata[gran][1]['lat']
        ti = sdata[gran][1]['time_num']
        fire = sdata[gran][1]['fire']
        print 'Interpolation to fire grid'
        sys.stdout.flush()
        # Interpolate all the granule coordinates in bounds in the wrfout fire mesh
        # gg: mask in the granule of g-points = pixel coordinates inside the fire mesh
        # ff: the closest points in the fire mesh indexed by g-points
        (ff, gg) = nearest_scipy(slon, slat, stree,
                                 bounds)  ## indices to flattened granule array
        vfire = np.ravel(fire)  ## flatten the fire detection array
        gfire = vfire[gg]  # the part within the fire mesh bounds
        fi = gfire >= 7  # where fire detected - low, nominal or high confidence (all the fire data in the granule)
        ffi = ff[fi]  # indices in the fire mesh where the fire detections are
        nofi = np.logical_or(gfire == 3, gfire == 5)  # where no fire detected
        unkn = np.logical_not(np.logical_or(fi, nofi))  # where unknown
        print 'fire detected    %s' % fi.sum()
        print 'no fire detected %s' % nofi.sum()
        print 'unknown          %s' % unkn.sum()
        if fi.any():  # at fire points
            rfire = gfire[gfire >= 7]
            conf = sdata[gran][1][
                'conf_fire']  # confidence of the fire detections
            if confa:
                confanalysis.f7 = np.concatenate(
                    (confanalysis.f7, conf[rfire == 7]))
                confanalysis.f8 = np.concatenate(
                    (confanalysis.f8, conf[rfire == 8]))
                confanalysis.f9 = np.concatenate(
                    (confanalysis.f9, conf[rfire == 9]))
            flc = conf >= confl  # fire large confidence indexes
            ffa = U[ffi][flc] > ti  # first fire arrival

            if ut > 1 or mt > 1:
                # taking lon, lat, scan and track of the fire detections which fire large confidence indexes
                lon = sdata[gran][1]['lon_fire'][flc][ffa]
                lat = sdata[gran][1]['lat_fire'][flc][ffa]
                scan = sdata[gran][1]['scan_fire'][flc][ffa]
                track = sdata[gran][1]['track_fire'][flc][ffa]

            # Set upper bounds
            if ut == 1:
                # indices with high confidence
                iu = ffi[flc][ffa]
            elif ut == 2:
                # creating the indices for all the pixel neighbours of the upper bound
                iu = neighbor_indices_ellipse(vfxlon, vfxlat, lon, lat, scan,
                                              track)
            else:
                print 'ERROR: invalid ut option.'
                sys.exit()
            mu = U[iu] > ti  # only upper bounds not yet set
            if confm:
                if ut == 1:
                    C[iu[mu]] = conf[flc][ffa][mu]
                else:
                    print 'ERROR: ut=2 and confm=True not implemented!'
                    sys.exit(1)
            print 'U set at %s points' % mu.sum()
            if ut == 1:
                U[iu[
                    mu]] = ti  # set U to granule time where fire detected and not detected before
            else:
                U[iu][
                    mu] = ti  # set U to granule time where fire detected and not detected before

            # Set mask
            if mt == 1:
                # creating the indices for all the pixel neighbours of the upper bound indices
                kk = neighbor_indices_ball(itree, np.unique(ffi[flc]),
                                           fxlon.shape, dist)
                im = np.array(
                    sorted(
                        np.unique([
                            x[0] + x[1] * fxlon.shape[0] for x in vfind[kk]
                        ])))
            elif mt == 2:
                # creating the indices for all the pixel neighbours of the upper bound indices
                im = neighbor_indices_pixel(vfxlon, vfxlat, lon, lat, scan,
                                            track)
            elif mt == 3:
                # creating the indices for all the pixel neighbours of the upper bound indices
                im = neighbor_indices_ellipse(vfxlon, vfxlat, lon, lat, scan,
                                              track, mm)
            else:
                print 'ERROR: invalid mt option.'
                sys.exit()
            if mt > 1:
                ind = np.where(im)[0]
                mmt = ind[ti < T[im]]  # only where the mask is not yet set
                print 'T set at %s points' % mmt.shape
                T[mmt] = ti  # update mask T
            else:
                print 'T set at %s points' % im[T[im] > ti].shape
                T[im[T[im] > ti]] = ti  # update mask T

        # Set mask from burned scar data
        if burn:
            if 'burned' in sdata[gran][1].keys():
                # if burned scar exists, set the mask in the burned scar pixels
                burned = sdata[gran][1]['burned']
                bm = ff[np.ravel(burned)[gg]]
                T[bm] = ti

        if nofi.any():  # set L at no-fire points and not masked
            if lt == 1:
                # indices of clear ground
                jj = np.logical_and(nofi, ti < T[ff])
                il = ff[jj]
            elif lt == 2:
                # taking lon, lat, scan and track of the ground detections
                lon = sdata[gran][1]['lon_nofire']
                lat = sdata[gran][1]['lat_nofire']
                scan = sdata[gran][1]['scan_nofire']
                track = sdata[gran][1]['track_nofire']
                # creating the indices for all the pixel neighbours of lower bound indices
                nofi = neighbor_indices_pixel(vfxlon, vfxlat, lon, lat, scan,
                                              track)
                il = np.logical_and(nofi, ti < T)
            else:
                print 'ERROR: invalid lt option.'
                sys.exit()
            if confm:
                if lt == 1:
                    mask_nofi = gg[np.logical_or(vfire == 3, vfire == 5)]
                    try:
                        # get nofire confidence if we have it
                        confg = sdata[gran][1]['conf_nofire'][mask_nofi]
                    except:
                        # if not, define confidence from conf_nofire value
                        confg = conf_nofire * np.ones(nofi.sum())
                    Cg[il] = confg[(ti < T[ff])[nofi]]
                else:
                    print 'ERROR: lt=2 and confm=True not implemented!'
                    sys.exit(1)
            L[il] = ti  # set L to granule time where clear ground was detected
            print 'L set at %s points' % jj.sum()
        t_final = time.time()
        print 'elapsed time: %ss.' % str(t_final - t_init)

    print "L<U: %s" % (L < U).sum()
    print "L=U: %s" % (L == U).sum()
    print "L>U: %s" % (L > U).sum()
    print "average U-L %s" % ((U - L).sum() / np.prod(U.shape))
    print np.histogram((U - L) / (24 * 3600))

    if (L > U).sum() > 0:
        print "Inconsistency in the data, removing lower bounds..."
        L[L > U] = time_scale_num[0]
        print "L<U: %s" % (L < U).sum()
        print "L=U: %s" % (L == U).sum()
        print "L>U: %s" % (L > U).sum()
        print "average U-L %s" % ((U - L).sum() / np.prod(U.shape))
        print np.histogram((U - L) / (24 * 3600))

    print 'Confidence analysis'
    if confa:
        plt.subplot(1, 3, 1)
        plt.hist(x=confanalysis.f7,
                 bins='auto',
                 color='#ff0000',
                 alpha=0.7,
                 rwidth=0.85)
        plt.xlabel('Confidence')
        plt.ylabel('Frequency')
        plt.title('Fire label 7: %d' % len(confanalysis.f7))
        plt.subplot(1, 3, 2)
        plt.hist(x=confanalysis.f8,
                 bins='auto',
                 color='#00ff00',
                 alpha=0.7,
                 rwidth=0.85)
        plt.xlabel('Confidence')
        plt.ylabel('Frequency')
        plt.title('Fire label 8: %d' % len(confanalysis.f8))
        plt.subplot(1, 3, 3)
        plt.hist(x=confanalysis.f9,
                 bins='auto',
                 color='#0000ff',
                 alpha=0.7,
                 rwidth=0.85)
        plt.xlabel('Confidence')
        plt.ylabel('Frequency')
        plt.title('Fire label 9: %d' % len(confanalysis.f9))
        plt.show()

    print 'Saving results'
    # Result
    U = np.reshape(U - time_scale_num[0], fxlon.shape, 'F')
    L = np.reshape(L - time_scale_num[0], fxlon.shape, 'F')
    T = np.reshape(T - time_scale_num[0], fxlon.shape, 'F')

    print 'U L T are shifted so that zero there is time_scale_num[0] = %s' % time_scale_num[
        0]

    result = {
        'U': U,
        'L': L,
        'T': T,
        'fxlon': fxlon,
        'fxlat': fxlat,
        'time_num': time_num,
        'time_scale_num': time_scale_num,
        'time_num_granules': tt,
        'ofxlon': ofxlon,
        'ofxlat': ofxlat
    }
    if confm:
        C = np.reshape(C, fxlon.shape, 'F')
        Cg = np.reshape(Cg, fxlon.shape, 'F')
        result.update({'C': C, 'Cg': Cg})

    sio.savemat('result.mat', mdict=result)
    sl.save(result, 'result')

    print 'To visualize, run in Matlab the script plot_results.m'
    print 'For multigrid, use the script jpss_mg.m in fire_interpolation'

    return result
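A minimal driver tying Examples #4 and #9 together, assuming that the saveload module imported as sl provides a load() counterpart to save(); that counterpart is not shown in the snippets.

# Hypothetical driver combining Examples #4 and #9. sl.load() mirroring
# sl.save() is an assumption.
data, fxlon, fxlat, time_num = sl.load('data')
result = process_detections(data, fxlon, fxlat, time_num,
                            maxsize=500,  # coarsen the fire mesh to at most 500x500
                            confl=70.)    # keep fire detections with confidence >= 70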
Example #10
def main(model_type, dataset_path, ptb_path, save_path,
    num_steps, encoder_size, pos_decoder_size, chunk_decoder_size, dropout,
    batch_size, pos_embedding_size, num_shared_layers, num_private_layers, chunk_embedding_size,
    lm_decoder_size, bidirectional, lstm, write_to_file, mix_percent,glove_path,max_epoch,
    projection_size, num_batches_gold, reg_weight, word_embedding_size, embedding_trainable, \
    adam, connections, fraction_of_training_data=1, embedding=False, test=False):

    """Main."""
    config = Config(num_steps, encoder_size, pos_decoder_size, chunk_decoder_size, dropout,
    batch_size, pos_embedding_size, num_shared_layers, num_private_layers, chunk_embedding_size,
    lm_decoder_size, bidirectional, lstm, mix_percent, max_epoch, reg_weight, word_embedding_size, \
     embedding_trainable, adam, fraction_of_training_data, connections)

    raw_data_path = dataset_path + '/data'
    raw_data = reader.raw_x_y_data(
        raw_data_path, num_steps, ptb_path + '/data', embedding, glove_path)

    words_t, pos_t, chunk_t, words_v, \
        pos_v, chunk_v, word_to_id, pos_to_id, \
        chunk_to_id, words_test, pos_test, chunk_test, \
        words_c, pos_c, chunk_c, words_ptb, pos_ptb, chunk_ptb, word_embedding = raw_data

    num_train_examples = int(np.floor(len(words_t) * fraction_of_training_data))

    words_t = words_t[:num_train_examples]
    pos_t = pos_t[:num_train_examples]
    chunk_t = chunk_t[:num_train_examples]

    num_pos_tags = len(pos_to_id)
    num_chunk_tags = len(chunk_to_id)
    vocab_size = len(word_to_id)
    prev_chunk_F1 = 0.0

    ptb_batches = reader.create_batches(words_ptb, pos_ptb, chunk_ptb, config.batch_size,
                            config.num_steps, num_pos_tags, num_chunk_tags, vocab_size, continuing=True)

    ptb_iter = 0

    # Create an empty array to hold [epoch number, F1]
    if test==False:
        best_chunk_epoch = [0, 0.0]
        best_pos_epoch = [0, 0.0]
    else:
        best_chunk_epoch = [max_epoch, 0.0]

    print('constructing word embedding')

    if embedding==True:
        word_embedding = np.float32(word_embedding)
    else:
        word_embedding = np.float32((np.random.rand(vocab_size, config.word_embedding_size)-0.5)*config.init_scale)

    if test==False:
        with tf.Graph().as_default(), tf.Session() as session:
            print('building models')
            initializer = tf.random_uniform_initializer(-config.init_scale,
                                                        config.init_scale)

            # model to train hyperparameters on
            with tf.variable_scope("hyp_model", reuse=None, initializer=initializer):
                m = Shared_Model(is_training=True, config=config, num_pos_tags=num_pos_tags,
                num_chunk_tags=num_chunk_tags, vocab_size=vocab_size,
                word_embedding=word_embedding, projection_size=projection_size)

            with tf.variable_scope("hyp_model", reuse=True, initializer=initializer):
                mValid = Shared_Model(is_training=False, config=config, num_pos_tags=num_pos_tags,
                num_chunk_tags=num_chunk_tags, vocab_size=vocab_size,
                word_embedding=word_embedding, projection_size=projection_size)


            print('initialising variables')

            tf.initialize_all_variables().run()

            print("initialise word vectors")
            session.run(m.embedding_init, {m.embedding_placeholder: word_embedding})
            session.run(mValid.embedding_init, {mValid.embedding_placeholder: word_embedding})

            print('finding best epoch parameter')
            # ====================================
            # Create vectors for training results
            # ====================================

            # Create empty vectors for loss
            train_loss_stats = np.array([])
            train_pos_loss_stats = np.array([])
            train_chunk_loss_stats = np.array([])
            train_lm_loss_stats = np.array([])

            # Create empty vectors for accuracy
            train_pos_stats = np.array([])
            train_chunk_stats = np.array([])

            # ====================================
            # Create vectors for validation results
            # ====================================
            # Create empty vectors for loss
            valid_loss_stats = np.array([])
            valid_pos_loss_stats = np.array([])
            valid_chunk_loss_stats = np.array([])
            valid_lm_loss_stats = np.array([])

            # Create empty vectors for accuracy
            valid_pos_stats = np.array([])
            valid_chunk_stats = np.array([])

            for i in range(config.max_epoch):
                print(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))

                print("Epoch: %d" % (i + 1))
                if config.random_mix == False:
                    if config.ptb == True:
                        _, _, _, _, _, _, _, _, _, _ = \
                            run_epoch(session, m,
                                      words_ptb, pos_ptb, chunk_ptb,
                                      num_pos_tags, num_chunk_tags, vocab_size, num_steps,
                                      verbose=True, model_type='LM')


                    mean_loss, posp_t, chunkp_t, lmp_t, post_t, chunkt_t, lmt_t, pos_loss, chunk_loss, lm_loss = \
                        run_epoch(session, m,
                                  words_t, pos_t, chunk_t,
                                  num_pos_tags, num_chunk_tags, vocab_size, num_steps,
                                  verbose=True, model_type=model_type)

                else:
                    # an additional if statement to get the gold vs pred connections
                    if i > num_batches_gold:
                        gold_percent = gold_percent * 0.8
                    else:
                        gold_percent = 1
                    if np.random.rand(1) < gold_percent:
                        gold_embed = 1
                    else:
                        gold_embed = 0
                    mean_loss, posp_t, chunkp_t, lmp_t, post_t, chunkt_t, lmt_t, pos_loss, chunk_loss, lm_loss, ptb_iter = \
                        run_epoch_random.run_epoch(session, m,
                                  words_t, words_ptb, pos_t, pos_ptb, chunk_t, chunk_ptb,
                                  num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                                  ptb_batches, ptb_iter, verbose=True, model_type=model_type)


                print('epoch finished')
                # Save stats for charts
                train_loss_stats = np.append(train_loss_stats, mean_loss)
                train_pos_loss_stats = np.append(train_pos_loss_stats, pos_loss)
                train_chunk_loss_stats = np.append(train_chunk_loss_stats, chunk_loss)
                train_lm_loss_stats = np.append(train_lm_loss_stats, lm_loss)

                # get training predictions as list
                posp_t = reader._res_to_list(posp_t, config.batch_size, num_steps,
                                             pos_to_id, len(words_t), to_str=True)
                chunkp_t = reader._res_to_list(chunkp_t, config.batch_size, num_steps,
                                               chunk_to_id, len(words_t),to_str=True)
                lmp_t = reader._res_to_list(lmp_t, config.batch_size, num_steps,
                                                 word_to_id, len(words_t),to_str=True)
                post_t = reader._res_to_list(post_t, config.batch_size, num_steps,
                                             pos_to_id, len(words_t), to_str=True)
                chunkt_t = reader._res_to_list(chunkt_t, config.batch_size, num_steps,
                                                chunk_to_id, len(words_t), to_str=True)
                lmt_t = reader._res_to_list(lmt_t, config.batch_size, num_steps,
                                                 word_to_id, len(words_t),to_str=True)

                # find the accuracy
                print('finding accuracy')
                pos_acc = np.sum(posp_t==post_t)/float(len(posp_t))
                chunk_F1 = f1_score(chunkt_t, chunkp_t,average="weighted")

                # add to array
                train_pos_stats = np.append(train_pos_stats, pos_acc)
                train_chunk_stats = np.append(train_chunk_stats, chunk_F1)

                # print for tracking
                print("Pos Training Accuracy After Epoch %d :  %3f" % (i+1, pos_acc))
                print("Chunk Training F1 After Epoch %d : %3f" % (i+1, chunk_F1))

                valid_loss, posp_v, chunkp_v, lmp_v, post_v, chunkt_v, lmt_v, pos_v_loss, chunk_v_loss, lm_v_loss, ptb_iter = \
                    run_epoch_random.run_epoch(session, mValid,
                              words_v, words_ptb, pos_v, pos_ptb, chunk_v, chunk_ptb,
                              num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                              ptb_batches, ptb_iter, verbose=True,  model_type=model_type, valid=True)

                # Save loss for charts
                valid_loss_stats = np.append(valid_loss_stats, valid_loss)
                valid_pos_loss_stats = np.append(valid_pos_loss_stats, pos_v_loss)
                valid_chunk_loss_stats = np.append(valid_chunk_loss_stats, chunk_v_loss)
                valid_lm_loss_stats = np.append(valid_lm_loss_stats, lm_v_loss)

                # get predictions as list
                posp_v = reader._res_to_list(posp_v, config.batch_size, num_steps,
                                             pos_to_id, len(words_v), to_str=True)
                chunkp_v = reader._res_to_list(chunkp_v, config.batch_size, num_steps,
                                                chunk_to_id, len(words_v), to_str=True)
                lmp_v = reader._res_to_list(lmp_v, config.batch_size, num_steps,
                                                word_to_id, len(words_v), to_str=True)
                chunkt_v = reader._res_to_list(chunkt_v, config.batch_size, num_steps,
                                                chunk_to_id, len(words_v), to_str=True)
                post_v = reader._res_to_list(post_v, config.batch_size, num_steps,
                                             pos_to_id, len(words_v), to_str=True)
                lmt_v = reader._res_to_list(lmt_v, config.batch_size, num_steps,
                                                word_to_id, len(words_v), to_str=True)

                # find accuracy
                pos_acc = np.sum(posp_v==post_v)/float(len(posp_v))
                chunk_F1 = f1_score(chunkt_v, chunkp_v, average="weighted")


                print("Pos Validation Accuracy After Epoch %d :  %3f" % (i+1, pos_acc))
                print("Chunk Validation F1 After Epoch %d : %3f" % (i+1, chunk_F1))

                # add to stats
                valid_pos_stats = np.append(valid_pos_stats, pos_acc)
                valid_chunk_stats = np.append(valid_chunk_stats, chunk_F1)

                if (abs(chunk_F1-prev_chunk_F1))<=0.001:
                    config.learning_rate = 0.8*config.learning_rate
                    print("learning rate updated")

                # update best parameters
                if(chunk_F1 > best_chunk_epoch[1]) or (pos_acc > best_pos_epoch[1]):
                    if pos_acc > best_pos_epoch[1]:
                        best_pos_epoch = [i+1, pos_acc]
                    if chunk_F1 > best_chunk_epoch[1]:
                        best_chunk_epoch = [i+1, chunk_F1]

                    saveload.save(save_path + '/val_model.pkl', session)
                    with open(save_path + '/pos_to_id.pkl', "wb") as file:
                        pickle.dump(pos_to_id, file)
                    with open(save_path + '/chunk_to_id.pkl', "wb") as file:
                        pickle.dump(chunk_to_id, file)
                    print("Model saved in file: %s" % save_path)

                    if write_to_file==False:
                        id_to_word = {v: k for k, v in word_to_id.items()}

                        words_t_unrolled = [id_to_word[k] for k in words_t[num_steps-1:]]
                        words_v_unrolled = [id_to_word[k] for k in words_v[num_steps-1:]]

                        # unroll data
                        train_custom = np.hstack((np.array(words_t_unrolled).reshape(-1,1), np.char.upper(post_t), np.char.upper(chunkt_t)))
                        valid_custom = np.hstack((np.array(words_v_unrolled).reshape(-1,1), np.char.upper(post_v), np.char.upper(chunkt_v)))
                        chunk_pred_train = np.concatenate((train_custom, np.char.upper(chunkp_t).reshape(-1,1)), axis=1)
                        chunk_pred_val = np.concatenate((valid_custom, np.char.upper(chunkp_v).reshape(-1,1)), axis=1)
                        pos_pred_train = np.concatenate((train_custom, np.char.upper(posp_t).reshape(-1,1)), axis=1)
                        pos_pred_val = np.concatenate((valid_custom, np.char.upper(posp_v).reshape(-1,1)), axis=1)

                        # write to file
                        np.savetxt(save_path + '/predictions/chunk_pred_train.txt',
                                   chunk_pred_train, fmt='%s')
                        print('writing to ' + save_path + '/predictions/chunk_pred_train.txt')
                        np.savetxt(save_path + '/predictions/chunk_pred_val.txt',
                                   chunk_pred_val, fmt='%s')
                        print('writing to ' + save_path + '/predictions/chunk_pred_val.txt')
                        np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                                   pos_pred_train, fmt='%s')
                        print('writing to ' + save_path + '/predictions/pos_pred_train.txt')
                        np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                                   pos_pred_val, fmt='%s')
                        print('writing to ' + save_path + '/predictions/pos_pred_val.txt')

                        print('Getting Testing Predictions (Valid)')
                        test_loss, posp_test, chunkp_test, lmp_test, post_test, chunkt_test, lmt_test, pos_test_loss, chunk_test_loss, lm_test_loss, ptb_iter = \
                            run_epoch_random.run_epoch(session, mValid,
                                      words_test, words_ptb, pos_test, pos_ptb, chunk_test, chunk_ptb,
                                      num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                                      ptb_batches, ptb_iter, verbose=True,  model_type=model_type, valid=True)

                        # get predictions as list
                        posp_test = reader._res_to_list(posp_test, config.batch_size, num_steps,
                                                     pos_to_id, len(words_test), to_str=True)
                        chunkp_test = reader._res_to_list(chunkp_test, config.batch_size, num_steps,
                                                        chunk_to_id, len(words_test), to_str=True)
                        lmp_test = reader._res_to_list(lmp_test, config.batch_size, num_steps,
                                                        word_to_id, len(words_test), to_str=True)
                        chunkt_test = reader._res_to_list(chunkt_test, config.batch_size, num_steps,
                                                        chunk_to_id, len(words_test), to_str=True)
                        post_test = reader._res_to_list(post_test, config.batch_size, num_steps,
                                                     pos_to_id, len(words_test), to_str=True)
                        lmt_test = reader._res_to_list(lmt_test, config.batch_size, num_steps,
                                                        word_to_id, len(words_test), to_str=True)

                        words_test_c = [id_to_word[k] for k in words_test[num_steps-1:]]
                        test_data = np.hstack((np.array(words_test_c).reshape(-1,1), np.char.upper(post_test), np.char.upper(chunkt_test)))

                        # find the accuracy
                        print('finding  test accuracy')
                        pos_acc_train = np.sum(posp_test==post_test)/float(len(posp_test))
                        chunk_F1_train = f1_score(chunkt_test, chunkp_test,average="weighted")

                        print("POS Test Accuracy: " + str(pos_acc_train))
                        print("Chunk Test F1: " + str(chunk_F1_train))

                        chunk_pred_test = np.concatenate((test_data, np.char.upper(chunkp_test).reshape(-1,1)), axis=1)
                        pos_pred_test = np.concatenate((test_data, np.char.upper(posp_test).reshape(-1,1)), axis=1)

                        print('writing to ' + save_path + '/predictions/chunk_pred_combined.txt')
                        np.savetxt(save_path + '/predictions/chunk_pred_test.txt',
                                   chunk_pred_test, fmt='%s')
                        print('writing to ' + save_path + '/predictions/chunk_pred_test.txt')

                        np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                                   pos_pred_train, fmt='%s')
                        print('writing to ' + save_path + '/predictions/pos_pred_train.txt')
                        np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                                   pos_pred_val, fmt='%s')
                        print('writing to ' + save_path + '/predictions/pos_pred_val.txt')

                        np.savetxt(save_path + '/predictions/pos_pred_test.txt',
                                   pos_pred_test, fmt='%s')

                prev_chunk_F1 = chunk_F1

            # Save loss & accuracy plots
            np.savetxt(save_path + '/loss/valid_loss_stats.txt', valid_loss_stats)
            np.savetxt(save_path + '/loss/valid_pos_loss_stats.txt', valid_pos_loss_stats)
            np.savetxt(save_path + '/loss/valid_chunk_loss_stats.txt', valid_chunk_loss_stats)
            np.savetxt(save_path + '/accuracy/valid_pos_stats.txt', valid_pos_stats)
            np.savetxt(save_path + '/accuracy/valid_chunk_stats.txt', valid_chunk_stats)

            np.savetxt(save_path + '/loss/train_loss_stats.txt', train_loss_stats)
            np.savetxt(save_path + '/loss/train_pos_loss_stats.txt', train_pos_loss_stats)
            np.savetxt(save_path + '/loss/train_chunk_loss_stats.txt', train_chunk_loss_stats)
            np.savetxt(save_path + '/accuracy/train_pos_stats.txt', train_pos_stats)
            np.savetxt(save_path + '/accuracy/train_chunk_stats.txt', train_chunk_stats)


    if write_to_file == True:
            with tf.Graph().as_default(), tf.Session() as session:
                initializer = tf.random_uniform_initializer(-config.init_scale,
                                                            config.init_scale)

                with tf.variable_scope("final_model", reuse=None, initializer=initializer):
                    mTrain = Shared_Model(is_training=True, config=config, num_pos_tags=num_pos_tags,
                    num_chunk_tags=num_chunk_tags, vocab_size=vocab_size,
                    word_embedding=word_embedding, projection_size=projection_size)

                with tf.variable_scope("final_model", reuse=True, initializer=initializer):
                    mTest = Shared_Model(is_training=False, config=config, num_pos_tags=num_pos_tags,
                    num_chunk_tags=num_chunk_tags, vocab_size=vocab_size,
                    word_embedding=word_embedding, projection_size=projection_size)

                print("initialise variables")
                tf.initialize_all_variables().run()
                print("initialise word embeddings")
                session.run(mTrain.embedding_init, {mTrain.embedding_placeholder: word_embedding})
                session.run(mTest.embedding_init, {mTest.embedding_placeholder: word_embedding})




                # Train given epoch parameter
                if config.random_mix == False:
                    print('Train Given Best Epoch Parameter :' + str(best_chunk_epoch[0]))
                    for i in range(best_chunk_epoch[0]):
                        print("Epoch: %d" % (i + 1))
                        if config.ptb == False:
                            _, _, _, _, _, _, _, _, _, _ = \
                                run_epoch(session, mTrain,
                                          words_ptb, pos_ptb, chunk_ptb,
                                          num_pos_tags, num_chunk_tags, vocab_size, num_steps,
                                          verbose=True, model_type="LM")

                        _, posp_c, chunkp_c, _, _, _, _, _, _, _ = \
                            run_epoch(session, mTrain,
                                      words_c, pos_c, chunk_c,
                                      num_pos_tags, num_chunk_tags, vocab_size,
                                      verbose=True, model_type=model_type)

                else:
                    print('Train Given Best Epoch Parameter :' + str(best_chunk_epoch[0]))
                    # an additional if statement to get the gold vs pred connections
                    if i > num_batches_gold:
                        gold_percent = gold_percent * 0.8
                    else:
                        gold_percent = 1
                    if np.random.rand(1) < gold_percent:
                        gold_embed = 1
                    else:
                        gold_embed = 0
                    for i in range(best_chunk_epoch[0]):
                        print("Epoch: %d" % (i + 1))
                        _, posp_c, chunkp_c, _, post_c, chunkt_c, _, _, _, _, ptb_iter = \
                            run_epoch_random.run_epoch(session, mTrain,
                                      words_c, words_ptb, pos_c, pos_ptb, chunk_c, chunk_ptb,
                                      num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                                      ptb_batches, ptb_iter, verbose=True, model_type=model_type)


                print('Getting Testing Predictions')
                test_loss, posp_test, chunkp_test, lmp_test, post_test, chunkt_test, lmt_test, pos_test_loss, chunk_test_loss, lm_test_loss, ptb_iter = \
                    run_epoch_random.run_epoch(session, mTest,
                              words_test, words_ptb, pos_test, pos_ptb, chunk_test, chunk_ptb,
                              num_pos_tags, num_chunk_tags, vocab_size, num_steps, gold_embed, config,
                              ptb_batches, ptb_iter, verbose=True,  model_type=model_type, valid=True)

                print('Writing Predictions')
                # prediction reshaping
                posp_c = reader._res_to_list(posp_c, config.batch_size, num_steps,
                                             pos_to_id, len(words_c), to_str=True)
                posp_test = reader._res_to_list(posp_test, config.batch_size, num_steps,
                                                pos_to_id, len(words_test), to_str=True)
                chunkp_c = reader._res_to_list(chunkp_c, config.batch_size, num_steps,
                                               chunk_to_id, len(words_c),to_str=True)
                chunkp_test = reader._res_to_list(chunkp_test, config.batch_size, num_steps,
                                                  chunk_to_id, len(words_test),  to_str=True)

                post_c = reader._res_to_list(post_c, config.batch_size, num_steps,
                                             pos_to_id, len(words_c), to_str=True)
                post_test = reader._res_to_list(post_test, config.batch_size, num_steps,
                                                pos_to_id, len(words_test), to_str=True)
                chunkt_c = reader._res_to_list(chunkt_c, config.batch_size, num_steps,
                                               chunk_to_id, len(words_c),to_str=True)
                chunkt_test = reader._res_to_list(chunkt_test, config.batch_size, num_steps,
                                                  chunk_to_id, len(words_test),  to_str=True)

                # save pickle - save_path + '/saved_variables.pkl'
                print('saving checkpoint')
                saveload.save(save_path + '/fin_model.ckpt', session)

                words_t = [id_to_word[k] for k in words_t[num_steps-1:]]
                words_v = [id_to_word[k] for k in words_v[num_steps-1:]]
                words_c = [id_to_word[k] for k in words_c[num_steps-1:]]
                words_test = [id_to_word[k] for k in words_test[num_steps-1:]]

                # find the accuracy
                print('finding test accuracy')
                pos_acc = np.sum(posp_test==post_test)/float(len(posp_test))
                chunk_F1 = f1_score(chunkt_test, chunkp_test,average="weighted")

                print("POS Test Accuracy (Both): " + str(pos_acc))
                print("Chunk Test F1(Both): " + str(chunk_F1))

                print("POS Test Accuracy (Train): " + str(pos_acc_train))
                print("Chunk Test F1 (Train): " + str(chunk_F1_train))


                if test==False:
                    train_custom = np.hstack((np.array(words_t).reshape(-1,1), np.char.upper(post_t), np.char.upper(chunkt_t)))
                    valid_custom = np.hstack((np.array(words_v).reshape(-1,1), np.char.upper(post_v), np.char.upper(chunkt_v)))
                combined = np.hstack((np.array(words_c).reshape(-1,1), np.char.upper(post_c), np.char.upper(chunkt_c)))
                test_data = np.hstack((np.array(words_test).reshape(-1,1), np.char.upper(post_test), np.char.upper(chunkt_test)))

                print('loaded text')

                if test==False:
                    chunk_pred_train = np.concatenate((train_custom, np.char.upper(chunkp_t).reshape(-1,1)), axis=1)
                    chunk_pred_val = np.concatenate((valid_custom, np.char.upper(chunkp_v).reshape(-1,1)), axis=1)
                chunk_pred_c = np.concatenate((combined, np.char.upper(chunkp_c).reshape(-1,1)), axis=1)
                chunk_pred_test = np.concatenate((test_data, np.char.upper(chunkp_test).reshape(-1,1)), axis=1)
                if test==False:
                    pos_pred_train = np.concatenate((train_custom, np.char.upper(posp_t).reshape(-1,1)), axis=1)
                    pos_pred_val = np.concatenate((valid_custom, np.char.upper(posp_v).reshape(-1,1)), axis=1)
                pos_pred_c = np.concatenate((combined, np.char.upper(posp_c).reshape(-1,1)), axis=1)
                pos_pred_test = np.concatenate((test_data, np.char.upper(posp_test).reshape(-1,1)), axis=1)

                print('finished concatenating, about to start saving')

                if test == False:
                    np.savetxt(save_path + '/predictions/chunk_pred_train.txt',
                               chunk_pred_train, fmt='%s')
                    print('writing to ' + save_path + '/predictions/chunk_pred_train.txt')
                    np.savetxt(save_path + '/predictions/chunk_pred_val.txt',
                               chunk_pred_val, fmt='%s')
                    print('writing to ' + save_path + '/predictions/chunk_pred_val.txt')

                np.savetxt(save_path + '/predictions/chunk_pred_combined.txt',
                           chunk_pred_c, fmt='%s')
                print('writing to ' + save_path + '/predictions/chunk_pred_combined.txt')
                np.savetxt(save_path + '/predictions/chunk_pred_test.txt',
                           chunk_pred_test, fmt='%s')
                print('writing to ' + save_path + '/predictions/chunk_pred_test.txt')

                if test == False:
                    np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                               pos_pred_train, fmt='%s')
                    print('writing to ' + save_path + '/predictions/pos_pred_train.txt')
                    np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                               pos_pred_val, fmt='%s')
                    print('writing to ' + save_path + '/predictions/pos_pred_val.txt')

                np.savetxt(save_path + '/predictions/pos_pred_combined.txt',
                           pos_pred_c, fmt='%s')
                np.savetxt(save_path + '/predictions/pos_pred_test.txt',
                           pos_pred_test, fmt='%s')

    else:
        print('Best Validation F1 ' + str(best_chunk_epoch[1]))
        print('Best Validation Epoch ' + str(best_chunk_epoch[0]))
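Example #10 checkpoints with saveload.save(save_path + '/val_model.pkl', session); that helper is not shown. The sketch below pickles the current values of the trainable variables as one plausible reading of that call; wrapping tf.train.Saver would be the more conventional alternative.

# Hypothetical saveload.save() matching the calls in Example #10 (TF 1.x API).
# Pickling the trainable variables' values is an assumption about the helper.
import pickle
import tensorflow as tf

def save(path, session):
    variables = tf.trainable_variables()
    values = session.run(variables)
    names = [v.name for v in variables]
    with open(path, 'wb') as f:
        pickle.dump(dict(zip(names, values)), f)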
Example #11
        igns = ([-112.676039],[40.339372],['2013-08-10T20:00:00Z'])
        perims = './patch/perim'
        return bounds, time_iso, igns, perims
    def saddleridge():
        bounds = (-118.60684204101562, -118.35965728759766, 34.226539611816406, 34.43047332763672)
        time_iso = ('2019-10-10T00:00:00Z', '2019-10-15T00:00:00Z')
        igns = None
        perims = './saddleridge/perim'
        return bounds, time_iso, igns, perims
    def polecreek():
        bounds = (-111.93914, -111.311035, 39.75985, 40.239746)
        time_iso = ('2018-09-09T00:00:00Z', '2018-09-23T00:00:00Z')
        igns = None
        perims = './polecreek/perim'
        return bounds, time_iso, igns, perims

    # Creating the options
    options = {1: pioneer, 2: patch, 3: saddleridge, 4: polecreek}

    # Defining the option depending on the experiment
    bounds, time_iso, igns, perims = options[exp]()

    # Processing infrared perimeters
    p = process_infrared_perimeters(perims,bounds,time=time_iso,plot=plot,gen_polys=True)
    # Processing ignitions if defined
    if igns:
        p.update(process_ignitions(igns,bounds,time=time_iso))
    # Saving results
    sl.save(p,'perimeters')
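    # Follow-up sketch (assumption: the same `sl`/saveload module as above,
    # whose sl.load counterpart appears in other examples in this file): the
    # saved result can be read back and sanity-checked like this.
    p_back = sl.load('perimeters')
    print('reloaded %d perimeter/ignition entries' % len(p_back))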

Ejemplo n.º 12
0
            ur.append(place_tile(mxtile + 1, mytile + 1, current))
    if mytile < lvl_h - 1: # D
        ur.append(place_tile(mxtile, mytile + 1, current))
    return tile_rect(mxtile, mytile, -1, -1, 3, 3).unionall(ur)

########## draw loop ##########
while 1:
    updaterects = []
    fullstagerender = False
    
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_z: z_down = True
            if event.key == pygame.K_SPACE: space_down = True
            if event.key == pygame.K_s: saveload.save(lvl)
        elif event.type == pygame.KEYUP:
            if event.key == pygame.K_z: z_down = False
            if event.key == pygame.K_SPACE: space_down = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == 1:
                # tray selection
                if tray_rect.collidepoint(*event.pos):
                    tiletray.mouse_select(event.pos[0] - tray_rect.x,
                                          event.pos[1] - tray_rect.y)
                    current = tiletray.get_val()
                    updaterects.append(tray_rect)
                elif stage_rect.collidepoint(*event.pos):
                    pan_mL_down = True
        elif event.type == pygame.MOUSEBUTTONUP:
            if event.button == 1:
Ejemplo n.º 13
0
def save():
    # If no file has been chosen yet, fall back to the "Save As" dialog;
    # otherwise write the current wave list straight to the known filename.
    if filename == "":
        saveas()
    else:
        saveload.save(wavlst, filename)
Ejemplo n.º 14
0
            data.update(
                process_infrared_perimeters(perim_path, bbox, time=time_iso))
        if forecast_exists and not cloud:
            print ''
            print '>> Getting forecast from %s <<' % forecast_path
            data.update(
                process_forecast_slides_wrfout(forecast_path,
                                               bbox,
                                               time=time_iso))

        if data:
            print ''
            print '>> Saving satellite data file (data) <<'
            sys.stdout.flush()
            time_num = map(time_iso2num, time_iso)
            sl.save((data, fxlon, fxlat, time_num), satellite_file)
            print 'data file saved correctly!'
        else:
            print ''
            print 'ERROR: No data obtained...'
            sys.exit(1)

    print ''
    if (not fire_exists) or (not gearth_exists and plot_observed):
        print '>> Generating KML of fire and ground detections <<'
        sys.stdout.flush()
        # sort the granules by dates
        sdata = sort_dates(data)
    if fire_exists:
        print '>> File %s already created! <<' % fire_file
    else:
Ejemplo n.º 15
0
def gameplay():
    # class_respect, teachers and grades are updated in place below, so they
    # must be declared global; without this the "+=" lines would raise
    # UnboundLocalError.
    global class_respect, teachers, grades

    sg.theme('Purple')

    fin = open('script/' + str(year) + 'grade.dat', 'r')
    n_of_situations = int(fin.readline())

    n_of_situations = n_of_situations - int(loaded_situation) + 1
    for _ in range(int(loaded_situation) - 1):
        fin.readline()  #situation
        fin.readline()  #option1
        fin.readline()  #consequences
        fin.readline()  # reply1
        fin.readline()  #option2
        fin.readline()  #consequences
        fin.readline()  # reply2
        fin.readline()  #option3
        fin.readline()  #consequences
        fin.readline()  # reply3
        fin.readline()  #option4
        fin.readline()  #consequences
        fin.readline()  # reply4
        fin.readline()  #void

    for _ in range(n_of_situations):
        situation = fin.readline()

        option1 = fin.readline()
        numeric_cons1 = list(map(lambda x: int(x), fin.readline().split()))
        reply1 = fin.readline()

        option2 = fin.readline()
        numeric_cons2 = list(map(lambda x: int(x), fin.readline().split()))
        reply2 = fin.readline()

        option3 = fin.readline()
        numeric_cons3 = list(map(lambda x: int(x), fin.readline().split()))
        reply3 = fin.readline()

        option4 = fin.readline()
        numeric_cons4 = list(map(lambda x: int(x), fin.readline().split()))
        reply4 = fin.readline()

        this_is_void = fin.readline()

        opt_1_3_col = [[sg.Button(option1, size=(35, 3.5))],
                       [sg.Button(option3, size=(35, 3.5))]]
        opt_2_4_col = [[sg.Button(option2, size=(35, 3.5))],
                       [sg.Button(option4, size=(35, 3.5))]]

        layout = [[
            sg.Text('Classmate respect: ' + str(class_respect) +
                    ' Teacher relations: ' + str(teachers) +
                    ' Grades: ' + str(grades))
        ], [sg.Multiline(situation, size=(590, 15))],
                  [sg.Column(opt_1_3_col),
                   sg.Column(opt_2_4_col)], [sg.Button('Save', size=(100, 1))],
                  [sg.Button('Quit', size=(100, 1))]]

        gameplay_window = sg.Window('THE GAME', layout, size=(600, 440))

        while True:
            event, values = gameplay_window.read()
            if event == sg.WIN_CLOSED or event == 'Quit':
                gameplay_window.close()
                fin.close()
                return
            if event == 'Save':
                svld.save(class_respect, teachers, grades, year, situation)
            if event == option1:
                class_respect += numeric_cons1[0]
                teachers += numeric_cons1[1]
                grades += numeric_cons1[2]
                gameplay_window.close()
                replyWindow(reply1)
                break
            if event == option2:
                class_respect += numeric_cons2[0]
                teachers += numeric_cons2[1]
                grades += numeric_cons2[2]
                gameplay_window.close()
                replyWindow(reply2)
                break
            if event == option3:
                class_respect += numeric_cons3[0]
                teachers += numeric_cons3[1]
                grades += numeric_cons3[2]
                gameplay_window.close()
                replyWindow(reply3)
                break
            if event == option4:
                class_respect += numeric_cons4[0]
                teachers += numeric_cons4[1]
                grades += numeric_cons4[2]
                gameplay_window.close()
                replyWindow(reply4)
                break

    fin.close()
    info = finishWindow(class_respect, teachers, grades)
    if info is not None:
        return info

    gameplay_window.close()
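# A compact alternative (sketch only) to the four near-identical option
# branches in the event loop above: map each button label to its consequences
# and reply text, then resolve any of them with a single lookup.
def handle_option(event, window, option_map):
    # option_map: {button label: (numeric consequences, reply text)}
    global class_respect, teachers, grades
    if event not in option_map:
        return False
    cons, reply = option_map[event]
    class_respect += cons[0]
    teachers += cons[1]
    grades += cons[2]
    window.close()
    replyWindow(reply)
    return True

# Inside the loop, the four if-blocks would collapse to:
# if handle_option(event, gameplay_window,
#                  {option1: (numeric_cons1, reply1),
#                   option2: (numeric_cons2, reply2),
#                   option3: (numeric_cons3, reply3),
#                   option4: (numeric_cons4, reply4)}):
#     break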
Ejemplo n.º 16
0
            np.arange(bounds[2],bounds[3],dlat))
maxsize = 500
coarsening=np.int(1+np.max(fxlon.shape)/maxsize)
fxlon = fxlon[0::coarsening,0::coarsening]
fxlat = fxlat[0::coarsening,0::coarsening]
bbox = (fxlon.min(),fxlon.max(),fxlat.min(),fxlat.max())
time_num = map(time_iso2num,time_iso)

print ''
print '>> Retrieving satellite data <<'
sys.stdout.flush()
data = retrieve_af_data(bbox,time_iso)

print ''
print '>> Saving data file <<'
sl.save((data,fxlon,fxlat,time_num),'data')
# sort the granules by dates
sdata=sort_dates(data)

print ''
print '>> Creating KMZ file <<'
# creating KMZ overlay of each information
# create the Basemap to plot into
bmap = Basemap(projection='merc',llcrnrlat=bbox[2], urcrnrlat=bbox[3], llcrnrlon=bbox[0], urcrnrlon=bbox[1])
# initialize array
kmld = []
# for each observed information
for idx, g in enumerate(sdata):
    # create png name
    pngfile = g[0]+'.png'
    # create timestamp for KML
Ejemplo n.º 17
0
def main(model_type, dataset_path, save_path):
    """Main"""
    config = Config()
    raw_data = reader.raw_x_y_data(
        dataset_path, config.num_steps)
    words_t, pos_t, chunk_t, words_v, \
    pos_v, chunk_v, word_to_id, pos_to_id, \
    chunk_to_id, words_test, pos_test, chunk_test, \
    words_c, pos_c, chunk_c = raw_data

    config.num_pos_tags = len(pos_to_id)
    config.num_chunk_tags = len(chunk_to_id)

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)

        # model to train hyperparameters on
        with tf.variable_scope("hyp_model", reuse=None, initializer=initializer):
            m = Shared_Model(is_training=True, config=config)
        with tf.variable_scope("hyp_model", reuse=True, initializer=initializer):
            mvalid = Shared_Model(is_training=False, config=config)

        # model that trains, given hyper-parameters
        with tf.variable_scope("final_model", reuse=None, initializer=initializer):
            mTrain = Shared_Model(is_training=True, config=config)
        with tf.variable_scope("final_model", reuse=True, initializer=initializer):
            mTest = Shared_Model(is_training=False, config=config)

        tf.initialize_all_variables().run()

        # Create an empty array to hold [epoch number, loss]
        best_epoch = [0, 100000]

        print('finding best epoch parameter')
        # ====================================
        # Create vectors for training results
        # ====================================

        # Create empty vectors for loss
        train_loss_stats = np.array([])
        train_pos_loss_stats = np.array([])
        train_chunk_loss_stats = np.array([])
        # Create empty vectors for accuracy
        train_pos_stats = np.array([])
        train_chunk_stats = np.array([])

        # ====================================
        # Create vectors for validation results
        # ====================================
        # Create empty vectors for loss
        valid_loss_stats = np.array([])
        valid_pos_loss_stats = np.array([])
        valid_chunk_loss_stats = np.array([])
        # Create empty vectors for accuracy
        valid_pos_stats = np.array([])
        valid_chunk_stats = np.array([])

        for i in range(config.max_epoch):
            print("Epoch: %d" % (i + 1))
            mean_loss, posp_t, chunkp_t, post_t, chunkt_t, pos_loss, chunk_loss = \
                run_epoch(session, m,
                          words_t, pos_t, chunk_t,
                          config.num_pos_tags, config.num_chunk_tags,
                          verbose=True, model_type=model_type)

            # Save stats for charts
            train_loss_stats = np.append(train_loss_stats, mean_loss)
            train_pos_loss_stats = np.append(train_pos_loss_stats, pos_loss)
            train_chunk_loss_stats = np.append(train_chunk_loss_stats, chunk_loss)

            # get predictions as list
            posp_t = reader.res_to_list(posp_t, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_t))
            chunkp_t = reader.res_to_list(chunkp_t, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_t))
            post_t = reader.res_to_list(post_t, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_t))
            chunkt_t = reader.res_to_list(chunkt_t, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_t))

            # find the accuracy
            pos_acc = np.sum(posp_t == post_t) / float(len(posp_t))
            chunk_acc = np.sum(chunkp_t == chunkt_t) / float(len(chunkp_t))

            # add to array
            train_pos_stats = np.append(train_pos_stats, pos_acc)
            train_chunk_stats = np.append(train_chunk_stats, chunk_acc)

            # print for tracking
            print("Pos Training Accuracy After Epoch %d :  %3f" % (i + 1, pos_acc))
            print("Chunk Training Accuracy After Epoch %d : %3f" % (i + 1, chunk_acc))

            valid_loss, posp_v, chunkp_v, post_v, chunkt_v, pos_v_loss, chunk_v_loss = \
                run_epoch(session, mvalid, words_v, pos_v, chunk_v,
                          config.num_pos_tags, config.num_chunk_tags,
                          verbose=True, valid=True, model_type=model_type)

            # Save loss for charts
            valid_loss_stats = np.append(valid_loss_stats, valid_loss)
            valid_pos_loss_stats = np.append(valid_pos_loss_stats, pos_v_loss)
            valid_chunk_loss_stats = np.append(valid_chunk_loss_stats, chunk_v_loss)

            # get predictions as list

            posp_v = reader.res_to_list(posp_v, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_v))
            chunkp_v = reader.res_to_list(chunkp_v, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_v))
            chunkt_v = reader.res_to_list(chunkt_v, config.batch_size,
                                          config.num_steps, chunk_to_id, len(words_v))
            post_v = reader.res_to_list(post_v, config.batch_size, config.num_steps,
                                        pos_to_id, len(words_v))

            # find accuracy
            pos_acc = np.sum(posp_v == post_v) / float(len(posp_v))
            chunk_acc = np.sum(chunkp_v == chunkt_v) / float(len(chunkp_v))

            print("Pos Validation Accuracy After Epoch %d :  %3f" % (i + 1, pos_acc))
            print("Chunk Validation Accuracy After Epoch %d : %3f" % (i + 1, chunk_acc))

            # add to stats
            valid_pos_stats = np.append(valid_pos_stats, pos_acc)
            valid_chunk_stats = np.append(valid_chunk_stats, chunk_acc)

            # update best parameters
            if (valid_loss < best_epoch[1]):
                best_epoch = [i + 1, valid_loss]

        # Save loss & accuracy plots
        np.savetxt(save_path + '/loss/valid_loss_stats.txt', valid_loss_stats)
        np.savetxt(save_path + '/loss/valid_pos_loss_stats.txt', valid_pos_loss_stats)
        np.savetxt(save_path + '/loss/valid_chunk_loss_stats.txt', valid_chunk_loss_stats)
        np.savetxt(save_path + '/accuracy/valid_pos_stats.txt', valid_pos_stats)
        np.savetxt(save_path + '/accuracy/valid_chunk_stats.txt', valid_chunk_stats)

        np.savetxt(save_path + '/loss/train_loss_stats.txt', train_loss_stats)
        np.savetxt(save_path + '/loss/train_pos_loss_stats.txt', train_pos_loss_stats)
        np.savetxt(save_path + '/loss/train_chunk_loss_stats.txt', train_chunk_loss_stats)
        np.savetxt(save_path + '/accuracy/train_pos_stats.txt', train_pos_stats)
        np.savetxt(save_path + '/accuracy/train_chunk_stats.txt', train_chunk_stats)

        # Train given epoch parameter
        print('Train Given Best Epoch Parameter :' + str(best_epoch[0]))
        for i in range(best_epoch[0]):
            print("Epoch: %d" % (i + 1))
            _, posp_c, chunkp_c, _, _, _, _ = \
                run_epoch(session, mTrain,
                          words_c, pos_c, chunk_c,
                          config.num_pos_tags, config.num_chunk_tags,
                          verbose=True, model_type=model_type)

        print('Getting Testing Predictions')
        _, posp_test, chunkp_test, _, _, _, _ = \
            run_epoch(session, mTest,
                      words_test, pos_test, chunk_test,
                      config.num_pos_tags, config.num_chunk_tags,
                      verbose=True, valid=True, model_type=model_type)

        print('Writing Predictions')
        # prediction reshaping
        posp_c = reader.res_to_list(posp_c, config.batch_size, config.num_steps,
                                    pos_to_id, len(words_c))
        posp_test = reader.res_to_list(posp_test, config.batch_size, config.num_steps,
                                       pos_to_id, len(words_test))
        chunkp_c = reader.res_to_list(chunkp_c, config.batch_size,
                                      config.num_steps, chunk_to_id, len(words_c))
        chunkp_test = reader.res_to_list(chunkp_test, config.batch_size, config.num_steps,
                                         chunk_to_id, len(words_test))

        # save pickle - save_path + '/saved_variables.pkl'
        print('saving variables (pickling)')
        saveload.save(save_path + '/saved_variables.pkl', session)

        train_custom = reader.read_tokens(dataset_path + '/train.txt', 0)
        valid_custom = reader.read_tokens(dataset_path + '/validation.txt', 0)
        combined = reader.read_tokens(dataset_path + '/train_val_combined.txt', 0)
        test_data = reader.read_tokens(dataset_path + '/test.txt', 0)

        print('loaded text')

        chunk_pred_train = np.concatenate((np.transpose(train_custom), chunkp_t), axis=1)
        chunk_pred_val = np.concatenate((np.transpose(valid_custom), chunkp_v), axis=1)
        chunk_pred_c = np.concatenate((np.transpose(combined), chunkp_c), axis=1)
        chunk_pred_test = np.concatenate((np.transpose(test_data), chunkp_test), axis=1)
        pos_pred_train = np.concatenate((np.transpose(train_custom), posp_t), axis=1)
        pos_pred_val = np.concatenate((np.transpose(valid_custom), posp_v), axis=1)
        pos_pred_c = np.concatenate((np.transpose(combined), posp_c), axis=1)
        pos_pred_test = np.concatenate((np.transpose(test_data), posp_test), axis=1)

        print('finished concatenating, about to start saving')

        np.savetxt(save_path + '/predictions/chunk_pred_train.txt',
                   chunk_pred_train, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_train.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_val.txt',
                   chunk_pred_val, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_val.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_combined.txt',
                   chunk_pred_c, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_combined.txt')
        np.savetxt(save_path + '/predictions/chunk_pred_test.txt',
                   chunk_pred_test, fmt='%s')
        print('writing to ' + save_path + '/predictions/chunk_pred_test.txt')
        np.savetxt(save_path + '/predictions/pos_pred_train.txt',
                   pos_pred_train, fmt='%s')
        print('writing to ' + save_path + '/predictions/pos_pred_train.txt')
        np.savetxt(save_path + '/predictions/pos_pred_val.txt',
                   pos_pred_val, fmt='%s')
        print('writing to ' + save_path + '/predictions/pos_pred_val.txt')
        np.savetxt(save_path + '/predictions/pos_pred_combined.txt',
                   pos_pred_c, fmt='%s')
        np.savetxt(save_path + '/predictions/pos_pred_test.txt',
                   pos_pred_test, fmt='%s')
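# Optional follow-up (sketch, not in the original): the loss curves written
# above with np.savetxt are plain one-column text files, so they can be
# plotted directly, e.g. with matplotlib.
import numpy as np
import matplotlib.pyplot as plt

def plot_loss_curves(save_path):
    train = np.loadtxt(save_path + '/loss/train_loss_stats.txt')
    valid = np.loadtxt(save_path + '/loss/valid_loss_stats.txt')
    plt.plot(train, label='train')
    plt.plot(valid, label='validation')
    plt.xlabel('epoch')
    plt.ylabel('mean loss')
    plt.legend()
    plt.savefig(save_path + '/loss/loss_curves.png')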
Ejemplo n.º 18
0
def save(self, event):
    # GUI event callback: delegate saving of the manager object to saveload.
    saveload.save(self.manager)
Ejemplo n.º 19
0
import glob

# Imports assumed by the rest of this snippet (the original header is not
# fully shown): numpy for the index ranges and the project's saveload module
# (aliased as sl, as in the other examples); the `wt` time-series query helper
# is assumed to be imported elsewhere in the original script.
import numpy as np
import saveload as sl

# Select a data range to work with:
Start = '1900-08-01 00:00:00'
End = '2060-02-08 00:00:00'

# List of stations
MyList = wt.searchStation('OJP')

# Station for AE:
station_name = MyList[3]
MyTS = wt.searchTimeseries('', station_name)
indexes = np.arange(8, 9)
TS_Selection = MyTS[indexes]
df = wt.getTimeseries(TS_Selection, station_name, Start, End)
sl.save(df, 'OJP_AE')

# Station for precipitation:
station_name = MyList[5]
MyTS = wt.searchTimeseries('', station_name)
indexes = np.arange(0, 5)
TS_Selection = MyTS[indexes]
df = wt.getTimeseries(TS_Selection, station_name, Start, End)
sl.save(df, 'OJP_Precip')

# Station for snow depth:
station_name = MyList[6]
MyTS = wt.searchTimeseries('', station_name)
indexes = np.arange(0, 3)
TS_Selection = MyTS[indexes]
df = wt.getTimeseries(TS_Selection, station_name, Start, End)
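# The three station blocks above repeat the same retrieve-and-save pattern; a
# possible helper (sketch only, reusing the wt/sl calls already shown; the
# output name in the example call is a placeholder) is:
def fetch_and_save(station_name, index_range, out_name):
    # Query the station's series, pick the requested indices and save them.
    series = wt.searchTimeseries('', station_name)
    selection = series[np.arange(*index_range)]
    df = wt.getTimeseries(selection, station_name, Start, End)
    sl.save(df, out_name)
    return df

# e.g. fetch_and_save(MyList[6], (0, 3), 'OJP_SnowDepth')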
Ejemplo n.º 20
0
def keyPressed(Engine,EngineModule,key,selection,objects):

	if key == EngineModule.Keys.K_J:
		ambient = Engine.getAmbientLight()
		factor = helpers.getModifiedVector(Engine,EngineModule,0.05)
		ambient = ambient + factor
		helpers.vecclamp(ambient)
		Engine.setAmbientLight(ambient)
		print("set ambient light to: " + str(ambient))

	if key == EngineModule.Keys.K_H:
		fov = Engine.getCameraFOV()
		if Engine.isKeyDown(EngineModule.Keys.K_1):
			fov += 2
		if Engine.isKeyDown(EngineModule.Keys.K_2):
			fov -= 2
		if fov < 0:
			fov = 0
		if fov > 180:
			fov = 180
		Engine.setCameraFOV(fov)
		print("set FieldOfView to: " + str(fov))

	if key == EngineModule.Keys.K_G:
		gravity = Engine.getGravity()
		factor = helpers.getModifiedVector(Engine,EngineModule,0.5)
		gravity = gravity + factor
		Engine.setGravity(gravity)
		print("set gravity to: " + str(gravity))

	if key == EngineModule.Keys.K_K:
		if Engine.isKeyDown(EngineModule.Keys.K_1):
			print("save scene.xml")
			saveload.save(Engine,EngineModule,"xmlscene/scene.xml",objects)
			print("done")
		if Engine.isKeyDown(EngineModule.Keys.K_2):
			print("save ragdoll.xml")
			saveload.save(Engine,EngineModule,"xmlscene/ragdoll.xml",objects)
			print("done")
		if Engine.isKeyDown(EngineModule.Keys.K_3):
			print("save test.xml")
			saveload.save(Engine,EngineModule,"xmlscene/test.xml",objects)
			print("done")

	if key == EngineModule.Keys.K_L:
		if Engine.isKeyDown(EngineModule.Keys.K_1):
			print("load scene.xml")
			saveload.load(Engine,EngineModule,"../xmlscene/scene.xml",objects)
			print("done")
		if Engine.isKeyDown(EngineModule.Keys.K_2):
			print("load ragdoll.xml")
			saveload.load(Engine,EngineModule,"xmlscene/ragdoll.xml",objects)
			print("done")
		if Engine.isKeyDown(EngineModule.Keys.K_3):
			print("load test.xml")
			saveload.load(Engine,EngineModule,"xmlscene/test.xml",objects)
			print("done")

	if key == EngineModule.Keys.K_Z:
		print("change material and visibilty")
		#if len(selection.get()) == 1:
		#	o = selection.get()[0]
		if (Engine.isKeyDown(EngineModule.Keys.K_1) or 
			Engine.isKeyDown(EngineModule.Keys.K_2) or
			Engine.isKeyDown(EngineModule.Keys.K_3) or
			Engine.isKeyDown(EngineModule.Keys.K_4)):
			for o in selection.get():
				if o and o.isGuiContainer():
					shapesNumber = o.howManyShapes()
					shapesList = []
					for i in range(0,shapesNumber):
						shape = o.getShapeByIndex(i)
						if Engine.isKeyDown(EngineModule.Keys.K_1):
							shape.setCustomMaterial()
						elif Engine.isKeyDown(EngineModule.Keys.K_2):
							shape.setMaterialName(Engine.getDefaultShadedMaterialName())
						elif Engine.isKeyDown(EngineModule.Keys.K_3):
							shape.setFinalShape()
						elif Engine.isKeyDown(EngineModule.Keys.K_4):
							shape.setNonFinalShape()
				elif o and o.isGuiShape():
					if Engine.isKeyDown(EngineModule.Keys.K_1):
						o.setCustomMaterial()
					elif Engine.isKeyDown(EngineModule.Keys.K_2):
						o.setMaterialName(Engine.getDefaultShadedMaterialName())
					elif Engine.isKeyDown(EngineModule.Keys.K_3):
						o.setFinalShape()
					elif Engine.isKeyDown(EngineModule.Keys.K_4):
						o.setNonFinalShape()

		if Engine.isKeyDown(EngineModule.Keys.K_5) or Engine.isKeyDown(EngineModule.Keys.K_6):
			if Engine.isKeyDown(EngineModule.Keys.K_5):
				objectsNumber = Engine.howManyObjects()
				for i in range(0,objectsNumber):
					o = Engine.getObject(i)
					if o.isGuiShape():
						if not o.isFinalShape():
							o.hide()
			elif Engine.isKeyDown(EngineModule.Keys.K_6):
				objectsNumber = Engine.howManyObjects()
				for i in range(0,objectsNumber):
					o = Engine.getObject(i)
					if o.isGuiShape():
						o.show()

	if key == EngineModule.Keys.K_N:
		if Engine.isKeyDown(EngineModule.Keys.K_1):
			if Engine.isKeyDown(EngineModule.Keys.K_EQUALS):
				Engine.setTimingFactor(Engine.getTimingFactor() * 0.5)
				print("set timingfactor: " +str(Engine.getTimingFactor()))
			else:
				Engine.setTimingFactor(Engine.getTimingFactor() * 0.9)
				print("set timingfactor: " +str(Engine.getTimingFactor()))
		if Engine.isKeyDown(EngineModule.Keys.K_2):
			if Engine.isKeyDown(EngineModule.Keys.K_EQUALS):
				Engine.setTimingFactor(Engine.getTimingFactor() * 2.0)
				print("set timingfactor: " +str(Engine.getTimingFactor()))
			else:
				Engine.setTimingFactor(Engine.getTimingFactor() * 1.1)
				print("set timingfactor: " +str(Engine.getTimingFactor()))

	if key == EngineModule.Keys.K_I:
		print("fps: " + str(float(1000.0 / Engine.getTimeDifference())))
		for o in selection.get():
			print("object: " + str(o))
			print("    name: " + str(o.getName()))
			print("    uuid: " + str(o.readUuid()))
			if o.isActor():
				print("    position: " + str(o.getPosition()))
				print("    size: " + str(o.getSize()))
				print("    orientation: " + str(o.getOrientation().toAngles()))
				print("    mass: " + str(o.getMass()))
			if o.isJoint():
				print("    yLimit: " + str(o.isJoint().getYLimit()))
				print("    zLimit: " + str(o.isJoint().getZLimit()))
				print("    anchor 1: " + str(o.isJoint().getAnchor1()))
				print("    anchor 2: " + str(o.isJoint().getAnchor2()))
				print("    anchor 1 orien: " + str(o.isJoint().getAnchor1Orientation().toAngles()))
				print("    anchor 2 orien: " + str(o.isJoint().getAnchor2Orientation().toAngles()))
				print("    motorOn: " + str(o.isJoint().isMotorOn()))
				print("    motor target: " + str(o.isJoint().getMotorTarget().toAngles()))

		body,joint = bodyjoint.getBodyJoint(selection.get())
		if ((body and joint) and bodyjoint.isBodyJointConnected(body,joint)):
			jointPos = bodyjoint.getBodyJointAnchorSizePos(body,joint)
			print("body joint size pos: " + str(jointPos))
Ejemplo n.º 21
0
# Imports assumed by this snippet (the original file header is not shown);
# `shuffle` is taken to be sklearn's utility, while `to_matrix` and `save`
# (used further down) are project helpers assumed to be defined elsewhere.
import os
import time

import gensim as gs
import keras
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
from sklearn.utils import shuffle
from tqdm import tqdm


def pipeline(frame, vocabname):
    alpha = 0.0001
    n_epochs = 200
    n_parts = 1
    batch_size = 128
    sentence_embeddings = []

    vocab = gs.models.KeyedVectors.load_word2vec_format('models/' + vocabname +
                                                        '.bin',
                                                        binary=True)
    vocabsize = 300

    frame = shuffle(frame)
    matrixes, tones, strings, freqs = to_matrix(frame, vocabsize, vocab)
    num_words = sum(freqs.values())
    for key in freqs.keys():
        freqs[key] = freqs[key] / num_words

    for i in tqdm(range(len(matrixes))):
        embedding = np.zeros((vocabsize, ))
        words = strings[i].split(' ')
        for j in range(len(words)):
            embedding += matrixes[i][j] * alpha / (alpha + freqs[words[j]])
        sentence_embeddings.append(embedding / len(words))

    del matrixes
    del strings

    sentence_embeddings = np.array(sentence_embeddings).T
    u, s, vh = np.linalg.svd(sentence_embeddings, full_matrices=False)
    fsv = np.array(u.T[0], ndmin=2)
    sm = np.matmul(fsv.T, fsv)
    sentence_embeddings = sentence_embeddings.T
    for i in range(sentence_embeddings.shape[0]):
        sentence_embeddings[i] -= np.matmul(sm, sentence_embeddings[i])

    tones = np.array(tones) * 0.5 + 0.5
    train_embeddings = sentence_embeddings[0:sentence_embeddings.shape[0] *
                                           4 // 5]
    train_tones = tones[0:sentence_embeddings.shape[0] * 4 // 5]

    test_embeddings = sentence_embeddings[sentence_embeddings.shape[0] * 4 //
                                          5:]
    test_tones = tones[sentence_embeddings.shape[0] * 4 // 5:]

    model_name = 'supermind'
    model = Sequential()
    model.add(Dense(2400, input_dim=300, activation='linear'))
    model.add(Dense(1, activation='sigmoid'))
    RMSpr = keras.optimizers.RMSprop(lr=0.0025)
    model.compile(loss='binary_crossentropy',
                  optimizer=RMSpr,
                  metrics=['accuracy'])

    times = []
    scores = []
    start_time = time.time()

    for i in range(n_epochs):
        model.fit(train_embeddings,
                  train_tones,
                  batch_size=batch_size,
                  epochs=1,
                  verbose=2,
                  validation_data=(test_embeddings, test_tones))
        score = model.evaluate(test_embeddings,
                               test_tones,
                               batch_size=batch_size,
                               verbose=1)
        scores.append(score)
        times.append(time.time() - start_time)
        print(score[1])

    path = os.path.abspath(os.getcwd()) + "/" + model_name + "_" + vocabname
    if not os.path.isdir(path):
        os.makedirs(path)
    model.save(path + "/model")
    save(scores, path, "/scores")
    save(times, path, "/times")
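# The per-word weight used above, alpha / (alpha + p(w)) with alpha = 1e-4,
# down-weights frequent words before averaging and resembles the smooth
# inverse frequency (SIF) style of sentence embedding (together with the
# removal of the first singular vector further up). Illustrative numbers:
alpha = 0.0001
for p_w in (0.01, 0.001, 0.0001):
    print('p(w) = %.4f  ->  weight = %.4f' % (p_w, alpha / (alpha + p_w)))
# p(w) = 0.0100  ->  weight = 0.0099
# p(w) = 0.0010  ->  weight = 0.0909
# p(w) = 0.0001  ->  weight = 0.5000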
Ejemplo n.º 22
0
    return X, y, c


if __name__ == "__main__":
    import saveload as sl
    real = True
    cloud = True

    if real:
        plot = True
        if cloud:
            time_num = [1376114400.0, 1376546400.0]
            scale = [1375898400.0, 1377410400.0]
            dst = './patch/wrfout_patch'
            X, y, c = process_forecast_wrfout(dst, scale, time_num, plot=plot)
            sl.save(dict({'X': X, 'y': y, 'c': c}), 'forecast')
        else:
            bounds = (-113.85068, -111.89413, 39.677563, 41.156837)
            dst = './patch/wrfout_patch'
            f = process_forecast_slides_wrfout(dst, bounds, plot=plot)
            sl.save(f, 'forecast')
    else:
        from infrared_perimeters import process_ignitions
        from setup import process_detections
        dst = 'ideal_test'
        plot = False
        ideal = sl.load(dst)
        kk = 4
        data = process_tign_g_slices(ideal['lon'][::kk, ::kk],
                                     ideal['lat'][::kk, ::kk],
                                     ideal['tign_g'][::kk, ::kk],