Example #1
0
def advection():
    """Load a saved advection marginal-PDF solution, plot it, and fit a sparse PDE model."""
    # Alternative saved runs kept for reference:
    #   advection_marginal_7397.npy, advection_marginal_6328.npy,
    #   advection_marginal_8028.npy, advection_marginal_4527.npy
    loadnamenpy = 'advection_marginal_5765.npy'

    # Case name is the first two underscore-separated tokens, e.g. 'advection_marginal'.
    case = '_'.join(loadnamenpy.split('_')[:2])

    dataman = DataIO(case)
    fuk, fu, gridvars, ICparams = dataman.loadSolution(loadnamenpy)
    grid = PdfGrid(gridvars)

    # Visual sanity checks of the loaded PDFs before learning.
    viz = Visualize(grid)
    viz.plot_fuk3D(fuk)
    viz.plot_fu3D(fu)
    viz.plot_fu(fu, dim='t', steps=5)
    viz.plot_fu(fu, dim='x', steps=5)
    viz.show()

    # Learn: sparse regression over 2nd-order features with cubic
    # polynomial variable coefficients.
    difflearn = PDElearn(fuk, grid, fu=fu, ICparams=ICparams, scase=case,
                         trainratio=0.8, debug=False, verbose=True)
    difflearn.fit_sparse(feature_opt='2ndorder',
                         variableCoef=True,
                         variableCoefBasis='simple_polynomial',
                         variableCoefOrder=3,
                         use_sindy=True,
                         sindy_alpha=0.001)
    def analyze(self, adjust=False, plot=False, learn=False, adjustparams=None, learnparams=None):
        """Load a saved marginal solution and optionally adjust, plot, and learn a PDE from it.

        Parameters
        ----------
        adjust : bool
            If True, shrink fu (in time) via ``self.adjust`` using ``adjustparams``.
        plot : bool
            If True, show 3D and slice plots of fu.
        learn : bool
            If True, run sparse PDE learning with the options in ``learnparams``.
        adjustparams : dict, optional
            Options forwarded to ``self.adjust``.
        learnparams : dict, optional
            Learning options; missing keys fall back to the defaults below.
        """
        # Avoid mutable default arguments, and merge user options over a complete
        # default set so callers may pass a partial dict. (The old default dict
        # only held 'feature_opt' and 'coeforder' while the body also read
        # 'sindy_alpha', 'RegCoef' and 'nzthresh', so calling with the default
        # raised KeyError.)
        adjustparams = adjustparams or {}
        defaults = {
            'feature_opt': '1storder',
            'coeforder': 1,
            'sindy_alpha': 0.001,  # matches the value used elsewhere in this file
            'RegCoef': 0.000005,   # TODO(review): confirm a sensible default
            'nzthresh': 1e-10,     # TODO(review): confirm a sensible default
        }
        learnparams = {**defaults, **(learnparams or {})}

        dataman = DataIO(self.case)
        fu, gridvars, ICparams = dataman.loadSolution(self.loadnamenpy, array_opt='marginal')

        # Make fu smaller (in time) before building the grid.
        if adjust:
            fu, gridvars = self.adjust(fu, gridvars, adjustparams)
        grid = PdfGrid(gridvars)

        if plot:
            V = Visualize(grid)
            V.plot_fu3D(fu)
            V.plot_fu(fu, dim='t', steps=5)
            V.plot_fu(fu, dim='x', steps=5)
            V.show()

        if learn:
            t0 = time.time()
            print('fu dimension: ', fu.shape)
            print('fu num elem.: ', np.prod(fu.shape))

            # Learn
            difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=self.case,
                                 trainratio=0.8, debug=False, verbose=True)
            difflearn.fit_sparse(feature_opt=learnparams['feature_opt'],
                                 variableCoef=True,
                                 variableCoefBasis='simple_polynomial',
                                 variableCoefOrder=learnparams['coeforder'],
                                 use_sindy=True,
                                 sindy_alpha=learnparams['sindy_alpha'],
                                 RegCoef=learnparams['RegCoef'],
                                 nzthresh=learnparams['nzthresh'])

            # Report elapsed time; the original printed t0 - time.time(),
            # which is always negative.
            print('learning took t = ', str(time.time() - t0))
Example #3
0
    def plot(self):
        """Load the saved marginal solution for this case and display its PDF plots."""
        io = DataIO(self.case)
        fu, gridvars, ICparams = io.loadSolution(self.loadnamenpy,
                                                 array_opt='marginal')
        grid = PdfGrid(gridvars)

        viz = Visualize(grid)
        viz.plot_fu3D(fu)
        viz.plot_fu(fu, dim='t', steps=5)
        viz.plot_fu(fu, dim='x', steps=5)
        viz.show()
	# NOTE(review): this fragment begins mid-function; names such as case,
	# adjustgrid, plot, trainratio, feature_opt, etc. come from the enclosing
	# (unseen) scope.
	# Model-selection criterion for the sparse regression.
	criterion			= 'bic'


	if "savenamepdf" not in locals():
		# Check if there is already a loadfile (if not load it)
		savenamepdf = 'advection_reaction_analytical_388_128.npy'
		dataman = DataIO(case) 
		fu, gridvars, ICparams = dataman.loadSolution(savenamepdf, array_opt='marginal')

	
	# Build the PDF grid and restrict fu to the requested subgrid.
	grid = PdfGrid(gridvars)
	fu = grid.adjust(fu, adjustgrid)

	if plot:
		s = 10
		V = Visualize(grid)
		V.plot_fu3D(fu)
		V.plot_fu(fu, dim='t', steps=s)
		V.plot_fu(fu, dim='x', steps=s)
		V.show()


	# Fit the sparse PDE model with the options gathered above.
	difflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case, trainratio=trainratio, verbose=True)
	
	output = difflearn.fit_sparse(feature_opt=feature_opt, variableCoef=variableCoef, variableCoefBasis=variableCoefBasis, \
	        variableCoefOrder=coeforder, use_rfe=use_rfe, rfe_alpha=rfe_alpha, nzthresh=nzthresh, maxiter=maxiter, \
            LassoType=LassoType, RegCoef=RegCoef, cv=cv, criterion=criterion, print_rfeiter=print_rfeiter, shuffle=shuffle, \
            basefile=savenamepdf, adjustgrid=adjustgrid, save=save, normalize=normalize, comments=comments)

	# Read the stored learning results back from the learn directory
	# (same base name, .txt extension).
	d = DataIO(case, directory=LEARNDIR)
	learndata, pdfdata, mcdata = d.readLearningResults(savenamepdf.split('.')[0]+'.txt', PDFdata=True, MCdata=True, display=False)
        # NOTE(review): this fragment begins with a dangling `else:`; the
        # matching `if` and names like train_doc_matrix, NUM_CATEGORIES, ROOT,
        # lang, WRITE, VISUALIZE, p come from the enclosing (unseen) scope.
        else:
            # Train model. Comment out if unneeded
            logging.info("Beginning training")
            lda = Model(num_categories=NUM_CATEGORIES)
            ldamodel = lda.create_model(train_doc_matrix,
                                        train_term_dictionary,
                                        ROOT,
                                        language=lang)
            logging.info('Model created')

        # Displays topics with top words
        logging.info('TOP WORDS OF EACH CATEGORY FOR FINAL MODEL')
        for i in ldamodel.print_topics():
            for j in i:
                logging.info(j)

        if WRITE:
            # Cluster information to csv
            test_clusters = p.cluster_data(doc_matrix=test_doc_matrix,
                                           ldamodel=ldamodel,
                                           to_csv=True,
                                           keywords=test_keywords,
                                           filenames=test_filenames,
                                           num_categories=NUM_CATEGORIES)
        if VISUALIZE:
            # Visualize model
            visualize = Visualize(num_categories=NUM_CATEGORIES, language=lang)
            visualize.visualize(ldamodel=ldamodel,
                                doc_matrix=test_doc_matrix,
                                raw_documents=test_docs)
Example #6
0
# Two-panel summary figure: learned coefficient dependence (left) and
# PDF snapshots over time at a fixed x-location (right).
fig, ax = plt.subplots(1, 2, figsize=(9, 4))

# Coefficients Dependence Multi: one curve per retained feature.
featarray, relevant_feats = A.getCoefDependence(output_vec,
                                                threshold=0.01,
                                                invert_sign=True)
for col, _feat in enumerate(relevant_feats):
    ax[0].plot(variable, featarray[:, col], '.-', linewidth=2)
ax[0].set_xlabel(xlabel, fontsize=14)
ax[0].set_ylabel('Coefficients', fontsize=14)
ax[0].legend(latexify_varcoef(relevant_feats, cdf=True), fontsize=14)
ax[0].grid(color='k', linestyle='--', linewidth=1)

# Right panel: f_u(U; x*, t) at s evenly spaced time snapshots,
# at the first x index where x > 2.0.
V = Visualize(grid0)
s = 10
snapidx = [int(i) for i in np.linspace(0, len(V.grid.tt) - 1, s)]
xidx = np.where(V.grid.xx > 2.00)[0]
for tidx in snapidx:
    ax[1].plot(V.grid.uu, fu0[:, xidx, tidx], linewidth=2)
# NOTE(review): legend labels read g.tt rather than V.grid.tt; `g` is defined
# elsewhere in the original file — confirm they refer to the same time grid.
leg = ['$t = %3.2f$ s' % g.tt[tidx] for tidx in snapidx]
ax[1].set_xlabel('$U$', fontsize=14)
ax[1].set_ylabel('$f_u(U; x^*, t)$', fontsize=14)
ax[1].legend(leg)
plt.show()

fig.savefig(FIGDIR + savename + '.pdf')

# # Plot boundary
Example #7
0
        # NOTE(review): this fragment begins mid-branch; `p`, `run_type`,
        # `params_model`, `data`, `vis_steps` and create_artificial_data /
        # initiate_models come from the enclosing (unseen) scope.
        # Parameters for the artificial data set.
        p["alpha"], p["gamma"] = .15, .30
        p["c_rr"], p["c_bb"] = .01, .05
        p["c_rb"], p["c_br"] = .20, .15
        create_artificial_data(p)

    elif run_type == 'v':
        """
        Run interactive visualisation of model
        Second argument is the timestep-interval
        """
        # set-up models
        models = initiate_models(params_model)

        # set-up visualisation
        data.assign_data(models)
        visualisation = Visualize(models)

        # run models: step every model per date; fit to measurements on
        # measurement dates, and refresh the visualisation every vis_steps
        # timesteps, pausing for user input.
        for t in data.daterange():
            print("Timestep %i" % data.get_timestep(t))
            for model in models:
                if t in data.measurements:
                    model.collect_data_fit()
                model.step(visualize=True)
            if data.get_timestep(t) % vis_steps == 0:
                visualisation.update()
                res = input(
                    "Press enter to continue simulation, type 'q' to stop current run:\n\t"
                )
                if res == 'q':
                    break
Example #8
0
def run_fusion(
        files, 
        has_camera=True, 
        has_wheel=True,
        data_direc='',
        write_results=True,
        is_move_video=True,
        is_interact=True,
        move_to_app=False,
        interactive_video='drivelog_temp.avi',
        ):
    """
    Callback that fuses the camera and steering-wheel CSV logs, classifies
    head-turn and lane-change events, annotates the drive video, and
    optionally visualizes/exports the results.

    NOTE(review): this is Python 2 code (print statements, xrange,
    tuple-unpacking lambdas).

    files              -- the two data csv files to fuse
    has_camera         -- run head-event annotation (camera-dependent)
    has_wheel          -- run lane-event annotation (wheel-dependent)
    data_direc         -- output directory for csv/video artifacts
    write_results      -- write the fused dataframe to fused.csv
    is_move_video      -- create annotated video(s) and move them to data_direc
    is_interact        -- forwarded to Visualize.visualize
    move_to_app        -- convert video and copy results into ../app/static/data
    interactive_video  -- output filename for the annotated video

    Returns a dict of event lists/hashes, sentiments and the fused dataframe
    when both camera and wheel are present; otherwise None.
    """
    df = fusion.fuse_csv(files)
    # Some inputs lack the merged timestamp column; alias it so downstream
    # code can rely on 'timestamp_x'.
    if not 'timestamp_x' in df.columns.values.tolist():
        df['timestamp_x'] = df['timestamp']
    if write_results:
        df.to_csv('%s/fused.csv' % data_direc)
    if has_camera:
        ### 
        # All events that are dependent on the camera
        ### 
        head_ann = HeadAnnotator()
        head_events_hash, head_events_list =  head_ann.annotate_events(df)
        shc = SignalHeadClassifier(head_ann.df, head_ann.events)
        head_events_sentiment = shc.classify_signals()

        # Append each event's sentiment to its event tuple.
        # NOTE(review): `+ (head_events_sentiment[i])` is NOT a 1-tuple —
        # parentheses alone don't make a tuple — so this only works if each
        # sentiment entry is itself a tuple; confirm, else use `(x,)`.
        for i in xrange(len(head_events_list)):
            head_events_list[i] = head_events_list[i] + (head_events_sentiment[i])
            print head_events_list

    if has_wheel:
        ###
        # All events that are dependent on the steering wheel
        ###
        lane_events_hash, lane_events_list = LaneAnnotator(data_direc).annotate_events(df)

    if has_wheel and has_camera:
        slc = SignalLaneClassifier(df, lane_events_list, head_events_list, head_events_hash, head_events_sentiment)
        lane_events_sentiment = slc.classify_signals()

        # Same tuple-append caveat as for head events above.
        for i in xrange(len(lane_events_list)):
            lane_events_list[i] = lane_events_list[i] + (lane_events_sentiment[i])

    #### Compute sentiment classifications

    # annotate the video
    print "Creating video report....."
    video_index = 'frameIndex'
    metadata_file = 'annotated_metadata.json'
    #interactive_video = "annotated_fused.avi"    

    # Created a fused video if possible.
    # Event tuples (start, end, type, sentiment, reason) are mapped from
    # dataframe row indices to video frame indices before annotation.
    if (is_move_video and has_camera and has_wheel):
        print head_events_list
        print lane_events_list
        final_fused_video = annotation.annotate_video(
                'drivelog_temp.avi',
                interactive_video,
                map(lambda (s, e, t, sent, reason): \
                        (df.loc[s, video_index], df.loc[e, video_index], t, sent, reason),
                        head_events_list),
                map(lambda (s, e, t, sent, reason): \
                        (df.loc[s, video_index], df.loc[e, video_index], t, sent, reason),
                        lane_events_list),
                metadata_file
                )

        move_video(final_fused_video, data_direc)
        move_video(metadata_file, data_direc)

    # Otherwise, create the two seperate ones
    else:
        if (is_move_video and has_camera):
            # I MAY HAVE BROKE THIS @chris
            print head_events_list
            
            # Head events only; lane list left empty.
            final_head_video = annotation.annotate_video(
                    'drivelog_temp.avi', 
                    interactive_video, 
                    map(lambda (s, e, t, sent, reason): \
                            (df.loc[s, video_index], df.loc[e, video_index], t, sent, reason),
                            head_events_list),
                    [],
                    metadata_file
                    )

            move_video(final_head_video, data_direc)
            move_video(metadata_file, data_direc)

        elif (is_move_video and has_wheel and len(lane_events_list) > 0): 
            
            # Lane events only; head list left empty.
            print lane_events_list
            final_lane_video = annotation.annotate_video(
                    'drivelog_temp.avi', 
                    interactive_video, 
                    [],
                    map(lambda (s, e, t, sent, reason): \
                            (df.loc[s, video_index], df.loc[e, video_index], t, sent, reason),
                            lane_events_list),
                    metadata_file
                    )

            move_video(final_lane_video, data_direc)
            move_video(metadata_file, data_direc)

        else:
            
            # No events available: annotate with empty event lists.
            # NOTE(review): final_plain_video is never moved/used afterwards —
            # likely a missing move_video call.
            final_plain_video = annotation.annotate_video(
                'drivelog_temp.avi',
                interactive_video,
                [],
                [],
                metadata_file
                )

    # Also copy drivelog_temp
    if (is_move_video and has_camera):
        move_video('drivelog_temp.avi', data_direc)

    video_name = os.path.join(data_direc, interactive_video)
    if (move_to_app):

        # Convert video to mp4 for the web app.
        # NOTE(review): os.system with string-concatenated paths breaks on
        # spaces and is shell-injectable; subprocess with a list would be safer.
        convert_command = 'ffmpeg -i ' + video_name + ' ' + data_direc + '/annotated_fused.mp4'
        os.system(convert_command)
        time.sleep(1)

        # Replace most recent, and add to data dir
        shutil.rmtree('../app/static/data/recent', ignore_errors = True)
        time.sleep(1)
        shutil.copytree(data_direc, '../app/static/data/recent')
        time.sleep(1)
        dir_name = data_direc.split('/')[-1]
        shutil.copytree(data_direc, '../app/static/data/' + dir_name)

    if (has_camera and has_wheel and write_results):
        print "Plotting...."
        vis = Visualize(
                        df,
                        {
                            "head_turns": head_events_list, 
                            "lane_changes": lane_events_list,
                            "head_sentiment": head_events_sentiment,
                            "lane_sentiment": lane_events_sentiment
                        },
                        video_name=video_name,
                        data_direc=data_direc
            )
        vis.visualize(is_interact=is_interact)

    # Full event/sentiment bundle only when both sensors were processed.
    if (has_wheel and has_camera):
        return dict(
                head_events_hash=head_events_hash,
                head_events_list=head_events_list,
                lane_events_hash=lane_events_hash,
                lane_events_list=lane_events_list,
                head_events_sentiment=head_events_sentiment,
                lane_events_sentiment=lane_events_sentiment,
                df=df,
                )
    else:
        return None