Example #1
    def get(self):
        global gl
        # return gl
        file_id = request.args.get('id')
        ch1 = request.args.get('ch1')
        ch2 = request.args.get('ch2')
        transformation = request.args.get('transformation')
        bins = request.args.get('bins')
        gl = {
            'server_start_ts': dt.microsecond,
            'id': file_id,
            'ch1': ch1,
            'bins': bins,
            'transformation': transformation
        }
        if not file_id or not ch1 or not ch2:
            file_id = 'default'
            ch1 = 'HDR-T'
            ch2 = 'FSC-A'
            print('PLOTTING Default FCS and chs', ch1, ch2)
        else:
            print('PLOTTING RECEIVED FCS and chs', file_id, ch1, ch2)

        plotting = Plotting(file_id, ch1, ch2, transformation, bins)
        plots = plotting.get_plots()
        gl.update(plots)
        return plots
Example #2
def play():

    (
        controller_parameters,
        visual,
        parameters,
        history,
    ) = process_game_parameters()
    controllers = Controllers(
        parameters,
        history,
        controller_parameters,
    )
    result = Result(parameters, history, controllers)
    plotting = Plotting(
        visual,
        parameters,
        history,
        controllers,
        result,
    )
    animation = Animation(
        visual,
        parameters,
        history,
        controllers,
        plotting,
        result,
    )
    animation.run()
Example #3
    def plotFigure(self):
        config = self.getConfig()
        plotsconfig = self.getPlotsConfig()

        print("plotsconfig: ", plotsconfig)

        annotations = self.getAnnotations()

        plotting = Plotting(config, plotsconfig, annotations)
Example #4
def main():
    settings = Settings()
    settings.Initalize_Global_Settings()

    preprocess = Preprocess(settings)
    preprocess.Load_Into_Dataframes()

    analysis = Analysis(preprocess)
    experiments = Experiments(analysis)

    data = analysis.Core(experiments)
    data_experimentals = experiments.Run_Experiments()

    models, best_fit, gals_df = analysis.Mocks_And_Models(experiments)

    plotting = Plotting(preprocess)
    plotting.Plot_Core(data, models, best_fit)
    plotting.Plot_Experiments(data, data_experimentals, models, best_fit)
Example #5
def evaluateVideos(seqs, tag):
    dh = DeviceHelper()
    print('Using ' + str(dh.device))
    cp = Checkpointer(params.checkpoint_path, dh.device)
    model, _ = Driver.CreateModelAndOpt(params, dh, cp)

    for seq_info in seqs:
        seq, _, _ = seq_info
        print('Processing ' + seq)
        s = SingleVideo(params.image_dir, params.pose_dir, seq_info)
        ds = DeviceDataset(s)
        output = os.path.join(params.pred_dir, tag + '_' + seq + '.npy')
        Driver.evalOnVideo(model, ds, output)

    seq_list = [x for x, _, _ in seqs]

    Plotting.plot(params.pose_dir, params.pred_dir, tag + '_',
                  params.result_dir, seq_list)
Example #6
    def __init__(self, filename, number_of_resamples=10000):

        """
        Initializing the Bootstrapit class executes the resampling of the imported dataset.
        :param filename: The filename including filepath to the import data file.
        :param number_of_resamples: The number of resamples to perform.
        """
        self.number_of_resamples = number_of_resamples

        # import dataset from file
        self.__fh = FileHandling()
        self.original_data_dict = self.__fh.import_spreadsheet(filename)

        # resample dataset
        self.__bootstrapper = Bootstrapper(self.original_data_dict, number_of_resamples)

        #init bootstrap analysis tools
        self.__analysis = BootstrapAnalysis(self.__bootstrapper)

        #init plotter
        self.__plotter = Plotting(self.__fh.export_order)
Example #7
    def __init__(self, param, grid):

        self.list_param = [
            'modelname', 'tend', 'fixed_dt', 'dt', 'cfl', 'plot_var', 'cax',
            'colorscheme', 'plot_interactive', 'fixed_dt', 'dtmax',
            'freq_save', 'freq_plot'
        ]

        param.copy(self, self.list_param)

        self.list_grid = ['dx', 'nh', 'msk']
        grid.copy(self, self.list_grid)

        if param.modelname == 'euler':
            from euler import Euler
            self.model = Euler(param, grid)

        if param.modelname == 'advection':
            from advection import Advection
            self.model = Advection(param, grid)

        if param.modelname == 'boussinesq':
            from boussinesq import Boussinesq
            self.model = Boussinesq(param, grid)

        if param.modelname == 'quasigeostrophic':
            from quasigeostrophic import QG
            self.model = QG(param, grid)

        self.diag = Diag(param, grid)
        self.plotting = Plotting(param)

        # here's a shortcut to the model state
        self.state = self.model.var.state

        self.t = 0.
        self.kt = 0

        self.output = Output(param, grid, self.diag)
Example #8
def phat_data(inputs, object_mass=None):
    vsfhs, vsfh_kws = prepare_vsfh_run(inputs)
    nruns = len(vsfhs) * inputs.nsfhs
    if nruns > 10:
        os.system('ipcluster start -n=%i' % inputs.nprocs)
        time.sleep(45)  # give the cluster time to start; time has no wait()
        calibrate.main(vsfhs, vsfh_kws=vsfh_kws)
        os.system('ipcluster stop')

    opt_files, ir_files = load_data_files(inputs)
    for i, vsfh in enumerate(vsfhs):
        if nruns < 10:
            if inputs.dry_run is False:
                res_dict = vsfh.vary_the_SFH(object_mass=object_mass, dry_run=inputs.dry_run)
                vsfh.write_results(res_dict)
        pl = Plotting(vsfh)
        gal_kw = {'filetype': inputs.data_ftype, 'angst': False, 'hla': False}
        opt_gal = rsp.Galaxy(opt_files[i], filter1='F475W', filter2='F814W',
                             **gal_kw)
        ir_gal = rsp.Galaxy(ir_files[i], filter1='F110W', filter2='F160W',
                            **gal_kw)

        pl.compare_to_gal(opt_gal, ir_gal, 28, 28, narratio=False, ylim=(.1, 1e5))
Example #9
def main():

    parser = optparse.OptionParser()
    parser.add_option('-i', '--in',
                      dest="phot_class",
    )
    parser.add_option('-a', '--aperture',
                      dest="aperture",
    )
    parser.add_option('-o', '--output',
                      dest="out_plot",
    )

    options, remainder = parser.parse_args()

    print('load', options.phot_class)

    photometry = pickle.load(open(options.phot_class, 'rb'))

    plot = Plotting(photometry, options.out_plot)
    print('Plot noise fire')

    plot.plot_noise(float(options.aperture))
Example #10
    def set_up_all(self):
        """
        Run at the start of each test suite.

        PMD prerequisites.
        """
        self.frame_sizes = [64, 65, 128, 256, 512, 1024, 1280, 1518]

        self.rxfreet_values = [0, 8, 16, 32, 64, 128]

        self.test_cycles = [{
            'cores': '1S/1C/1T',
            'Mpps': {},
            'pct': {}
        }, {
            'cores': '1S/1C/2T',
            'Mpps': {},
            'pct': {}
        }, {
            'cores': '1S/2C/1T',
            'Mpps': {},
            'pct': {}
        }, {
            'cores': '1S/2C/2T',
            'Mpps': {},
            'pct': {}
        }, {
            'cores': '1S/4C/2T',
            'Mpps': {},
            'pct': {}
        }]

        self.table_header = ['Frame Size']
        for test_cycle in self.test_cycles:
            self.table_header.append("%s Mpps" % test_cycle['cores'])
            self.table_header.append("% linerate")

        self.blacklist = ""

        # Based on h/w type, choose how many ports to use
        self.dut_ports = self.dut.get_ports()

        self.headers_size = HEADER_SIZE['eth'] + HEADER_SIZE[
            'ip'] + HEADER_SIZE['udp']

        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])

        self.plotting = Plotting(self.dut.crb['name'], self.target, self.nic)

        self.pmdout = PmdOutput(self.dut)
Example #11
def main():
    # First initiate preprocessor and read files which takes 1 - 2 minutes
    preprocessing = Preprocessing(
        config['weather_file_path'], config['fire_data_file_path'],
        ['Datetime'], [
            'dt_iso', 'temp', 'pressure', 'humidity', 'wind_speed', 'wind_deg',
            'rain_1h', 'rain_3h', 'snow_1h', 'snow_3h', 'clouds_all'
        ])
    # start preprocessing
    preprocessing.start_preprocessing()

    # pass joined weather and fire data to Data class
    data = Data(preprocessing.joined_data)

    # get train test split
    # train data contains 2014 to 2018 years
    # test data contains 2019 as prediction year
    train_x, train_y, test_x, test_y = data.get_train_test()
    xgb = Model((train_x, train_y, test_x, test_y))
    xgb.fit()
    predictions, target = xgb.make_predict()
    description = 'xgb model which contains previous_calls as a feature. It excludes the weather description feature as it was increasing the feature space and was not of importance'
    Plot = Plotting(predictions, target, description, xgb)
    Plot.start_plotting()
Example #12
    def run(self, max_iter=100):

        candidates = self.generate_descendence(self.initialize_chromosomes(),
                                               self.num_nc)
        self.plotting(candidates)

        best_candidate = candidates[0]
        score = min(candidates[:, -1])
        prev_score = np.Inf

        for iteration in range(0, max_iter):
            if prev_score > score:
                candidates = self.generate_descendence(
                    self.initialize_chromosomes(), self.num_nc)
                self.plotting(candidates)
                prev_score = score
                score = min(candidates[:, -1])
                if prev_score > score:
                    best_candidate = candidates[0]
            else:
                print(
                    'Convergence at iteration {}.\nCandidate Chromosome: {}.\nScore: {}\nCenters at: {}'
                    .format(
                        iteration, best_candidate[0], best_candidate[2],
                        str(''.join(
                            ['\n\t' + str(x) for x in best_candidate[1]]))))
                self.plotting(best_candidate, single_cluster=True)
                break

        w, r = self.get_hyperparameters(best_candidate[0])
        it_best = gk(self.data_scaled, 3, w, r)
        points_closet_centroid = it_best.get_points_closet_centroid(
            best_candidate[1])

        pt.schema(points_closet_centroid, best_candidate[1],
                  points_closet_centroid[:, -1], best_candidate)
Example #13
    def set_up_all(self):
        """
        Run at the start of each test suite.

        L2fwd prerequisites.
        """
        self.frame_sizes = [64, 65, 128, 256, 512, 1024, 1280, 1518]

        self.test_queues = [{'queues': 1, 'Mpps': {}, 'pct': {}},
                            {'queues': 2, 'Mpps': {}, 'pct': {}},
                            {'queues': 4, 'Mpps': {}, 'pct': {}},
                            {'queues': 8, 'Mpps': {}, 'pct': {}}
                            ]

        self.core_config = "1S/4C/1T"
        self.number_of_ports = 2
        self.headers_size = HEADER_SIZE['eth'] + HEADER_SIZE['ip'] + \
            HEADER_SIZE['udp']

        self.dut_ports = self.dut.get_ports_performance()

        self.verify(len(self.dut_ports) >= self.number_of_ports,
                    "Not enough ports for " + self.nic)

        self.ports_socket = self.dut.get_numa_id(self.dut_ports[0])

        # compile
        out = self.dut.build_dpdk_apps("./examples/l2fwd")
        self.verify("Error" not in out, "Compilation error")
        self.verify("No such" not in out, "Compilation error")

        self.table_header = ['Frame']
        for queue in self.test_queues:
            self.table_header.append("%d queues Mpps" % queue['queues'])
            self.table_header.append("% linerate")

        dts.results_table_add_header(self.table_header)
        self.plotting = Plotting(self.dut.crb['name'], self.target, self.nic)
Example #14
    def main():
        m = 5
        use_cache = (os.path.isfile(train_data_bow_file_name)
                     and os.path.isfile(test_data_bow_file_name)
                     and os.path.isfile(train_data_word2vec_file_name)
                     and os.path.isfile(test_data_word2vec_file_name))
        print("Preparing data with min_occurrences=" + str(m))

        training_data, word2vec_training_data, testing_data, word2vec_testing_data = preprare_data(
            m, use_cache, duration=None)

        log("********************************************************")
        log("Validating for {0} min_occurrences:".format(m))
        if use_cache:
            col_names = [
                "author", "title", "timestamp_ms", "summary", "sentiment",
                "sentiment_score"
            ]
            data = DataInitializer()
            data.initialize("data/clean_train_with_sentiments.csv",
                            col_names=col_names)
            print("printing head:\n*******************************\n")
            data.processed_data = data.processed_data.reset_index(drop=True)
            # data.processed_data.rename(columns={"author": "timestamp_ms", "timestamp_ms", "summary"})
            print(data.processed_data.head())
            original_data = data.processed_data
            data.data_model = pd.read_csv(train_data_bow_file_name)
            data.wordlist = pd.read_csv("data/wordlist.csv")
            data = Plotting(data)
            data.plot()
        """
        Naive Bayes
        """
        print("***************************************************\n"
              "FOR NAIVE BAYES:\n"
              "***************************************************\n")
        print("testing_data shape: ", testing_data.shape)
        print("testing_data head: ", testing_data.head())
        X_train, X_test, y_train, y_test = train_test_split(
            training_data.iloc[:, 1:],
            training_data.iloc[:, 0],
            train_size=0.7,
            stratify=training_data.iloc[:, 0],
            random_state=seed)

        if use_test_data:
            X_train = training_data.iloc[:, 1:]
            y_train = training_data.iloc[:, 0]

            X_test = testing_data.iloc[:, 1:]
            y_test = testing_data.iloc[:, 0]
        precision, recall, accuracy, f1 = Classification.test_classifier(
            X_train, y_train, X_test, y_test, BernoulliNB())

        # nb_acc = Classification.cv(BernoulliNB(), training_data.iloc[:, 1:], training_data.iloc[:, 0])
        """
        Random Forest
        """
        print("***************************************************\n"
              "FOR RANDOM FORESTS:\n"
              "***************************************************\n")
        X_train, X_test, y_train, y_test = train_test_split(
            training_data.iloc[:, 1:],
            training_data.iloc[:, 0],
            train_size=0.7,
            stratify=training_data.iloc[:, 0],
            random_state=seed)
        if use_test_data:
            X_train = training_data.iloc[:, 1:]
            y_train = training_data.iloc[:, 0]

            X_test = testing_data.iloc[:, 1:]
            y_test = testing_data.iloc[:, 0]

        precision, recall, accuracy, f1 = Classification.test_classifier(
            X_train, y_train, X_test, y_test,
            RandomForestClassifier(random_state=seed,
                                   n_estimators=403,
                                   n_jobs=-1))
        # rf_acc = Classification.cv(RandomForestClassifier(n_estimators=403, n_jobs=-1, random_state=seed), training_data.iloc[:, 1:],
        #                            training_data.iloc[:, 0])
        """
         Word2Vec + Random Forest
        """
        print("***************************************************\n"
              "FOR WORD2VEC WITH RANDOM FORESTS:\n"
              "***************************************************\n")

        X_train, X_test, y_train, y_test = train_test_split(
            word2vec_training_data.iloc[:, 2:],
            word2vec_training_data.iloc[:, 1],
            train_size=0.7,
            stratify=word2vec_training_data.iloc[:, 1],
            random_state=seed)
        # word2vec_training_data.drop(columns=['index'], inplace=True)
        # word2vec_testing_data.drop(columns=['index'], inplace=True)
        print("word2vec_training_data.columns: ",
              word2vec_training_data.columns)

        if use_test_data:
            X_train = word2vec_training_data.iloc[:, 3:]
            y_train = word2vec_training_data.iloc[:, 1]

            X_test = word2vec_testing_data.iloc[:, 3:]
            y_test = word2vec_testing_data.iloc[:, 1]

        precision, recall, accuracy, f1 = Classification.test_classifier(
            X_train, y_train, X_test, y_test,
            RandomForestClassifier(n_estimators=403,
                                   n_jobs=-1,
                                   random_state=seed))

        print("***************************\n")
        print("For Regression\n")
        print("***************************\n")

        print("first five rows: ", word2vec_training_data.head())
        X_train = word2vec_training_data.iloc[:, 4:]
        y_train = word2vec_training_data.iloc[:, 3]

        X_test = word2vec_testing_data.iloc[:, 4:]
        y_test = word2vec_testing_data.iloc[:, 3]

        regr = RandomForestRegressor(max_depth=2, random_state=0)
        regr.fit(X_train, y_train)
        # print(regr.feature_importances_)
        # print(regr.predict([[0, 0, 0, 0]]))
        predictions = regr.predict(X_test)
        print("predictions:\n*****************************", predictions,
              "\n****************************\n")
        print("Real values:\n*****************************", y_test,
              "\n****************************\n")
        print("score: ", regr.score(X_test, y_test))

        redditposts_sentiment = pd.DataFrame()
        # Create a column from the datetime variable
        redditposts_sentiment['datetime'] = word2vec_testing_data[
            "timestamp_ms"]
        redditposts_sentiment['sentiment_score'] = predictions
        # Convert that column into a datetime datatype
        redditposts_sentiment['datetime'] = pd.to_datetime(
            redditposts_sentiment['datetime'])
        # Set the datetime column as the index
        redditposts_sentiment.index = redditposts_sentiment['datetime']

        reddit_posts = [
            Scatter(x=redditposts_sentiment.resample('5Min').mean().index,
                    y=redditposts_sentiment.resample('5Min').mean()
                    ["sentiment_score"],
                    mode="lines")
        ]

        plotly.offline.plot(
            {
                "data": reddit_posts,
                "layout": graph_objs.Layout(title="Reddit posts sentiment")
            },
            filename='plots/redditposts_predicted_sentiment.html')

        print("***************************************************\n"
              "FOR KERAS:\n"
              "***************************************************\n")
        X_train, X_test, y_train, y_test = train_test_split(
            word2vec_training_data.iloc[:, 2:],
            word2vec_training_data.iloc[:, 1],
            train_size=0.7,
            stratify=word2vec_training_data.iloc[:, 1],
            random_state=seed)
        # word2vec_training_data.drop(columns=['index'], inplace=True)
        # word2vec_testing_data.drop(columns=['index'], inplace=True)
        print("word2vec_training_data.columns: ",
              word2vec_training_data.columns)
        if use_test_data:
            X_train = word2vec_training_data.iloc[:, 3:]
            y_train = word2vec_training_data.iloc[:, 1]

            X_test = word2vec_testing_data.iloc[:, 3:]
            y_test = word2vec_testing_data.iloc[:, 1]

        # params
        use_gpu = True

        config = tf.ConfigProto(
            intra_op_parallelism_threads=multiprocessing.cpu_count(),
            inter_op_parallelism_threads=multiprocessing.cpu_count(),
            allow_soft_placement=True,
            device_count={
                'CPU': 1,
                'GPU': 1 if use_gpu else 0
            })

        session = tf.Session(config=config)
        K.set_session(session)

        model_location = './data/model/'

        # Keras convolutional model
        batch_size = 32
        nb_epochs = 10
        vector_size = 200
        # Tweet max length (number of tokens)
        max_tweet_length = 15
        print("X_train shape:", X_train.shape)
        print("Y_train shape:", y_train.shape)
        print("x_test shape:", X_test.shape)
        print("y_test shape:", y_test.shape)
        model = Sequential()
        model.add(Dense(32, activation='relu', input_dim=204))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(optimizer='rmsprop',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        # Fit the model
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  shuffle=True,
                  epochs=nb_epochs,
                  validation_data=(X_test, y_test),
                  callbacks=[EarlyStopping(min_delta=0.00025, patience=2)])

        score = model.evaluate(X_test, y_test, verbose=0)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])

        # Save the model
        # serialize model to JSON
        model_json = model.to_json()
        with open("model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights("model.h5")
        print("Saved model to disk")

        print("****************************\n")
        print("Building a Neural Network\n")
        print("****************************\n")

        with open('sequences', 'rb') as fp:
            sequences = pickle.load(fp)

        with open('sentiments', 'rb') as fp:
            sentiments = pickle.load(fp)

        EarlyStopping(monitor='val_loss',
                      min_delta=0,
                      patience=0,
                      verbose=0,
                      mode='auto')
        model = Sequential()
        model.add(Embedding(20000, 128, input_length=200))
        model.add(Dropout(0.2))
        model.add(Conv1D(64, 5, activation='relu'))
        model.add(MaxPooling1D(pool_size=4))
        model.add(LSTM(128))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(sequences,
                  np.array(sentiments),
                  validation_split=0.5,
                  epochs=10)
Example #15
def pl_bokeh_js2():
    bkh = Plotting(database_name)
    rend2 = bkh.bokeh_plot(1, 5)
    return json.dumps(json_item(rend2))
Example #16
def pl_bokeh_js():
    bkh = Plotting(database_name)
    rend1 = bkh.bokeh_plot(1, 4)
    return json.dumps(json_item(rend1))
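Both endpoints above serialize a Bokeh renderer with json_item() so that BokehJS can embed it in a page. A minimal self-contained sketch of the same pattern, assuming Flask and Bokeh are installed (the app, route name, and figure are illustrative, not from the original project):

import json

from bokeh.embed import json_item
from bokeh.plotting import figure
from flask import Flask

app = Flask(__name__)

@app.route('/plot')
def plot():
    # build a throwaway figure; the real examples get theirs from Plotting.bokeh_plot()
    p = figure(title='demo')
    p.line([1, 2, 3], [4, 6, 5])
    # json_item() returns a dict that Bokeh.embed.embed_item() can render client-side
    return json.dumps(json_item(p))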
Example #17
def sweep_maxGain(log,
                  f1,
                  f2,
                  nums,
                  rstart,
                  angle,
                  rstop,
                  tpolar,
                  cpolar,
                  spos=spos_default):
    # --------------------------------------------------------------------------
    # Reset motor positions
    #
    motorSet[STAND_ROTATION].goto_zero()
    if spos:  # Stand translation
        motorSet[S_TRANSLATION].rot_deg(STAND_OFFSET)
    set_polarization(log, motorSet, tpolar, cpolar, mycursor)
    #
    # End reset motor positions
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Move test antenna to start degree position
    #
    log.info("Start Position: " + str(rstart))
    motorSet[M1].rot_deg(rstart)
    log.info("Motor setup complete")
    #
    # End move test antenna to start position
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Load state
    #
    analyzer.load_state()
    #
    # End load state
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Set network analyzer parameters
    #
    channel = 1
    trace = 1
    analyzer.setup(channel, trace)
    # analyzer.enable_display(False)

    # Set start frequency
    start = float(analyzer.set_start(channel, f1))
    if f1 != start:
        msg = "WARNING: Invalid start frequency, using " + str(start)
        print(msg)
        log.warning(msg)
        # f1_old = f1
        f1 = start

    # Set stop frequency
    stop = float(analyzer.set_stop(channel, f2))
    if f2 != stop:
        msg = "WARNING: Invalid stop frequency, using " + str(stop)
        print(msg)
        log.warning(msg)
        # f2_old = f2
        f2 = stop

    # Set number of points
    points = int(analyzer.set_points(channel, nums))
    if nums != points:
        msg = "WARNING: Invalid number of steps, using " + str(points)
        print(msg)
        log.warning(msg)
        # nums_old = nums
        nums = points

    # Create csv files
    # d = datetime.today()
    # file_name = os.path.join(DATA_PATH, d.strftime("%Y%m%d%H%M%S"))
    # s21_filename = file_name + "_s21.csv"
    s21_filename = os.path.join(DATA_PATH, "S21.csv")
    s21File = open(s21_filename, "w")
    #
    # End set network analyzer parameters
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Check for network analyzer errors
    log.info("Checking network analyzer error queue")
    err_nums, err_msgs = analyzer.get_errors()
    if len(err_nums) > 0:
        msg = "Error in setting network analyzer parameters"
        print(msg)
        log.warning(msg)
    else:
        # No errors
        log.info("No network analyzer errors detected")
    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Complete frequency sweep
    #
    log.info("Measuring S21")
    print("Starting S21 Measurement")
    print("Start Frequency: " + str(f1 / 1e9) + " GHz")
    print("Stop Frequency: " + str(f2 / 1e9) + " GHz")
    print("Number of Points: " + str(nums))
    analyzer.set_measurement(channel, trace, 2, 1)
    analyzer.trigger()
    analyzer.update_display()
    analyzer.auto_scale(channel, trace)
    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Retrieve and store data
    #
    # If first position, get frequency data
    s21Freq = analyzer.get_x(channel)
    s21File.write(s21Freq)
    # Get s21 data and write to file
    s21Data = analyzer.get_corr_data(channel)
    # s21Data = analyzer.get_form_data(channel)
    s21File.write(s21Data)

    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Reset motor positions to zero index
    #
    motorSet[STAND_ROTATION].goto_zero()
    if spos:  # Stand translation
        motorSet[S_TRANSLATION].rot_deg(-STAND_OFFSET)
    #
    # End reset motor positions
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Close csv files
    #
    s21File.close()
    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Update database
    #
    if db.is_connected():
        fstart = f1 / 1e9
        fstop = f2 / 1e9
        rowcount = mycursor.rowcount

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Antenna polarization
        #
        log.info("Updating tpolar and cpolar in sql database")
        update_config_db(log, mycursor, tpolar, "'antenna_polarization'")
        update_config_db(log, mycursor, cpolar, "'chamber_polarization'")

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Network analyzer parameters
        #
        log.info("Updating fstart, fstop, and nums in sql database")
        update_config_db(log, mycursor, fstart, "'frequency_start'")
        update_config_db(log, mycursor, fstop, "'frequency_stop'")
        update_config_db(log, mycursor, nums, "'num_steps'")

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Commit changes
        log.info("Committing changes")
        db.commit()
        if rowcount == mycursor.rowcount:
            log.warning("Failed to store updated antenna polarization data")

    #
    # End update database
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Call normalization function, plot data, and write zip
    #
    log.info("Normalized data written to file: " +
             S21Normalize(os.path.basename(s21_filename), maxGain=True))
    Plotting(f1, f2, nums, rstart, angle, rstop, 0, 0, 0, 0, 0, 0, "maxGain")
    #
    # End normalization
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Check for network analyzer errors
    log.info("Checking network analyzer error queue")
    err_nums, err_msgs = analyzer.get_errors()
    if len(err_nums) > 0:
        msg = "Error measuring S21"
        print(msg)
        log.warning(msg)
    else:
        # No errors
        msg = "S21 Measurement Successful"
        print(msg)
        log.info(msg)
Example #18
    def __init__(self, hist_path: str, varname="b", video_path=None, visible=True):
        """Open the history file and load parts of it."""
        # Initialize self.hist_file to prevent the destructor from failing
        self.hist_file = None

        self.tracers = False

        # Set parameters needed for Plotting that cannot be determined
        # so far; maybe make them command line arguments in the future
        param = {
            "figsize": (RESOLUTION[0] / DPI, RESOLUTION[1] / DPI),
            "aspect": "equal",
            "rotation_speed": 3,
        }

        if varname == "b":
            param["style"] = "b-interface"
            param["stable_stratification"] = True  # TODO make this a command line argument
        elif varname == "t0":
            # This option is to plot only the first tracer and also a
            # shorter notation in the common case with only one tracer
            param["style"] = "tracer"
            param["n_tracers"] = 1
        elif varname == "tracer":
            param["style"] = "tracer"
            self.tracers = True
        else:
            raise NotImplementedError("The given variable is not yet supported.")

        # Save necessary arguments
        self.video_path = video_path
        self.visible = visible

        # Create the metadata for the video
        if self.video_path:
            # Extract the name of the experiment
            exp_name = os.path.basename(
                hist_path[:-8] if hist_path.endswith("_hist.nc") else hist_path
            )
            self.metadata = {
                "title": "Nyles experiment {}".format(exp_name),
                "artist": CREATOR,
                "genre": "Computational Fluid Dynamics (CFD)",
                "comment": "Created on {} with Nyles.  Nyles is a Large Eddy "
                           "Simulation written in Python.  For more information"
                           " visit https://github.com/pvthinker/Nyles."
                           .format(time.strftime('%d %b %Y')),
                "date": time.strftime("%Y-%m-%d"),
            }

        # Open the history file and keep it open to allow sequential reading
        print("Loading history file {!r}:".format(hist_path))
        self.hist_file = nc.Dataset(hist_path)
        print(self.hist_file)

        # Load the needed data
        if self.tracers:
            param["n_tracers"] = self.hist_file.n_tracers
            self.tracers_data = [
                self.hist_file["t{}".format(i)] for i in range(self.hist_file.n_tracers)
            ]
        else:
            self.vardata = self.hist_file[varname]
        self.t = self.hist_file["t"]
        self.n = self.hist_file["n"]
        self.n_frames = self.n.size

        # Load parameters needed for Grid
        param["Lx"] = self.hist_file.Lx
        param["Ly"] = self.hist_file.Ly
        param["Lz"] = self.hist_file.Lz
        param["nx"] = self.hist_file.global_nx
        param["ny"] = self.hist_file.global_ny
        param["nz"] = self.hist_file.global_nz

        # Set parameters needed for Scalar
        param["nh"] = 0
        param["neighbours"] = {}

        grid = Grid(param)

        # Create one or several Scalar variables as an interface for
        # passing data to the plotting module.  Note: Scalar takes
        # actually a dimension instead of a unit, but that does not
        # matter because this information is not processed here.
        if self.tracers:
            tracer_list = []
            self.arrays = []
            for data in self.tracers_data:
                tracer = Scalar(param, data.long_name, data.name, data.units)
                tracer_list.append(tracer)
                self.arrays.append(tracer.view("i"))
            state = State(tracer_list)
        else:
            scalar = Scalar(param, self.vardata.long_name, varname, self.vardata.units)
            self.array = scalar.view("i")
            state = State([scalar])

        self.p = Plotting(param, state, grid)
Example #19
class Bootstrapit:
    """
    The Bootstrapit class builds the API for the Bootstrapit application. The Methods inside this class can directly
    be used to interact with the application.
    """
    def __init__(self, filename, number_of_resamples=10000):

        """
        Initializing the Bootstrapit class executes the resampling of the imported dataset.
        :param filename: The filename including filepath to the import data file.
        :param number_of_resamples: The number of resamples to perform.
        """
        self.number_of_resamples = number_of_resamples

        # import dataset from file
        self.__fh = FileHandling()
        self.original_data_dict = self.__fh.import_spreadsheet(filename)

        # resample dataset
        self.__bootstrapper = Bootstrapper(self.original_data_dict, number_of_resamples)

        #init bootstrap analysis tools
        self.__analysis = BootstrapAnalysis(self.__bootstrapper)

        #init plotter
        self.__plotter = Plotting(self.__fh.export_order)

    # TODO: Export bootstrapped data array to json or excel
    def export(self, *export_datasets_dicts, filename="bootstrapit_results.xlsx"):

        """
        The export method does merge all data analysis result dictionaries and exports them using the FileHandler class.
        :param export_datasets_dicts: All result dictionaries from the bootstrapping analysis.
        :param filename: the export filename.
        """
        merged_dict = self.__merge_dicts(*export_datasets_dicts)

        filetype_check = filename.split('.')
        filetype = filetype_check[-1]
        filename = filetype_check[0]

        if filetype == "xlsx":
            self.__fh.file_type = FileType.XLSX
        elif filetype == "xls":
            self.__fh.file_type = FileType.XLS
        elif filetype == "csv":
            self.__fh.file_type = FileType.CSV
        else:
            print("Error: Unsupported file type.")

        self.__fh.file_name = filename
        self.__fh.export(merged_dict)

    def __merge_dicts(self, *dict_args):

        """
        Helper method to merge multiple result dictionaries.
        :param dict_args: List of dictionaries
        :return: The merged dictionary containing all results of the analysis.
        """
        result = {}
        for dictionary in dict_args:
            result.update(dictionary)
        return result

    def mean(self):
        return self.__analysis.get_bootstrapped_mean()

    def median(self):
        return self.__analysis.get_bootstrapped_median()

    def SEM(self):
        return self.__analysis.get_SEM()

    def barchart(self, figure, data_dict, errorbar = {}):
        return self.__plotter.plot_barchart(figure, data_dict, errorbar)


    def set_axis_label(self, axes, title, xlabel, ylabel):
        self.__plotter.set_axis_labels(axes, title, xlabel, ylabel)
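A minimal usage sketch of the Bootstrapit API above, assuming an input spreadsheet exists (the filename here is a placeholder, not from the original project):

# Hypothetical usage of the class above; 'measurements.xlsx' is a placeholder filename.
bootstrap = Bootstrapit('measurements.xlsx', number_of_resamples=10000)

means = bootstrap.mean()      # bootstrapped means per dataset
medians = bootstrap.median()  # bootstrapped medians per dataset
errors = bootstrap.SEM()      # standard errors of the mean

# merge the result dictionaries and write them out via FileHandling
bootstrap.export(means, medians, errors, filename='bootstrapit_results.xlsx')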
Example #20
def test():
    bytes_in_terabyte = 1099511627776
    scrub_fraction = 1
    sector_size = 512
    num_sectors_per_disk = (585937500) * 1
    num_tb_per_disk = mpf(sector_size * num_sectors_per_disk) / bytes_in_terabyte
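    # i.e. 512 B/sector * 585,937,500 sectors = 300 GB, about 0.273 TB in binary units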
    sector_error_rate = 4.096e-11
    sector_writes_per_disk_per_hr = mpf(143750) / 24
    writes_to_sector_per_hour = 0.0045
    low_scrub = 168
    high_scrub = 169
    scrub_incr = 12
    
    print(writes_to_sector_per_hour)
    
    x = []
    y = []
    dp = DataPoints()
    for i in range(low_scrub, high_scrub, scrub_incr):
        ssfm = RandomScrubSectorFailModel(num_sectors_per_disk, scrub_fraction*num_sectors_per_disk, i, sector_error_rate, writes_to_sector_per_hour, 1)
        #x.append(ssfm.disk_scrub_period)
        x.append(i)
        y.append(ssfm.prob_of_bad_sector(i))
        
    dp.addDataSet(x,y,"Random Scrub Model")
    
    x = []
    y = []
    for i in range(low_scrub, high_scrub, scrub_incr):
        
        dsfm = DeterministicScrubSectorFailModel(num_sectors_per_disk, scrub_fraction*num_sectors_per_disk, i, sector_error_rate, writes_to_sector_per_hour, 1)
        #x.append(ssfm.disk_scrub_period)
        x.append(i)
        y.append(dsfm.prob_of_bad_sector(i))
        print "Scrub", i, dsfm.prob_of_bad_sector(i)
        
    dp.addDataSet(x,y,"Deterministic Scrub Model")
    
    x = []
    y = []
    
    for i in range(low_scrub, high_scrub, scrub_incr):
        berfm = BERSectorFailModel(num_sectors_per_disk, scrub_fraction*num_sectors_per_disk, i, sector_error_rate, writes_to_sector_per_hour, 1)
        #x.append(ssfm.disk_scrub_period)
        x.append(i)
        y.append(berfm.prob_of_bad_sector())
        print "BER", i, berfm.prob_of_bad_sector()
    dp.addDataSet(x,y,"BER Model")
    
    x = []
    y = []
    
    for i in range(low_scrub, high_scrub, scrub_incr):
        nsfm = NoScrubSectorFailModel(num_sectors_per_disk, scrub_fraction*num_sectors_per_disk, i, sector_error_rate, writes_to_sector_per_hour, 1)
        #x.append(ssfm.disk_scrub_period)
        x.append(i)
        y.append(nsfm.prob_of_bad_sector())
    
    dp.addDataSet(x,y,"No Scrub Model")
    
    
    pl = Plotting(dp,
                  "graphs/sector_fail_write_%.1f.%.3f.pdf" % (num_tb_per_disk, scrub_fraction),
                  "Comparison of Sector Failure Models \n for %.1f TB Disk (Region %d Perc.)" % (num_tb_per_disk, 100 * scrub_fraction),
                  "Scrub Interval (hours)", "Probability of Sector Failure",
                  type=Plotting.YLOG, legend_loc=2)
    pl.plot()
Example #21
def run_sarima(df, channels_array, is_egxog):  # def line inferred from the call at the end of this example
    for i in range(len(channels_array)):
        id_chan = str(channels_array[i])
        print("Processing channel: "+id_chan)
        series = Series(np.array(df[id_chan]))
        egxog = None
        if is_egxog:
            # without inplace=True, drop() returns the reduced frame instead of None
            egxog = df.drop([id_chan], axis=1, errors='ignore')
        model = SarimaModel([(0, 0, 0), (0, 0, 0, 10080), "n"])
        rmse = model.evaluate_model(series, egxog, group_size)
        print(rmse)
        db.save_result(int(Method_type.Sarima_10080), int(
            Granulation.minute), id_chan, rmse)


db = Database()
plotting = Plotting()
channels = db.get_channels()
channels_array = np.array(channels["id_chan"])
df_grp_hours = db.get_grp_aggregated_hourly('1,2,3,4,5,6,7')
df_grp_minute = db.get_grp_none_aggregated('1,2,3,4,5,6,7')
group_size = 10080

create_series_plots(df_grp_hours, channels_array, True)
create_acf_and_pacf_plots(df_grp_hours, channels_array, True)

# for i in range(len(channels_array)):
#     id_chan = str(channels_array[i])
#     series = Series(array(df_grp_hours[id_chan]))
#     test_stationarity(series, id_chan)
run_sarima(df_grp_minute, channels_array, is_egxog=True)
Example #22
def main(params):
    """This is the main loop which is repeated every timestep. Currently, this follows the Update Strain Last paradigm (does it?!?).

    :param params: The run parameters, beginning with the name of the input file to use.
    :type params: list
    :returns: int -- The return code.

    """

    P, G, L = initialise.get_parameters(params)
    plot = Plotting()
    #     if P.O.plot_material_points: plot.draw_material_points(L,P,G,'initial')

    while P.t <= P.t_f:  # Time march
        G.wipe(P)  # Discard previous grid
        P.update_forces()  # update time dependent gravity
        L.update_forces(P, G)  # pass body forces to material points
        L.get_reference_node(P, G)  # Find node down and left
        L.get_basis_functions(P, G)  # Make basis functions
        if P.O.check_positions:
            L.recover_position(P, G)  # check basis functions
        L.get_nodal_mass_momentum(P, G)  # Initialise from grid state
        if P.B.cyclic_lr:
            #             G.apply_cyclic_BCs(P)
            G.make_cyclic(P, G, ['m', 'q'])
        L.update_stress_strain(P, G)  # Update stress and strain
        L.get_nodal_forces(P, G)  # Compute internal and external forces
        G.BCs(P)  # Add external forces from BCs
        G.update_momentum(P)  # Compute rate of momentum and update nodes
        G.calculate_gammadot(P, G)
        if P.segregate_grid:
            G.calculate_grad_gammadot(P, G)
            G.calculate_phi_increment(P)
            L.move_grainsize_on_grid(P, G)

        L.move_material_points(
            P, G)  # Update material points (position and velocity)
        # Move/Remove any particles that have left the grid
        if P.B.outlet_left: L.outlet_left(P, G)
        if P.B.outlet_bottom: L.outlet_bottom(P, G)
        if P.B.inlet_right: L.inlet_right(P, G)
        if P.B.cyclic_lr: L.cyclic_lr(P, G)

        # Output related things
        if P.O.measure_energy:
            P.O.energy[P.tstep] = L.measure_elastic_energy(P,
                                                           G)  # measure energy
        print('{0:.4f}'.format(P.t * 100. / P.t_f) + '% Complete, t = ' +
              '{0:.4f}'.format(P.t) + ', g = ' + str(P.g),
              end='\r')

        if P.t % P.savetime < P.dt:
            if P.O.plot_gsd_mp: plot.draw_gsd_mp(L, P, G)
            if P.O.plot_gsd_grid: plot.draw_gsd_grid(L, P, G)
            #             plot.draw_voronoi(P,G)
            if P.O.plot_continuum: plot.draw_continuum(G, P)
            if P.O.plot_material_points: plot.draw_material_points(L, P, G)
            if P.mode == 'anisotropy': plot.draw_gamma_dot(L, P, G)
            if P.O.measure_energy: P.O.measure_E(P, L)
            if P.O.save_u: plot.save_u(L, P, G)
            if P.O.save_s_bar: plot.save_s_bar(L, P, G)
        if P.mode == 'dp_unit_test' or P.mode == 'dp_rate_unit_test':
            P.O.store_p_q(P, G, L, P.tstep)
        if P.mode == 'pouliquen_unit_test': P.O.store_mu(P, G, L, P.tstep)

        # Increment time
        P.t += P.dt
        P.tstep += 1


#         for p in range(P.phases):
#             if (P.S[p].law is 'von_mises' or P.S[p].law is 'dp') and not P.has_yielded:
#             if P.S[p].law is 'von_mises' and not P.has_yielded:
#                 L.update_timestep(P) # Check for yielding and reduce timestep

# Final things to do
    if P.O.plot_material_points: plot.draw_material_points(L, P, G, 'final')
    if P.O.measure_stiffness: P.O.measure_E(P, L)
    if P.O.measure_energy: plot.draw_energy(P)
    if P.O.plot_gsd_mp: plot.draw_gsd_mp(L, P, G)
    if P.O.plot_gsd_grid: plot.draw_gsd_grid(L, P, G)
    if P.O.save_u: plot.save_u(L, P, G)
    if P.O.save_s_bar: plot.save_s_bar(L, P, G)
    if P.mode == 'dp_unit_test' or P.mode == 'dp_rate_unit_test':
        P.O.draw_p_q(P, G, L, plot, P.tstep)
    if P.mode == 'pouliquen_unit_test': P.O.draw_mu(P, G, L, plot, P.tstep)
    print('')
    return 0
Example #23
    flux = P.c * f_c * g - P.D * dCdx
    flux[boundary] = 0
    return flux  #, boundary


def increment_grainsize(P, G):
    return KT(P, G, 0) + KT(P, G, 1)


#     return NT(P,G,0) + NT(P,G,1)

if __name__ == "__main__":
    import initialise
    from numpy import random, maximum, ones
    from plotting import Plotting
    plot = Plotting()
    P, G, L = initialise.get_parameters(['bi_seg_test', '22', '2'])
    G.wipe(P)
    L.get_reference_node(P, G)  # Find node down and left
    L.get_basis_functions(P, G)  # Make basis functions
    L.get_nodal_mass_momentum(P, G)  # Initialise from grid state
    plot.draw_gsd_grid(L, P, G)
    G.grad_gammadot = -1. * ones([P.G.ny * P.G.nx, 3])
    # G.grad_gammadot[:,1] = 0
    #     P.dt *= 10
    #     G.phi = zeros_like(G.phi)
    #     G.phi[40:120,0] = 0.5
    #     G.phi[:,1] = 1- G.phi[:,0]
    #     G.phi[40:100,1] = 0

    # P.c *= 100
Example #24
def main(params):
    """This is the main loop which is repeated every timestep. Currently, this follows the Update Strain Last paradigm (does it?!?).

    :param params: The run parameters, beginning with the name of the input file to use.
    :type params: list
    :returns: int -- The return code.

    """

    P,G,L = initialise.get_parameters(params)
    plot = Plotting()
#     if P.O.plot_material_points: plot.draw_material_points(L,P,G,'initial')

    while P.t <= P.t_f:# Time march
        G.wipe(P) # Discard previous grid
        P.update_forces() # update time dependent gravity
        L.update_forces(P,G) # pass body forces to material points
        L.get_reference_node(P,G) # Find node down and left
        L.get_basis_functions(P,G) # Make basis functions
        if P.O.check_positions: L.recover_position(P,G) # check basis functions
        L.get_nodal_mass_momentum(P,G) # Initialise from grid state
        if P.B.cyclic_lr: G.make_cyclic(P,G,['m','q'])
        L.update_stress_strain(P,G) # Update stress and strain
        L.get_nodal_forces(P,G) # Compute internal and external forces
        G.BCs(P) # Add external forces from BCs
        G.update_momentum(P) # Compute rate of momentum and update nodes
        G.calculate_gammadot(P,G)
        if P.segregate_grid:
            G.update_pk(P,G) # NOTE: THIS IS BRAND NEW AND PROBABLY BROKEN
            #if P.B.cyclic_lr: G.make_cyclic(P,G,['phi','pk','s_bar'])
            # if P.B.cyclic_lr: G.make_cyclic(P,G,['pk'])
            # G.calculate_grad_gammadot(P,G)
            G.calculate_phi_increment(P)
            L.move_grainsize_on_grid(P,G)
            G.make_cyclic(P,G,['eta','dphi'])
        L.move_material_points(P,G) # Update material points (position and velocity)
        # Move/Remove any particles that have left the grid
        if P.B.outlet_left: L.outlet_left(P,G)
        if P.B.outlet_bottom: L.outlet_bottom(P,G)
        if P.B.inlet_right: L.inlet_right(P,G)
        if P.B.inlet_top: L.inlet_top(P,G)
        if P.B.cyclic_lr: L.cyclic_lr(P,G)

        # Output related things
        if P.O.measure_energy: P.O.energy[P.tstep] = L.measure_elastic_energy(P,G) # measure energy

        print('{0:.4f}'.format(P.t*100./P.t_f) + '% Complete, t = ' +
              '{0:.4f}'.format(P.t) + ', g = ' + str(P.g), end='\r')

        if P.t%P.savetime < P.dt:
            if P.O.plot_gsd_mp: plot.draw_gsd_mp(L,P,G)
            if P.O.plot_gsd_grid: plot.draw_gsd_grid(L,P,G)
#             plot.draw_voronoi(P,G)
            if P.O.plot_continuum: plot.draw_continuum(G,P)
            if P.O.plot_material_points: plot.draw_material_points(L,P,G)
            if P.mode == 'anisotropy': plot.draw_gamma_dot(L,P,G)
            if P.O.measure_energy: P.O.measure_E(L,P,G)
            # for [field,fieldname] in P.O.save_fields: P.O.save_field(L,P,G,field,fieldname)
            if P.O.save_u: plot.save_u(L,P,G)
            if P.O.save_s_bar: plot.save_s_bar(L,P,G)
            if P.O.save_density: plot.save_density(L,P,G)
        if P.mode == 'dp_unit_test' or P.mode == 'dp_rate_unit_test': P.O.store_p_q(P,G,L,P.tstep)
        if P.mode == 'pouliquen_unit_test': P.O.store_mu(P,G,L,P.tstep)

        # Increment time
        P.t += P.dt
        P.tstep += 1

        if P.time_stepping == 'dynamic': P.update_timestep(P,G)

        # for p in range(P.phases):
#             if (P.S[p].law is 'von_mises' or P.S[p].law is 'dp') and not P.has_yielded:
#             if P.S[p].law is 'von_mises' and not P.has_yielded:
#                 L.update_timestep(P) # Check for yielding and reduce timestep

    # Final things to do
    if P.O.plot_material_points: plot.draw_material_points(L,P,G,'final')
    if P.O.measure_stiffness: P.O.measure_E(L,P,G)
    if P.O.measure_energy: plot.draw_energy(P)
    if P.O.plot_gsd_mp: plot.draw_gsd_mp(L,P,G)
    if P.O.plot_gsd_grid: plot.draw_gsd_grid(L,P,G)
    if P.O.save_u: plot.save_u(L,P,G)
    if P.O.save_s_bar: plot.save_s_bar(L,P,G)
    if P.O.save_density: plot.save_density(L,P,G)
    if P.mode == 'dp_unit_test' or P.mode == 'dp_rate_unit_test': P.O.draw_p_q(P,G,L,plot,P.tstep)
    if P.mode == 'pouliquen_unit_test': P.O.draw_mu(P,G,L,plot,P.tstep)
    print('')
    return 0
Example #25
sys.stdout.flush()

# Logger
logfile_path = '/home/pi/mu_code/monitoraggio_MP/new_files/monitoring_data/monitoring.log'
logging.basicConfig(filename=logfile_path, filemode='w', level=logging.INFO)

# Data Sampling Time
# 10 min -> 600000 ms
data_sampling_time = 1000  # ms
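# note: 1000 ms (1 s) here, not the 600000 ms (10 min) mentioned above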

# Graphical Sampling Time
plot_sampling_time = 2.5 * data_sampling_time  # ms

# filename
save = True
#filename = '/home/pi/mu_code/monitoring_file.csv'

if __name__ == '__main__':

    pm_sens = PMsensor(connect=True, logfile_path=logfile_path)
    data_manage = DataManagement(save=save,
                                 sensor=pm_sens,
                                 sampling_interval=data_sampling_time,
                                 logfile_path=logfile_path)
    plot = Plotting(sensor=pm_sens, update_time=plot_sampling_time)

    data_manage.start_measurement()
    plot.start_animation()

# Have fun, Pablito!
# Seba
Example #26
def preprocess(data_path,
               is_testing,
               min_occurrences=5,
               cache_bow_output=None,
               cache_word2vec_output=None,
               duration=None):
    if duration:
        data = DataInitializer()
        data.initialize(data_path, is_testing, duration=duration)
    else:
        data = DataInitializer()
        data.initialize(data_path, is_testing)

    if os.path.isfile("data/BTC.csv"):
        prices_data = GetPricesData()
        prices_data.main()

    data = DataCleaning(data, is_testing)
    data.cleanup(DataCleaner(is_testing))

    if is_testing:
        print("Testing data shape:", data.processed_data.shape)
    else:
        print("Training data shape:", data.processed_data.shape)

    data = Sentiments(data)
    data.sentiment_analysis_by_text()
    print("First five rows with sentiment: ", data.processed_data.head())
    if is_testing:
        data.processed_data.to_csv("data/clean_test_with_sentiments.csv",
                                   sep=',',
                                   encoding='utf-8',
                                   index=False)
        # os.remove(data_path)
    else:
        data.processed_data.to_csv("data/clean_train_with_sentiments.csv",
                                   sep=',',
                                   encoding='utf-8',
                                   index=False)
        # os.remove(data_path)

    data = DataTokenize(data)
    data.tokenize()
    data.stem()

    data = WordList(data)
    data.build_wordlist(min_occurrences=min_occurrences)

    word2vec_data = data
    data = BagOfWords(data.processed_data, data.wordlist, is_testing)
    data.build_data_model()
    print("data model head: ", data.data_model.head(5))
    """
    Word 2 vec
    """

    word2vec = Word2VecProvider()

    # REPLACE PATH TO THE FILE
    word2vec.load("../twitter/data/glove.twitter.27B.200d.txt")

    word2vec_data = RedditData(word2vec_data)
    word2vec_data.build_final_model(word2vec)

    word2vec_data_model = word2vec_data.data_model
    if "index" in word2vec_data_model.columns:
        word2vec_data_model.drop("index", axis=1, inplace=True)
    word2vec_data_model.dropna(axis=0, inplace=True)
    word2vec_data_model.reset_index(inplace=True)
    word2vec_data_model.index = word2vec_data_model['timestamp_ms']
    print("final word2vec data model: \n", word2vec_data_model.head(), "\n")
    """
    Tokenizing the data
    """
    texts = []
    sentiments = []
    tokenized_data = pd.DataFrame()
    for text in data.processed_data["summary"]:
        texts.append(text)
    for sentiment in data.processed_data["sentiment"]:
        sentiments.append(sentiment)
    print("texts: ", texts[0:5])
    tokenizer = Tokenizer(num_words=20000)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    padded_sequences = pad_sequences(sequences, maxlen=200)

    print(
        "\n\n##################################################\npadded sequence head: \n",
        padded_sequences[0:5])
    print(
        "\n####################################################\n padded sequence length \n",
        len(padded_sequences))

    if not is_testing:
        data = Plotting(data)
        data.plot()

    if cache_bow_output is not None:
        data.data_model.to_csv(cache_bow_output,
                               index=False,
                               float_format="%.6f")
        word2vec_data_model.to_csv(cache_word2vec_output,
                                   index=False,
                                   float_format="%.6f")
        with open('sequences', 'wb') as fp:
            pickle.dump(padded_sequences, fp)
        with open('sentiments', 'wb') as fp:
            pickle.dump(sentiments, fp)

    return data.data_model, word2vec_data_model
Example #27
class Fluid2d(Param):
    def __init__(self, param, grid):

        self.list_param = [
            'modelname', 'tend', 'fixed_dt', 'dt', 'cfl', 'plot_var', 'cax',
            'colorscheme', 'plot_interactive', 'fixed_dt', 'dtmax',
            'freq_save', 'freq_plot'
        ]

        param.copy(self, self.list_param)

        self.list_grid = ['dx', 'nh', 'msk']
        grid.copy(self, self.list_grid)

        if param.modelname == 'euler':
            from euler import Euler
            self.model = Euler(param, grid)

        if param.modelname == 'advection':
            from advection import Advection
            self.model = Advection(param, grid)

        if param.modelname == 'boussinesq':
            from boussinesq import Boussinesq
            self.model = Boussinesq(param, grid)

        if param.modelname == 'quasigeostrophic':
            from quasigeostrophic import QG
            self.model = QG(param, grid)

        self.diag = Diag(param, grid)
        self.plotting = Plotting(param)

        # here's a shortcut to the model state
        self.state = self.model.var.state

        self.t = 0.
        self.kt = 0

        self.output = Output(param, grid, self.diag)

    def update_fig(self, *args):
        self.diag.integrals(self.model.var)
        #        print(self.diag.ke)
        self.set_dt()
        self.model.step(self.t, self.dt)
        self.kt += 1
        self.t = self.t + self.dt
        #        print(self.t,self.dt)
        if (self.kt % self.freq_plot) == 0:  #self.plot_freq)==0:
            self.z2d[self.plotting_msk == 0] = 0.
            #            self.cax=self.get_cax(self.z2d)
            self.set_cax(self.z2d)
            self.im.set_array(self.z2d)
            self.im.set_clim(vmin=self.cax[0], vmax=self.cax[1])

        #self.output.do(self.model.var.state,self.diag,self.t,self.kt)

        return self.im,

    def update_fig2(self, *args):
        """Like update_fig, but also writes output and updates the time label."""
        self.diag.integrals(self.model.var)
        self.set_dt()
        self.model.step(self.t, self.dt)
        self.kt += 1
        self.t += self.dt
        if (self.kt % self.freq_plot) == 0:
            self.z2d[self.plotting_msk == 0] = 0.
            self.set_cax(self.z2d)
            self.im.set_array(self.z2d)
            self.im.set_clim(vmin=self.cax[0], vmax=self.cax[1])

        self.output.do(self.model.var.state, self.diag, self.t, self.kt)
        self.ti.set_text('%4.2f' % self.t)

        return self.im, self.ti,

    def loop(self):
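        # plot interior points only: strip the nh-wide halo on each side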
        nh = self.nh
        self.plotting_msk = self.msk[nh:-nh, nh:-nh]
        self.z2d = self.model.var.get(self.plot_var)[nh:-nh, nh:-nh]

        import matplotlib.pyplot as plt
        import matplotlib.animation as animation
        if not self.plot_interactive:
            fig = plt.figure()
            self.ti = plt.title('')
            self.ti.set_animated(True)
            self.im = plt.imshow(self.z2d,
                                 vmin=self.cax[0],
                                 vmax=self.cax[1],
                                 cmap=plt.get_cmap('jet'),
                                 origin='lower',
                                 interpolation='nearest')

            plt.colorbar()
            ani = animation.FuncAnimation(fig,
                                          self.update_fig,
                                          interval=0.0000001,
                                          blit=True)
            plt.show()
            self.plotting.set_im(self.z2d,
                                 self.plotting_msk,
                                 self.cax,
                                 ani=True,
                                 update=self.update_fig)

        else:
            self.diag.integrals(self.model.var)
            time_str = 'time = %-6.2f'

            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.cla()
            # Axes.hold() was deprecated and removed; overplotting is the default

            ax1.set_title(time_str % self.t)
            ax1.set_xlabel('X')
            ax1.set_ylabel('Y')

            # ax2 = fig.add_subplot(1, 2, 2)
            # ax2.cla()
            # ax2.hold(True)
            # ax2.set_title( 'integral quantities')
            # ax2.set_xlabel('t')
            # ax2.set_ylabel('Ke')

            def on_press(event):
                # pause/resume: toggle matplotlib's interactive mode
                if self.ion:
                    plt.ioff()
                else:
                    plt.ion()
                self.ion = not self.ion
                plt.pause(1)

            self.ion = True

            im = ax1.imshow(self.z2d,
                            vmin=self.cax[0],
                            vmax=self.cax[1],
                            cmap=plt.get_cmap('jet'),
                            origin='lower',
                            interpolation='nearest')

            time = [0]

            z = self.diag.ke
            y = [z]

            #line1, = ax2.plot(time, y, '.-', alpha=0.8, color="gray", markerfacecolor="red")
            cb = plt.colorbar(im)

            fig.show()
            fig.canvas.draw()
            background1 = fig.canvas.copy_from_bbox(ax1.bbox)

            cid = fig.canvas.mpl_connect('button_press_event', on_press)
            fig.canvas.mpl_connect('key_press_event', on_press)

            #background2 = fig.canvas.copy_from_bbox(ax2.bbox)
            self.output.do(self.model.var.state, self.diag, self.t, self.kt)

            while (self.t < self.tend):
                self.diag.integrals(self.model.var)
                self.set_dt()
                self.model.step(self.t, self.dt)
                self.t += self.dt
                self.kt += 1
                self.output.do(self.model.var.state, self.diag, self.t,
                               self.kt)

                #print(self.t)
                if self.kt % self.freq_plot == 0:
                    time.append(self.t)
                    y.append(self.diag.ke)

                    if len(time) > 1000:
                        del time[0]
                        del y[0]

                    #line1.set_xdata(time)
                    #line1.set_ydata(y)

                    im.set_array(self.z2d)
                    ax1.set_title(time_str % self.t)
                    self.set_cax(self.z2d)

                    im.set_clim(vmin=self.cax[0], vmax=self.cax[1])
                    # Blitting (restore background1, draw_artist, blit) is
                    # disabled: it depends on ax2/line1/background2, which are
                    # commented out above. Do a full redraw instead.
                    fig.canvas.draw()

    def set_dt(self):
        """Set the time step from the CFL condition: dt = cfl * dx / maxspeed."""
        if (self.fixed_dt == 0) and (self.diag.maxspeed != 0):
            dt = self.cfl * self.dx / self.diag.maxspeed
            # low-pass filter the time step; filter_coef = 1. means no filtering
            self.filter_coef = 1.
            self.dt = (1. - self.filter_coef) * self.dt + self.filter_coef * dt
            if self.dt > self.dtmax:
                self.dt = self.dtmax
        # transfer the current max speed to the advection scheme
        self.model.ope.cst[2] = self.diag.maxspeed

    def set_cax(self, z):
        if self.colorscheme == 'minmax':
            self.cax = [z.min(), z.max()]
        elif self.colorscheme == 'symmetric':
            mm = abs(z).max()
            self.cax = [-mm, +mm]
        elif self.colorscheme == 'imposed':
            # self.cax is already defined
            pass
Example #28
0
from database import Database
from mqtt_connection import ConnToSensors
from plotting import Plotting

data = Database('newdb.db')
print(data.view_table())

if __name__ == '__main__':
    # 'wykres' is Polish for 'plot'; temp_hum() calls plt.show() itself
    wykres = Plotting('newdb.db')
    wykres.temp_hum()
Example #29
0
    def training(self, episodes):
        self.env.set_speed_mode(self.env.my_car_id, 0)
        state = None
        steps = 0

        # reward_type = "collision"
        # reward_type = "horizon"
        reward_type = "security_distance"
        speed_limit = True

        plt_data = {
            "collisions": [],
            "space_headway": [],
            "relative_speed": [],
            "speed": [],
            "steps": 0
        }

        while True:
            print(state)
            if state:
                plt_data["space_headway"].append(state.get("space_headway"))
                plt_data["relative_speed"].append(
                    round(state.get("relative_speed") * 3.6, 0))
                plt_data["speed"].append(round(state.get("speed") * 3.6, 0))

                d_t, ds_t, s_t = \
                    self.framing(state.get('space_headway'), self.i_dict_space_headway), \
                    self.framing(state.get('relative_speed'), self.i_dict_relative_speed), \
                    self.framing(state.get('speed'), self.i_dict_speed)

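                # epsilon-greedy: mostly exploit the current best action,
                # occasionally explore a random one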
                a = self.e_greedy_policy(d_t, ds_t, s_t)

                q_t = self.q[self.i_dict_space_headway.get(d_t),
                             self.i_dict_relative_speed.get(ds_t),
                             self.i_dict_speed.get(s_t),
                             self.i_dict_action.get(self.action[a])]

                new_speed = self.new_speed(self.action[a], state.get('speed'))
                self.env.set_speed(self.env.my_car_id, new_speed)
                self.env.simulation_step()
                next_state = self.env.get_state(self.env.my_car_id)

                q_max_t1 = None
                if self.env.is_collided(self.env.my_car_id):
                    self.set_reward_collision(reward_type)
                    self.env.set_speed(self.env.my_car_id, 0)
                    q_max_t1 = 0
                    state = None
                    plt_data["collisions"].append(steps)

                elif next_state:
                    """REWARD"""
                    """
                    if reward_type == "horizon":
                        self.set_reward_horizon_speed(next_state.get('space_headway'), next_state.get('speed'), speed_limit)
                    """

                    if reward_type == "security_distance":
                        self.set_reward_security_dist_speed(
                            next_state.get('space_headway'),
                            next_state.get('speed'), speed_limit)

                    print(f"reward {self.reward}")

                    d_t1, ds_t1, s_t1 = \
                        self.framing(next_state.get('space_headway'), self.i_dict_space_headway), \
                        self.framing(next_state.get('relative_speed'), self.i_dict_relative_speed), \
                        self.framing(next_state.get('speed'), self.i_dict_speed)

                    q_max_t1 = np.max(
                        self.q[self.i_dict_space_headway.get(d_t1),
                               self.i_dict_relative_speed.get(ds_t1),
                               self.i_dict_speed.get(s_t1)])

                    state = next_state

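                # Tabular Q-learning update:
                # Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma*max_a' Q(s',a'))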
                if q_max_t1 is not None:
                    self.q[
                        self.i_dict_space_headway.get(d_t),
                        self.i_dict_relative_speed.get(ds_t),
                        self.i_dict_speed.get(s_t),
                        self.i_dict_action.get(self.action[a])] = \
                        (1 - self.alpha) * q_t + self.alpha * (self.reward + self.gamma * q_max_t1)
                    """ PRINT Q"""
                    print(
                        f"q: {self.q[self.i_dict_space_headway.get(d_t), self.i_dict_relative_speed.get(ds_t), self.i_dict_speed.get(s_t)]}"
                    )

                steps += 1
                self.epsilon_decay(steps)
                # print(steps)
                # print(f"time: {self.env.get_current_time()}")
            else:
                self.env.simulation_step()
                state = self.env.get_state(self.env.my_car_id)
                self.env.set_speed(self.env.my_car_id, 0)

            if steps > (episodes * 10000):
                time.sleep(.1)

            if steps == episodes * 10000:
                plt_data["steps"] = steps
                plotting = Plotting(self, plt_data)
                plotting.plot_()
Example #30
0
        dCdx, tt, tt1 = gradient(phi, P.G.dy)

    f_c = 1. - 1./(S_1_bar*S)
    flux = P.c*f_c*g - P.D*dCdx
    flux[boundary] = 0  # zero the flux through boundary cells
    return flux  # , boundary

def increment_grainsize(P,G):
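    # sum the KT flux contributions along both grid directions
    # (axis 0 and axis 1); the NT variant is kept below for reference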
    return KT(P,G,0) + KT(P,G,1)
#     return NT(P,G,0) + NT(P,G,1)

if __name__ == "__main__":
    import initialise
    from numpy import random, maximum, ones
    from plotting import Plotting
    plot = Plotting()
    P,G,L = initialise.get_parameters(['bi_seg_test','22','2'])
    G.wipe(P)
    L.get_reference_node(P,G) # Find node down and left
    L.get_basis_functions(P,G) # Make basis functions
    L.get_nodal_mass_momentum(P,G) # Initialise from grid state
    plot.draw_gsd_grid(L,P,G)
    G.grad_gammadot = -1.*ones([P.G.ny*P.G.nx,3])
    # G.grad_gammadot[:,1] = 0
#     P.dt *= 10
#     G.phi = zeros_like(G.phi)
#     G.phi[40:120,0] = 0.5
#     G.phi[:,1] = 1- G.phi[:,0]
#     G.phi[40:100,1] = 0

    # P.c *= 100
Example #31
0
    def set_up_all(self):
        """
        Run at the start of each test suite.


        L3fwd Prerequisites
        """
        # Based on h/w type, choose how many ports to use
        ports = self.dut.get_ports(socket=1)
        if not ports:
            ports = self.dut.get_ports(socket=0)

        # Verify that enough ports are available
        self.verify(len(ports) >= 2, "Insufficient ports for speed testing")

        # Verify that enough threads are available
        cores = self.dut.get_core_list("2S/4C/2T")
        self.verify(cores is not None, "Insufficient cores for speed testing")

        global valports
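        # keep only DUT ports that are actually wired to a tester port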
        valports = [p for p in ports if self.tester.get_local_port(p) != -1]
        self.verify(len(valports) >= 2, "Insufficient active ports for speed testing")

        pat = re.compile("P([0123])")

        # Prepare long prefix match table, replace P(x) port pattern
        lpmStr = "static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {\\\n"
        for idx in range(len(TestL3fwd.lpm_table)):
            TestL3fwd.lpm_table[idx] = pat.sub(self.portRepl, TestL3fwd.lpm_table[idx])
            lpmStr = lpmStr + ' ' * 4 + TestL3fwd.lpm_table[idx] + ",\\\n"
        lpmStr = lpmStr + "};"
        self.logger.debug(lpmStr)

        # Prepare host route table, replace P(x) port pattern
        exactStr = "static struct ipv4_l3fwd_route ipv4_l3fwd_route_array[] = {\\\n"
        for idx in range(len(TestL3fwd.host_table)):
            TestL3fwd.host_table[idx] = pat.sub(self.portRepl, TestL3fwd.host_table[idx])
            exactStr = exactStr + ' ' * 4 + TestL3fwd.host_table[idx] + ",\\\n"
        exactStr = exactStr + "};"
        self.logger.debug(exactStr)

        # Compile l3fwd with LPM lookup.
        self.dut.send_expect(r"sed -i '/ipv4_l3fwd_route_array\[\].*{/,/^\}\;/c\\%s' examples/l3fwd/main.c" % lpmStr, "# ")
        out = self.dut.build_dpdk_apps("./examples/l3fwd", "USER_FLAGS=-DAPP_LOOKUP_METHOD=1")
        self.verify("Error" not in out, "compilation error 1")
        self.verify("No such file" not in out, "compilation error 2")

        # Backup the LPM exe and clean up the build.
        self.dut.send_expect("mv -f examples/l3fwd/build/l3fwd examples/l3fwd/build/l3fwd_lpm", "# ")
        out = self.dut.send_expect("make clean -C examples/l3fwd", "# ")

        # Compile l3fwd with hash/exact lookup.
        self.dut.send_expect(r"sed -i -e '/ipv4_l3fwd_route_array\[\].*{/,/^\}\;/c\\%s' examples/l3fwd/main.c" % exactStr, "# ")
        out = self.dut.build_dpdk_apps("./examples/l3fwd", "USER_FLAGS=-DAPP_LOOKUP_METHOD=0")

        self.verify("Error" not in out, "compilation error 1")
        self.verify("No such file" not in out, "compilation error 2")

        # Backup the Hash/Exact exe.
        self.dut.send_expect("mv -f examples/l3fwd/build/l3fwd examples/l3fwd/build/l3fwd_exact", "# ")

        self.l3fwd_test_results = {'header': [],
                                   'data': []}

        self.plotting = Plotting(self.dut.crb['name'], self.target, self.nic)
Example #32
0
def sweep_s11(log, f1, f2, nums, rstart, angle, rstop, tpolar, cpolar):
    # --------------------------------------------------------------------------
    # Initialize values
    #
    # Number of angular steps between rstart and rstop
    ant_no = int(numpy.floor((rstop - rstart) / angle) + 1)
    # If meas 0-360, don't take measurement at 360
    if (rstop == 360) and (rstart == 0):
        ant_no = ant_no - 1
    #
    # End initialize values
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Reset motor positions
    #
    motorSet[STAND_ROTATION].goto_zero()
    set_polarization(log, motorSet, tpolar, cpolar, mycursor)
    #
    # End reset motor positions
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Move test antenna to start degree position
    #
    log.info("Start Position: " + str(rstart))
    motorSet[M1].rot_deg(rstart)
    log.info("Motor setup complete")
    #
    # End move test antenna to start position
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Load state
    #
    analyzer.load_state()
    #
    # End load state
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Set network analyzer parameters
    #
    channel = 1
    trace = 1
    analyzer.setup(channel, trace)
    # analyzer.enable_display(False)

    # Set start frequency
    start = float(analyzer.set_start(channel, f1))
    if f1 != start:
        msg = "WARNING: Invalid start frequency, using " + str(start)
        print(msg)
        log.warning(msg)
        # f1_old = f1
        f1 = start

    # Set stop frequency
    stop = float(analyzer.set_stop(channel, f2))
    if f2 != stop:
        msg = "WARNING: Invalid stop frequency, using " + str(stop)
        print(msg)
        log.warning(msg)
        # f2_old = f2
        f2 = stop

    # Set number of points
    points = int(analyzer.set_points(channel, nums))
    if nums != points:
        msg = "WARNING: Invalid number of steps, using " + str(points)
        print(msg)
        log.warning(msg)
        # nums_old = nums
        nums = points

    # Create csv files
    # d = datetime.today()
    # file_name = os.path.join(DATA_PATH, d.strftime("%Y%m%d%H%M%S"))
    # s11_filename = file_name + "_s11.csv"
    s11_filename = os.path.join(DATA_PATH, "S11.csv")
    s11File = open(s11_filename, "w")
    #
    # End set network analyzer parameters
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Check for network analyzer errors
    log.info("Checking network analyzer error queue")
    err_nums, err_msgs = analyzer.get_errors()
    if len(err_nums) > 0:
        msg = "Error in setting network analyzer parameters"
        print(msg)
        log.warning(msg)
    else:
        # No errors
        log.info("No network analyzer errors detected")
    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Measure S11 (actually S22)
    #
    log.info("Measuring S11")
    print("Starting S11 Measurement")
    print("Start Frequency: " + str(f1 / 1e9) + " GHz")
    print("Stop Frequency: " + str(f2 / 1e9) + " GHz")
    print("Number of Points: " + str(nums))
    analyzer.set_measurement(channel, trace, 2, 2)
    analyzer.trigger()
    analyzer.update_display()
    analyzer.auto_scale(channel, trace)
    s11Freq = analyzer.get_x(channel)
    s11Data = analyzer.get_corr_data(channel)
    # s11Data = analyzer.get_form_data(channel)
    # Write to csv file
    log.info("Writing s11 data to file")
    s11File.write(s11Freq)
    s11File.write(s11Data)
    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Check for network analyzer errors
    log.info("Checking network analyzer error queue")
    err_nums, err_msgs = analyzer.get_errors()
    if len(err_nums) > 0:
        msg = "Error measuring S11"
        print(msg)
        log.warning(msg)
    else:
        # No errors
        msg = "S11 Measurement Successful"
        print(msg)
        log.info(msg)
    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Reset motor positions
    #
    motorSet[STAND_ROTATION].goto_zero()
    #
    # End reset motor positions
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Close csv files
    #
    s11File.close()
    #
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Update database
    #
    if db.is_connected():
        fstart = f1 / 1e9
        fstop = f2 / 1e9
        rowcount = mycursor.rowcount
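        # remember the row count so we can detect whether the updates took effect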

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Antenna polarization
        #
        log.info("Updating tpolar and cpolar in sql database")
        update_config_db(log, mycursor, 0, "'antenna_polarization'")
        update_config_db(log, mycursor, 0, "'chamber_polarization'")

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Network analyzer parameters
        #
        log.info("Updating fstart, fstop, and nums in sql database")
        update_config_db(log, mycursor, fstart, "'frequency_start'")
        update_config_db(log, mycursor, fstop, "'frequency_stop'")
        update_config_db(log, mycursor, nums, "'num_steps'")

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Commit changes
        log.info("Committing changes")
        db.commit()
        if rowcount == mycursor.rowcount:
            log.warning("Failed to store updated antenna polarization data")

    #
    # End update database
    # --------------------------------------------------------------------------

    # --------------------------------------------------------------------------
    # Call plotting function and write zip file
    #
    Plotting(f1, f2, nums, rstart, angle, rstop, 0, 0, 0, 0, 0, 0, "s11")
Example #33
0
class Animator:
    """Creator for animations and videos."""

    def __init__(self, hist_path: str, varname="b", video_path=None, visible=True):
        """Open the history file and load parts of it."""
        # Initialize self.hist_file to prevent the destructor from failing
        self.hist_file = None

        self.tracers = False

        # Set parameters needed for Plotting that cannot be determined
        # so far; maybe make them command line arguments in the future
        param = {
            "figsize": (RESOLUTION[0] / DPI, RESOLUTION[1] / DPI),
            "aspect": "equal",
            "rotation_speed": 3,
        }

        if varname == "b":
            param["style"] = "b-interface"
            param["stable_stratification"] = True  # TODO make this a command line argument
        elif varname == "t0":
            # This option is to plot only the first tracer and also a
            # shorter notation in the common case with only one tracer
            param["style"] = "tracer"
            param["n_tracers"] = 1
        elif varname == "tracer":
            param["style"] = "tracer"
            self.tracers = True
        else:
            raise NotImplementedError("The given variable is not yet supported.")

        # Save necessary arguments
        self.video_path = video_path
        self.visible = visible

        # Create the metadata for the video
        if self.video_path:
            # Extract the name of the experiment
            exp_name = os.path.basename(
                hist_path[:-8] if hist_path.endswith("_hist.nc") else hist_path
            )
            self.metadata = {
                "title": "Nyles experiment {}".format(exp_name),
                "artist": CREATOR,
                "genre": "Computational Fluid Dynamics (CFD)",
                "comment": "Created on {} with Nyles.  Nyles is a Large Eddy "
                           "Simulation written in Python.  For more information"
                           " visit https://github.com/pvthinker/Nyles."
                           .format(time.strftime('%d %b %Y')),
                "date": time.strftime("%Y-%m-%d"),
            }

        # Open the history file and keep it open to allow sequential reading
        print("Loading history file {!r}:".format(hist_path))
        self.hist_file = nc.Dataset(hist_path)
        print(self.hist_file)

        # Load the needed data
        if self.tracers:
            param["n_tracers"] = self.hist_file.n_tracers
            self.tracers_data = [
                self.hist_file["t{}".format(i)] for i in range(self.hist_file.n_tracers)
            ]
        else:
            self.vardata = self.hist_file[varname]
        self.t = self.hist_file["t"]
        self.n = self.hist_file["n"]
        self.n_frames = self.n.size

        # Load parameters needed for Grid
        param["Lx"] = self.hist_file.Lx
        param["Ly"] = self.hist_file.Ly
        param["Lz"] = self.hist_file.Lz
        param["nx"] = self.hist_file.global_nx
        param["ny"] = self.hist_file.global_ny
        param["nz"] = self.hist_file.global_nz

        # Set parameters needed for Scalar
        param["nh"] = 0
        param["neighbours"] = {}

        grid = Grid(param)

        # Create one or several Scalar variables as an interface for
        # passing data to the plotting module.  Note: Scalar takes
        # actually a dimension instead of a unit, but that does not
        # matter because this information is not processed here.
        if self.tracers:
            tracer_list = []
            self.arrays = []
            for data in self.tracers_data:
                tracer = Scalar(param, data.long_name, data.name, data.units)
                tracer_list.append(tracer)
                self.arrays.append(tracer.view("i"))
            state = State(tracer_list)
        else:
            scalar = Scalar(param, self.vardata.long_name, varname, self.vardata.units)
            self.array = scalar.view("i")
            state = State([scalar])

        self.p = Plotting(param, state, grid)

    def __del__(self):
        """Close the history file in the destructor."""
        if self.hist_file:
            self.hist_file.close()

    def init(self):
        """Show the initial frame."""
        if self.tracers:
            print("Variable:", len(self.tracers_data), "tracer(s)")
        else:
            print("Variable:", self.vardata.long_name)
        print("Number of frames:", self.n_frames)
        if self.video_path:
            print("Output file:", self.video_path, end="")
            if os.path.exists(self.video_path):
                print(" -- file exists already and will be overwritten!")
            else:
                print("")
            if not self.visible:
                print("Fast mode: no animation will be visible during the process.")
            else:
                print('Slow mode: call script with "--fast" to speed up the video creation.')
        else:
            print("No video will be created.")
        # Load the initial data and show it
        if self.tracers:
            for array, data in zip(self.arrays, self.tracers_data):
                array[...] = data[0]
        else:
            self.array[...] = self.vardata[0]
        self.p.init(self.t[0], self.n[0])

    def run(self):
        """Create the animation and optionally save it."""
        if not self.video_path:
            plt.ioff()
        self.anim = animation.FuncAnimation(
            self.p.fig,
            self.update,
            frames=self.n_frames,
            repeat=False,
            interval=0,
        )
        if self.visible:
            plt.show()
        if self.video_path:
            self.anim.save(
                self.video_path,
                fps=FPS,
                dpi=DPI,
                bitrate=BPS,
                metadata=self.metadata,
            )

    def update(self, frame):
        """Load the data of the given frame and display it."""
        print("\rProcessing frame {} of {} ...".format(frame+1, self.n_frames), end="")
        # Load the data and show it
        if self.tracers:
            for array, data in zip(self.arrays, self.tracers_data):
                array[...] = data[frame]
        else:
            self.array[...] = self.vardata[frame]
        self.p.update(self.t[frame], self.n[frame], self.visible)
        # At the end
        if frame + 1 == self.n_frames:
            if self.video_path:
                print("\b\b\b-- saved.")
            else:
                print("\b\b\b-- finished.")
                plt.pause(0.5)
                plt.close(self.p.fig)