Example no. 1
    def update(num_growth_pix):
        nrows = IGrid.nrows
        ncols = IGrid.ncols
        total_pixels = nrows * ncols
        road_pixel_count = IGrid.get_road_pixel_count(
            Processing.get_current_year())
        excluded_pixel_count = IGrid.get_excld_count()

        # Compute this year's stats
        Stats.compute_cur_year_stats()
        # Set num growth pixels
        Stats.set_num_growth_pixels(num_growth_pix)
        # Calculate growth rate
        Stats.cal_growth_rate()
        # Calculate percent urban
        Stats.cal_percent_urban(total_pixels, road_pixel_count,
                                excluded_pixel_count)

        output_dir = Scenario.get_scen_value('output_dir')
        cur_run = Processing.get_current_run()
        cur_year = Processing.get_current_year()
        if IGrid.test_for_urban_year(cur_year):
            Stats.cal_leesalee()
            filename = f"{output_dir}grow_{cur_run}_{cur_year}.log"
            Stats.save(filename)

        if Processing.get_processing_type() == Globals.mode_enum['predict']:
            filename = f"{output_dir}grow_{cur_run}_{cur_year}.log"
            Stats.save(filename)
Example no. 2
    def __init__(self, file_str, window_length, n_average):

        self.file_folder = "/data/"
        self.file_str = file_str

        self.window_length = window_length
        self.n_average = n_average
        self.bud = Processing(self.file_folder + self.file_str, verbose=True)
        self.n_buffer = 26214400  # = 25 * 1024**2
Example no. 3
    def analyze(fmatch):
        output_dir = Scenario.get_scen_value('output_dir')
        run = Processing.get_current_run()
        write_avg_file = Scenario.get_scen_value('write_avg_file')
        avg_filename = f'{output_dir}avg.log'
        write_std_dev_file = Scenario.get_scen_value('write_std_dev_file')
        std_filename = f'{output_dir}std_dev.log'
        control_filename = f'{output_dir}control_stats.log'

        if write_avg_file:
            if not os.path.isfile(avg_filename):
                Stats.create_stats_val_file(avg_filename)

        if write_std_dev_file:
            if not os.path.isfile(std_filename):
                Stats.create_stats_val_file(std_filename)

        if Processing.get_processing_type() != Globals.mode_enum['predict']:
            if not os.path.isfile(control_filename):
                Stats.create_control_file(control_filename)

            # start at i = 1; i = 0 is the initial seed, so append a
            # dummy StatsVal to stand in for it
            Stats.average.append(StatsVal())
            for i in range(1, IGrid.igrid.get_num_urban()):
                year = IGrid.igrid.get_urban_year(i)
                Stats.calculate_averages(i)
                Stats.process_grow_log(run, year)

                if write_avg_file:
                    Stats.write_stats_val_line(avg_filename, run, year,
                                               Stats.average[i], i)
                if write_std_dev_file:
                    Stats.write_stats_val_line(std_filename, run, year,
                                               Stats.std_dev[i], i)

            Stats.do_regressions()
            Stats.do_aggregate(fmatch)
            Stats.write_control_stats(control_filename)

        if Processing.get_processing_type() == Globals.mode_enum['predict']:
            start = int(Scenario.get_scen_value('prediction_start_date'))
            stop = Processing.get_stop_year()

            for year in range(start + 1, stop + 1):
                Stats.clear_stats()
                Stats.process_grow_log(run, year)
                if write_avg_file:
                    Stats.write_stats_val_line(avg_filename, run, year,
                                               Stats.average[0], 0)
                if write_std_dev_file:
                    Stats.write_stats_val_line(std_filename, run, year,
                                               Stats.std_dev[0], 0)

        Stats.clear_stats()
Example no. 4
def main():
    try:
        ui = Interface()

        file_path, config_file = ui.file_selector()
        p = Processing(file_path, config_file)
        result = p.analysis()
        name = p.output(result)

        ui.finish(name)
    except Exception:
        # Exit quietly on any failure in the UI/processing pipeline
        sys.exit(0)
Example no. 5
    def monte_carlo(cumulate, land1):
        log_it = Scenario.get_scen_value("logging")

        z = PGrid.get_z()
        total_pixels = IGrid.get_total_pixels()
        num_monte_carlo = int(
            Scenario.get_scen_value("monte_carlo_iterations"))

        for imc in range(num_monte_carlo):
            Processing.set_current_monte(imc)

            # Reset the Parameters
            Coeff.set_current_diffusion(Coeff.get_saved_diffusion())
            Coeff.set_current_spread(Coeff.get_saved_spread())
            Coeff.set_current_breed(Coeff.get_saved_breed())
            Coeff.set_current_slope_resistance(
                Coeff.get_saved_slope_resistance())
            Coeff.set_current_road_gravity(Coeff.get_saved_road_gravity())

            if log_it and Scenario.get_scen_value("log_initial_coefficients"):
                Coeff.log_current()

            # Run Simulation
            Stats.init_urbanization_attempts()
            TimerUtility.start_timer('grw_growth')
            Grow.grow(z, land1)
            TimerUtility.stop_timer('grw_growth')

            if log_it and Scenario.get_scen_value("log_urbanization_attempts"):
                Stats.log_urbanization_attempts()

            # Update Cumulate Grid
            for i in range(total_pixels):
                if z.gridData[i] > 0:
                    cumulate.gridData[i] += 1

            # Update Annual Land Class Probabilities
            if Processing.get_processing_type() == Globals.mode_enum["predict"]:
                LandClass.update_annual_prob(land1.gridData, total_pixels)

        # Normalize Cumulative Urban Image
        for i in range(total_pixels):
            cumulate.gridData[i] = 100 * cumulate.gridData[i] / num_monte_carlo
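For example, with monte_carlo_iterations set to 4, a pixel that was urbanized in 3 of the 4 runs ends up with a cumulative value of 100 * 3 / 4 = 75.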
Example no. 6
    def completion_status():
        mc_iters = int(Scenario.get_scen_value('monte_carlo_iterations'))
        cur_mc = int(Processing.get_current_monte())
        total_runs = int(Processing.get_total_runs())

        total_mc = (mc_iters * total_runs) / int(Globals.npes)
        total_mc_executed = (mc_iters *
                             Processing.get_num_runs_exec_this_cpu() + cur_mc)
        # Fraction of all monte carlo work that has finished, capped at 1.0
        complete = min(total_mc_executed / total_mc, 1.0)

        Logger.log(f"Run= {Processing.get_current_run()} of {total_runs} "
                   f"MC= {cur_mc} of {mc_iters} ({complete:.1%} complete)")
Example no. 7
class Crawler:
    def __init__(self, start_url):
        self.start_url = start_url
        self.host = urlparse(start_url).netloc
        self.http = urllib3.PoolManager()
        self.processing = Processing()

    def neighbor(self, url):
        try:
            response = self.http.request("GET", url)
            self.html = response.data
            return BeautifulSoup(self.html, "lxml")
        except Exception:
            print(f"could not open url {url}")
            return None

    def test(self, url):
        response = self.http.request("GET", url)
        html = response.data
        self.processing.data_processing(html)

    def bfs(self):
        queue = [self.start_url]
        visited = set()

        while queue:
            link = queue.pop(0)  # FIFO pop keeps the traversal breadth-first
            print(len(queue))

            uri = urljoin(self.start_url, link)
            parse_url = urlparse(uri)

            soup = self.neighbor(link)
            if soup is None:
                continue
            for tag in soup.findAll('a', href=True):
                tag = urljoin(self.start_url, tag['href'])
                tag = tag.split('#')[0]  # strip any fragment
                _, ext = os.path.splitext(tag)
                if tag not in visited and ext.lower() not in ('.jpg', '.pdf'):
                    visited.add(tag)
                    if parse_url.netloc == self.host:
                        queue.append(tag)
                        self.processing.data_processing(self.html)
                        print(tag)
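A minimal driver for the crawler above, assuming the imports the class relies on (urlparse, urljoin, urllib3, BeautifulSoup, os) are in scope; the URL is a placeholder:

    if __name__ == "__main__":
        crawler = Crawler("https://example.com/")
        crawler.bfs()  # crawl breadth-first, processing same-host pages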
Example no. 8
def main():

    process = Processing()

    process.read_csv()
    process.load_data()
    process.apply_learning_algo(0.01)
Example no. 9
    def grow_landuse(land1, num_growth_pix):
        nrows = IGrid.nrows
        ncols = IGrid.ncols
        ticktock = Processing.get_current_year()
        landuse0_year = IGrid.igrid.get_landuse_year(0)
        landuse1_year = IGrid.igrid.get_landuse_year(1)
        urban_code = LandClass.get_urban_code()
        new_indices = LandClass.get_new_indices()
        landuse_classes = LandClass.get_landclasses()
        class_indices = LandClass.get_reduced_classes()
        background = IGrid.igrid.get_background()
        slope = IGrid.igrid.get_slope()
        deltatron = PGrid.get_deltatron()
        z = PGrid.get_z()
        land2 = PGrid.get_land2()
        class_slope = Transition.get_class_slope()
        ftransition = Transition.get_ftransition()

        if ticktock >= landuse0_year:
            # Place the New Urban Simulation into the Land Use Image
            Utilities.condition_gt_gif(z.gridData, 0, land1.gridData,
                                       urban_code)
            Deltatron.deltatron(new_indices, landuse_classes, class_indices,
                                deltatron, land1, land2, slope, num_growth_pix,
                                class_slope, ftransition)

            # Switch the old to the new
            land1.gridData[:] = land2.gridData

        if Processing.get_processing_type() == Globals.mode_enum['predict'] or \
            (Processing.get_processing_type() == Globals.mode_enum['test'] and
             Processing.get_current_monte() == Processing.get_last_monte()):
            # Write land1 to file
            base = f"{Scenario.get_scen_value('output_dir')}{IGrid.igrid.location}" \
                   f"_land_n_urban.{Processing.get_current_year()}"
            if IGrid.using_gif:
                filename = f"{base}.gif"
            else:
                filename = f"{base}.tif"
                IGrid.echo_meta(f"{base}.tfw", "landuse")

            date = f"{Processing.get_current_year()}"
            ImageIO.write_gif(land1, Color.get_landuse_table(), filename, date,
                              nrows, ncols)

        # Compute final match statistics for landuse
        Utilities.condition_gt_gif(z.gridData, 0, land1.gridData, urban_code)
Example no. 10
	def agent_query(self, shipment):
		current_process().daemon = False
		processing_object = Processing()
		self.q = processing_object.create_queue()
		p = processing_object.new_process(
			self._child_process,
			[processing_object, shipment]
			)
		for route in shipment.routes():
			# Expect one queued response per route; give up after 4 seconds
			queue_response = self.q.get(timeout=4)
			shipment.manifest[route] = queue_response
		p.join()
		return shipment
Example no. 11
def post_image_rev_video():
    """
    Inverts the intensities of a grayscale image.
    Only works for grayscale images

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        dict: image with inverted intensities.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)
    try:
        image_data, new_image["processing_time"] = \
            Processing(b64str_to_numpy(
                current_image.image_data)).reverse_video()
    except ValueError:
        return error_handler(400, "must be grayscale", "ValueError")
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data, is_gray=True)
    new_image["process"] = "reverse_video"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
Example no. 12
def post_image_blur():
    """
    Takes the CURRENT image and performs a blur on the whole image.

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        object: blurred image.
    """
    content = request.get_json()
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)

    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).blur()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "blur"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
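These image endpoints (and the similar ones later in this list) all accept the same request shape; a minimal client sketch follows, where the host, port, and route path are assumptions, since the snippets don't show how the Flask routes are registered:

    import requests

    resp = requests.post("http://localhost:5000/image/blur",
                         json={"email": "user@example.com"})
    print(resp.json()["process"], resp.json()["processing_time"])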
Example no. 13
    def write_z_prob_grid(z, name):
        # copy background into z_prob and remap background pixels
        # which collide with the seed, prob colors, and date
        nrows = IGrid.nrows
        ncols = IGrid.ncols
        total_pix = nrows * ncols

        background = IGrid.igrid.get_background_grid()
        prob_color_cnt = len(Scenario.get_scen_value('probability_color'))

        lower_bounds = [UGMDefines.SEED_COLOR_INDEX, UGMDefines.DATE_COLOR_INDEX]
        upper_bounds = [UGMDefines.SEED_COLOR_INDEX + prob_color_cnt, UGMDefines.DATE_COLOR_INDEX]
        indices = [UGMDefines.SEED_COLOR_INDEX + prob_color_cnt + 1, UGMDefines.DATE_COLOR_INDEX - 1]

        z_prob = Utilities.map_grid_to_index(background, lower_bounds, upper_bounds, indices, total_pix)

        if Processing.get_processing_type() == Globals.mode_enum['predict']:
            # Map z_ptr pixels into desired prob indices and save in overlay
            prob_list = Scenario.get_scen_value('probability_color')
            lower_bounds = []
            upper_bounds = []
            indices = []
            for i, prob in enumerate(prob_list):
                lower_bounds.append(prob.lower_bound)
                upper_bounds.append(prob.upper_bound)
                indices.append(i + 2)

            indices[0] = 0
            overlay = Utilities.map_grid_to_index(z, lower_bounds, upper_bounds, indices, total_pix)

            # Overlay overlay grid onto the z_prob grid
            z_prob = Utilities.overlay(z_prob, overlay)

            # Overlay urban_seed into the z_prob grid
            z_prob = Utilities.overlay_seed(z_prob, total_pix)
        else:
            # TESTING
            # Map z grid pixels into desired seed_color_index and save in overlay pt
            lower_bounds = [1]
            upper_bounds = [100]
            indices = [UGMDefines.SEED_COLOR_INDEX]

            overlay = Utilities.map_grid_to_index(z.gridData, lower_bounds, upper_bounds, indices, total_pix)

            # Overlay overlay grid onto the z_prob grid
            z_prob = Utilities.overlay(z_prob, overlay)

        # The file writer needs to take in a Grid, so we're going to wrap our z_prob list in a grid
        z_prob_grid = IGrid.wrap_list(z_prob)
        base = f"{Scenario.get_scen_value('output_dir')}{IGrid.igrid.get_location()}" \
               f"{name}{Processing.get_current_year()}"
        if IGrid.using_gif:
            filename = f"{base}.gif"
        else:
            filename = f"{base}.tif"
            IGrid.echo_meta(f"{base}.tfw", "urban")

        date = f"{Processing.get_current_year()}"
        ImageIO.write_gif(z_prob_grid, Color.get_probability_table(), filename, date, IGrid.nrows, IGrid.ncols)
Example no. 14
def execute_algorithm(algorithm_id, feedback=None, **parameters):

    algorithm = QgsApplication.processingRegistry().createAlgorithmById(
        algorithm_id)
    if algorithm is None:
        raise QgsProcessingException(
            Processing.tr('Error: Algorithm {0} not found\n').format(algorithm_id))

    if feedback is None:
        feedback = QgsProcessingFeedback()

    context = createContext(feedback)

    parameters_ok, msg = algorithm.checkParameterValues(parameters, context)
    if not parameters_ok:
        raise QgsProcessingException(msg)

    if not algorithm.validateInputCrs(parameters, context):
        feedback.reportError(
            Processing.tr('Warning: Not all input layers use the same CRS.\n'
                          'This can cause unexpected results.'))

    results, execution_ok = algorithm.run(parameters, context, feedback)

    if execution_ok:
        return results
    else:
        msg = Processing.tr("There were errors executing the algorithm.")
        raise QgsProcessingException(msg)
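A hedged usage sketch for execute_algorithm above: 'native:buffer' and its parameter names come from the stock QGIS provider, but the file paths are placeholders:

    buffered = execute_algorithm(
        'native:buffer',
        INPUT='/tmp/roads.shp',
        DISTANCE=100.0,
        OUTPUT='/tmp/roads_buffered.shp')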
Example no. 15
def post_hist_eq():
    """
    Takes the CURRENT image and performs histogram equalization on it.

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        object: New hist eq'd image.
    """
    # should take the current image with all info
    content = request.get_json()
    # grab the user's current image.
    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)
    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)).hist_eq()
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "hist_eq"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
Example no. 16
def post_image_contrast_stretch():
    """
    Takes the CURRENT image and performs a contrast stretch on it.

    POSTed request should contain:
        email: ID of the current user.

    Returns:
        object: New contrast stretched image.
    """
    content = request.get_json()
    p_low = int(request.args.get("l", 10))
    p_high = int(request.args.get("h", 90))
    percentile = (p_low, p_high)

    user_image_id = db.get_current_image_id(content["email"])
    current_image = db.find_image(user_image_id, content["email"])
    new_image = _link_new_image(current_image)

    image_data, new_image["processing_time"] = \
        Processing(b64str_to_numpy(current_image.image_data)
                   ).contrast_stretch(percentile)
    new_image = _populate_image_meta(new_image, image_data)
    new_image["image_data"] = numpy_to_b64str(image_data,
                                              format=new_image["format"])
    new_image["image_data"], _ = _get_b64_format(new_image["image_data"])
    new_image["histogram"] = _get_b64_histogram(image_data)
    new_image["process"] = "contrast_stretch"
    db.update_user_process(content["email"], new_image["process"])
    return jsonify(new_image)
Example no. 17
    def set_processing_data(self, sim_data, item_type, item_configuration):
        """
        get processing (parallelization) data from database and store in sim_data
        :param sim_data: (class SimulationData)
        :param item_type: (string)
        :param item_configuration: (string)
        :return:
        """
        processing = Processing()

        processing.number_cpus = self.__gateToDatabase.query_column_entry(
            'types', self.__gateToDatabase.query_id_for_name('types', item_type), 'number_cpus')
        processing.mode = self.__gateToDatabase.query_column_entry(
            'configurations', self.__gateToDatabase.query_id_for_name(
                'configurations', item_configuration), 'processing')

        sim_data.processing = processing
Example no. 18
    def calculate_stand_dev(idx):
        temp = StatsVal()
        total_mc = int(Scenario.get_scen_value('monte_carlo_iterations'))
        if (idx == 0 and
                Processing.get_processing_type() != Globals.mode_enum['predict']):
            raise ValueError("index 0 is only valid in predict mode")
        temp.calculate_sd(total_mc, Stats.record, Stats.average[idx])
        Stats.std_dev.append(temp)
Example no. 19
    def load(self, filename) -> None:
        """
        Loads an image containing a mathematical expression and extracts individual symbols from it
        @param filename: Path to a file
        """
        self.image = cv2.imread(filename)
        self.segments = Processing.extract_segments(self.image,
                                                    draw_rectangles=True)
Example no. 20
def show_image(image, title='', save_path=None):
    plt.figure()
    image = image.cpu().clone().squeeze(0)
    image = Processing.postprocessor(image)
    plt.imshow(image)
    plt.title(title)
    if save_path is not None:
        plt.savefig(save_path)
    plt.pause(0.001)  # give the GUI event loop a moment to draw
Example no. 21
    def save(filename):
        Stats.record.run = Processing.get_current_run()
        Stats.record.monte_carlo = Processing.get_current_monte()
        Stats.record.year = Processing.get_current_year()
        index = 0
        if Processing.get_processing_type() != Globals.mode_enum['predict']:
            index = IGrid.igrid.urban_yr_to_idx(Stats.record.year)

        Stats.update_running_total(index)

        # Write the record to file: the first monte carlo run creates
        # (or overwrites) the file; later runs append
        mode = 'wb' if Stats.record.monte_carlo == 0 else 'ab'
        with open(filename, mode) as output:
            _pickle.dump(Stats.record, output, -1)
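The records appended here are read back one _pickle.load at a time, until EOFError, by process_grow_log in the next example.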
Example no. 22
    def process_grow_log(run, year):
        output_dir = Scenario.get_scen_value('output_dir')
        filename = f'{output_dir}grow_{run}_{year}.log'
        mc_iters = int(Scenario.get_scen_value('monte_carlo_iterations'))
        mc_count = 0
        grow_records = []
        with open(filename, "rb") as openfile:
            while True:
                try:
                    grow_records.append(_pickle.load(openfile))
                except EOFError:
                    break
        """print(f"****************Year {year}***********************")
        for record in grow_records:
            print(record)
        print("***************************************")"""
        if len(grow_records) > int(
                Scenario.get_scen_value('monte_carlo_iterations')):
            raise AssertionError(
                "Num Records is larger than Monte Carlo iters")
        if Processing.get_processing_type() == Globals.mode_enum['predict']:
            for record in grow_records:
                Stats.record = record
                Stats.update_running_total(0)
            Stats.calculate_averages(0)

        for record in grow_records:
            Stats.record = record
            if mc_count >= mc_iters:
                Logger.log("mc_count >= scen_GetMonteCarloIterations ()")
                sys.exit(1)
            if Processing.get_processing_type() != Globals.mode_enum['predict']:
                index = IGrid.igrid.urban_yr_to_idx(Stats.record.year)
                Stats.calculate_stand_dev(index)
            else:
                Stats.calculate_stand_dev(0)

            mc_count += 1
        os.remove(filename)
Example no. 23
    def load(self) -> None:
        """
        Loads fonts from data/ folder and creates data and labels used for training the network
        """
        for font in range(self.fonts):
            image = cv2.imread('data/font' + str(font) + '.png')
            self.data.extend(Processing.extract_segments(image))
        # One one-hot label per symbol class, repeated once per font
        self.labels = [[0] * len(Constants.symbols)
                       for _ in range(len(Constants.symbols))]
        for i in range(len(self.labels)):
            self.labels[i][i] = 1
        self.labels *= self.fonts
Example no. 24
class Data():
    def __init__(self):
        self.processing = Processing()
        self.file = pd.read_csv('file.csv')
        self.header = self.file.columns.tolist()
        self.option_list = []
        self.list = []

    def initialize(self):
        index = len(self.header)
        print("Choose two options for statistical evaluation: \n")
        for i in range(0, index):
            print(i, " = ", self.header[i])
        self.selections(index)

    def selections(self, index):
        try:
            choice = int(input())
            if not 0 <= choice < index:
                print("Choose an element from the list ...")
                self.selections(index)
                return  # don't record the out-of-range choice as well
            self.list.append(choice)
            if len(self.list) < 2:
                self.selections(index)
            else:
                self.controller(self.list)
        except ValueError:
            print("Select one of the above options (numeric value): ")
            self.selections(index)

    def controller(self, selected):
        for x in selected:
            self.option_list.append(self.header[x])

        index1 = self.file[self.option_list[0]]
        index2 = self.file[self.option_list[1]]

        self.processing.data_processing(index1, index2, self.option_list)
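A minimal driver for the menu above; it assumes file.csv exists in the working directory, as the constructor requires:

    if __name__ == "__main__":
        Data().initialize()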
Example no. 25
def main():
    sparky = Processing()
    logging.basicConfig(level=logging.WARN)
    global Logger

    # These function calls have to be done the first time the program runs.
    # They are used to write tweet texts to the file "irmaHurricaneTweets.csv".
    Logger = logging.getLogger('get_tweets_by_id')
    """
        ******************************************
        TWEET COLLECTION;
        IF(!) you want to collect your own dataset instead of the sample.
        The four lines below this comment must be uncommented(remove #) and run.
        Remember to comment back in order to not overwrite the file again when running functionality on dataset 
    
    """
    #fhand = open("irmaHurricaneTweets.csv", "w+")
    #fhand.truncate()
    #fhand.close()
    #get_tweets_bulk(api=authentication(), file="irma_tweet_ids.txt", outputcsvfile="irmaHurricaneTweets.csv")

    td.dialogue(sparky)
    sparky.stopspark()
Example no. 26
    def __init__(self,root):
        self.process = Processing()
        self.root = root
        self.buttonFrame = tk.Frame(root)
        self.container = tk.Frame(root)
        self.buttonFrame.pack(side='top', fill='x', expand=False)
        self.container.pack(side='top', fill='both', expand=True)

        self.login_frame = tk.Frame(root) 
        self.select_sub_frame = tk.Frame(root)
        self.attendence_frame = tk.Frame(root)
        self.graph_frame = tk.Frame(root)
        
        self.login_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
        self.select_sub_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
        self.attendence_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)
        self.graph_frame.place(in_=self.container, x=0, y=0, relwidth=1, relheight=1)

        self.roll_numbers_frame = tk.Frame(self.attendence_frame)
        self.subbmit_button_frame = tk.Frame(self.attendence_frame)
        self.roll_numbers_frame.pack(side="top", fill="both", expand=True)
        self.subbmit_button_frame.pack(side="top")
        self.fill_button_frame()
Example no. 27
    def process_tweets(self):
        """
        Process all tweets collected
        """
        for data in self.datas:
            t = {
                'id': data['id'],
                'user': data['user']['screen_name'],
                'original': data['text'],
                'processed': Processing(data['text']).execute(),
                'evaluation': 0
            }
            self.tweets.append(t)

        return self.tweets
Example no. 28
def _get_b64_histogram(image_data, is_gray=False):
    """
    Gets a base 64 representation of a histogram for an image

    Args:
        image_data (np.ndarray): Image.
        is_gray (bool): Whether the image is grayscale.

    Returns:
        str: Base 64 representation of the histogram for image.
    """
    histogram = Processing(image_data, is_color=False).histogram(
        image_data, is_gray=is_gray)
    histogram = histogram[:, :, :3]  # keep the first three (color) channels
    return numpy_to_b64str(histogram)
Example no. 29
    def grow_non_landuse(z):
        num_monte = int(Scenario.get_scen_value('monte_carlo_iterations'))
        cumulate_monte_carlo = Grid()
        filename = f"{Scenario.get_scen_value('output_dir')}cumulate_monte_carlo.year_{Processing.get_current_year()}"

        if Processing.get_processing_type() != Globals.mode_enum['calibrate']:
            if Processing.get_current_monte() == 0:
                # Zero out accumulation grid
                cumulate_monte_carlo.init_grid_data(IGrid.total_pixels)
            else:
                Input.read_file_to_grid(filename, cumulate_monte_carlo)

            # Accumulate Z over monte carlos
            for i in range(IGrid.total_pixels):
                if z[i] > 0:
                    cumulate_monte_carlo.gridData[i] += 1

            if Processing.get_current_monte() == num_monte - 1:
                if Processing.get_processing_type() == Globals.mode_enum['test']:
                    Utilities.condition_gt_gif(z, 0,
                                               cumulate_monte_carlo.gridData,
                                               100)
                else:
                    # Normalize Accumulated grid
                    for i in range(IGrid.total_pixels):
                        cumulate_monte_carlo.gridData[i] = \
                            100 * cumulate_monte_carlo.gridData[i] / num_monte

                Utilities.write_z_prob_grid(cumulate_monte_carlo, "_urban_")
                if Processing.get_current_monte() != 0:
                    os.remove(filename)
            else:
                # Dump accumulated grid to disk
                Output.write_grid_to_file(filename, cumulate_monte_carlo)
Example no. 30
    def landuse_init(deltatron, land1):
        total_pixels = IGrid.nrows * IGrid.ncols

        # Initialize Deltatron Grid to Zero
        for i in range(len(deltatron)):
            deltatron[i] = 0

        if Processing.get_processing_type() == Globals.mode_enum['predict']:
            landuse = IGrid.igrid.get_landuse_igrid(1)
        else:
            landuse = IGrid.igrid.get_landuse_igrid(0)

        for i in range(total_pixels):
            land1[i] = landuse[i]
Example no. 31
    def start(self):
        # Script startup steps
        logger.info('PolyEngine v1.0')
        config = Config('config.ini')

        project_name = config.check_setting('PolyEngine', 'Name')
        logger.info('Starting project {}', project_name)

        message = config.check_setting('PolyEngine', 'Message')
        logger.info(message)

        # Source directory of project based on config file
        source_directory = config.check_setting('Compile', 'SourceDirectory')

        # Create the temporary code modification workspace
        workspace = Workspace(source_directory)
        workspace.create_workspace()

        # Process the files
        for f in workspace.source_files:
            if f is not None:
                processor = Processing(f)
                processor.process()

        for f in workspace.header_files:
            if f is not None:
                processor = Processing(f)
                processor.process()

        # Initialize the compiler once information has been loaded
        output_file = config.check_setting('Compile', 'Output')
        commands = config.check_setting('Compile', 'Commands')
        compiler_option = config.check_setting('Compile', 'Compiler')

        if compiler_option in ('gcc', 'g++'):
            compiler = Compile(compiler_option, workspace.source_files,
                               commands, output_file)
            compiler.compile()
        else:
            logger.error('Invalid compiler option selected.')
            exit('Invalid compiler.')

        # Cleanup workspace and exit
        print()
        Cleanup.clean_exit(workspace.work_path)
Example no. 32
    def set_base_stats():
        Stats.record = Record()
        urban_num = IGrid.igrid.get_num_urban()
        slope = IGrid.igrid.get_slope().gridData
        for i in range(urban_num):
            urban = IGrid.igrid.get_urban_idx(i).gridData
            stats_info = StatsInfo()
            Stats.compute_stats(urban, slope, stats_info)
            road_pixel_count = IGrid.get_road_pixel_count(
                Processing.get_current_year())
            excluded_pixel_count = IGrid.get_excld_count()

            percent_urban = 100.0 * 100.0 * (stats_info.pop + road_pixel_count) / \
                            (IGrid.nrows * IGrid.ncols - road_pixel_count - excluded_pixel_count)

            stats_info.percent_urban = percent_urban
            Stats.actual.append(stats_info)
Example no. 33
def main():

    preProcessing = PreProcessing("mnist_train.csv")
    #preProcessing.preProcessData()

    # number of hidden units
    processing = Processing(10)
    processing.load_data("mnist_train_scaled.csv",
                         "mnist_train_targetClass.csv")

    processing.processing()

    for arg in sys.argv[1:]:
        print(arg)
Example no. 34
def process_images():
    for img in images:
        image = Image(img, 1)
        processing = Processing(image)
        processing.get_domino_points()