def get_data(data_info,
             data_path=args.data_path,
             image_folder=args.images_folder,
             mask_folder=args.masks_folder,
             image_type=args.image_type,
             mask_type=args.mask_type):

    x = []
    y = []
    for _, row in data_info.iterrows():
        filename = get_fullname(row['name'], row['position'])

        image_path = get_filepath(data_path,
                                  row['name'],
                                  image_folder,
                                  filename,
                                  file_type=image_type)
        mask_path = get_filepath(data_path,
                                 row['name'],
                                 mask_folder,
                                 filename,
                                 file_type=mask_type)

        x.append(read_tensor(image_path))
        y.append(read_tensor(mask_path))

    x = np.array(x)
    y = np.array(y)
    y = y.reshape([*y.shape, 1])

    return x, y
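A minimal usage sketch for `get_data` (an assumption-laden illustration: `data_info` must be a pandas DataFrame with 'name' and 'position' columns, as the `iterrows` loop implies, and the module-level `args` defaults must already point at the dataset):

import pandas as pd

# Hypothetical index file; the real one comes from the surrounding project.
data_info = pd.read_csv('train_df.csv')  # needs 'name' and 'position' columns
x, y = get_data(data_info)
print(x.shape, y.shape)  # y gains a trailing singleton channel axis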
Example #2
def main(save_filename, unique_metatiles_file, player_img):

    print("Creating id maps from unique metatiles file %s..." %
          unique_metatiles_file)
    start_time = datetime.now()

    metatile_count = 0
    id_metatile_map = {}
    metatile_id_map = {}

    unique_metatiles = read_pickle(unique_metatiles_file)

    for metatile in unique_metatiles:
        metatile_count += 1
        metatile_id = "t%d" % metatile_count
        metatile_str = metatile.to_str()
        id_metatile_map[metatile_id] = metatile_str
        metatile_id_map[metatile_str] = metatile_id

    level_saved_files_dir = "level_saved_files_%s/" % player_img
    outfile = "%s.pickle" % save_filename

    id_metatile_map_file = get_filepath(
        level_saved_files_dir + "id_metatile_maps", outfile)
    metatile_id_map_file = get_filepath(
        level_saved_files_dir + "metatile_id_maps", outfile)

    write_pickle(id_metatile_map_file, id_metatile_map)
    write_pickle(metatile_id_map_file, metatile_id_map)

    end_time = datetime.now()
    runtime = str(end_time - start_time)
    print("Runtime: %s" % runtime)

    return id_metatile_map_file, metatile_id_map_file, runtime
Example #3
    def process_answer_set(self, model_str):
        player_img, prolog_filename = Solver.parse_prolog_filepath(
            self.prolog_file)
        answer_set_filename = self.get_cur_answer_set_filename(prolog_filename)

        # Create assignments dictionary {(tile_x, tile_y): tile_id}
        assignments_dict = Solver.create_assignments_dict(model_str)

        # Create and save structural txt file for the generated level
        level_structural_txt = ""
        for row in range(self.level_h):
            for col in range(self.level_w):
                tile_xy = (col, row)
                tile_id = assignments_dict.get(tile_xy)
                tile_char = self.get_tile_char(tile_id)
                level_structural_txt += tile_char
            level_structural_txt += "\n"

        if self.save:
            generated_level_txt_dir = "level_structural_layers/generated/"
            level_structural_txt_file = get_filepath(
                generated_level_txt_dir, "%s.txt" % answer_set_filename)
            write_file(level_structural_txt_file, level_structural_txt)

            generated_level_assignments_dir = "level_saved_files_%s/generated_level_assignments_dicts/" % player_img
            level_assignments_file = get_filepath(
                generated_level_assignments_dir,
                "%s.pickle" % answer_set_filename)
            write_pickle(level_assignments_file, assignments_dict)

            generated_level_model_str_dir = "level_saved_files_%s/generated_level_model_strs/" % player_img
            level_model_str_file = get_filepath(generated_level_model_str_dir,
                                                "%s.txt" % answer_set_filename)
            write_file(level_model_str_file, model_str)  # plain text, not pickle

        if self.print_level:
            print(level_structural_txt)

        if self.validate:
            asp_valid = Solver.asp_is_valid(
                check_path=True,
                check_onground=self.require_all_platforms_reachable,
                check_bonus=self.require_all_bonus_tiles_reachable,
                model_str=model_str,
                player_img=player_img,
                answer_set_filename=answer_set_filename,
                tile_ids=self.tile_ids.copy(),
                save=self.save)
            self.asp_valid_levels_count += 1 if asp_valid else 0

            # state_graph_valid_path = Solver.get_state_graph_valid_path(assignments_dict, player_img, prolog_filename,
            #                                                            answer_set_filename, save=self.save)
            # self.state_graph_valid_levels_count += 1 if state_graph_valid_path is not None else 0

        self.increment_answer_set_count()
Example #4
    def get_input_pair(self, data_info_row):
        if len(self.channels) == 0:
            raise ValueError('You have to specify at least one channel.')

        instance_name = '_'.join(
            [data_info_row['name'],
             str(data_info_row['position'])])
        image_path = get_filepath(self.dataset_path,
                                  data_info_row['name'],
                                  self.images_folder,
                                  instance_name,
                                  file_type=self.image_type)
        mask_path = get_filepath(self.dataset_path,
                                 data_info_row['name'],
                                 self.masks_folder,
                                 instance_name,
                                 file_type=self.mask_type)

        images_array = filter_by_channels(read_tensor(image_path),
                                          self.channels, self.neighbours)

        if images_array.ndim == 2:
            images_array = np.expand_dims(images_array, -1)

        masks_array = read_tensor(mask_path)

        aug = Compose([
            RandomRotate90(),
            Flip(),
            OneOf(
                [
                    RandomSizedCrop(min_max_height=(int(
                        self.image_size * 0.7), self.image_size),
                                    height=self.image_size,
                                    width=self.image_size),
                    RandomBrightnessContrast(brightness_limit=0.15,
                                             contrast_limit=0.15),
                    #MedianBlur(blur_limit=3, p=0.2),
                    MaskDropout(p=0.6),
                    ElasticTransform(alpha=15, sigma=5, alpha_affine=5),
                    GridDistortion(p=0.6)
                ],
                p=0.8),
            ToTensor()
        ])

        augmented = aug(image=images_array, mask=masks_array)
        augmented_images = augmented['image']
        augmented_masks = augmented['mask']
        if self.classification_head:
            masks_class = ((augmented_masks.sum() > 0) *
                           1).unsqueeze(-1).float()  #.type(torch.FloatTensor)
            return augmented_images, [augmented_masks, masks_class]
        else:
            return {'features': augmented_images, 'targets': augmented_masks}
Example #5
def predict(data_path, model_weights_path, network, test_df_path, save_path,
            size, channels, neighbours, classification_head):
    model = get_model(network, classification_head)
    model.encoder.conv1 = nn.Conv2d(count_channels(channels) * neighbours,
                                    64,
                                    kernel_size=(7, 7),
                                    stride=(2, 2),
                                    padding=(3, 3),
                                    bias=False)

    model, device = UtilsFactory.prepare_model(model)

    if classification_head:
        model.load_state_dict(torch.load(model_weights_path))
    else:
        checkpoint = torch.load(model_weights_path, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])

    test_df = pd.read_csv(test_df_path)

    predictions_path = os.path.join(save_path, "predictions")

    if not os.path.exists(predictions_path):
        os.makedirs(predictions_path, exist_ok=True)
        print("Prediction directory created.")

    for _, image_info in tqdm(test_df.iterrows(), total=len(test_df)):
        filename = '_'.join([image_info['name'], image_info['position']])
        image_path = get_filepath(data_path,
                                  image_info['dataset_folder'],
                                  'images',
                                  filename,
                                  file_type='tiff')

        image_tensor = filter_by_channels(read_tensor(image_path), channels,
                                          neighbours)
        if image_tensor.ndim == 2:
            image_tensor = np.expand_dims(image_tensor, -1)

        image = transforms.ToTensor()(image_tensor)
        if classification_head:
            prediction, label = model.predict(
                image.view(1,
                           count_channels(channels) * neighbours, size,
                           size).to(device, dtype=torch.float))
        else:
            prediction = model.predict(
                image.view(1,
                           count_channels(channels) * neighbours, size,
                           size).to(device, dtype=torch.float))

        result = prediction.view(size, size).detach().cpu().numpy()

        cv.imwrite(get_filepath(predictions_path, filename, file_type='png'),
                   result * 255)
Example #6
def describe(filepath='../data/dataset_train.csv', number_of_columns=15):
    if ul.get_filepath() is not None:
        filepath = ul.get_filepath()
    with open(filepath, 'r') as f:
        header = f.readline()
        h = header.strip().split(',')  # strip the trailing newline
        h.insert(0, '')  # empty header cell for the stat-name column

    data = np.genfromtxt(filepath, delimiter=',', skip_header=1)

    # Per-column statistics, ignoring NaN entries.
    count = list(np.count_nonzero(~np.isnan(data), axis=0))
    count.insert(0, 'Count')
    mean = list(np.nanmean(data, axis=0))
    mean.insert(0, 'Mean')
    std = list(np.nanstd(data, axis=0))
    std.insert(0, 'Standard Dev.')
    col_min = list(np.nanmin(data, axis=0))
    col_min.insert(0, 'Min.')
    perc_25 = list(np.nanpercentile(data, 25, axis=0))
    perc_25.insert(0, '25%')
    perc_50 = list(np.nanpercentile(data, 50, axis=0))
    perc_50.insert(0, '50%')
    perc_75 = list(np.nanpercentile(data, 75, axis=0))
    perc_75.insert(0, '75%')
    col_max = list(np.nanmax(data, axis=0))
    col_max.insert(0, 'Max.')

    if ul.get_number_of_columns() is not None:
        number_of_columns = int(ul.get_number_of_columns())
    # Truncate every row to the requested number of columns.
    h = h[:number_of_columns]
    count = count[:number_of_columns]
    mean = mean[:number_of_columns]
    std = std[:number_of_columns]
    col_min = col_min[:number_of_columns]
    perc_25 = perc_25[:number_of_columns]
    perc_50 = perc_50[:number_of_columns]
    perc_75 = perc_75[:number_of_columns]
    col_max = col_max[:number_of_columns]

    print(
        tabulate([count, mean, std, col_min, perc_25, perc_50, perc_75, col_max],
                 headers=h,
                 floatfmt='.2f',
                 tablefmt='github'))
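A usage sketch (hedged: `ul` is assumed to be the project's CLI-args helper module; when its getters return values they override both arguments):

# Hypothetical direct call; prints a github-style table of per-column stats.
describe(filepath='../data/dataset_train.csv', number_of_columns=8)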
Example #7
def get_ocr_transformations(image):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], image)
    if filepath is None:
        return redirect(url_for('index'))
    text = request.form.get('text', '')
    transformations = utils.get_images(app.config['OUTPUT_FOLDER'])
    results = []
    for transformation in transformations:
        result_text, percentage = ocr.compare(text, utils.get_filepath(app.config['OUTPUT_FOLDER'], transformation[1]))
        results.append({'transformation': transformation[1].split('-')[0], 'original': text, 'result': result_text, 'percentage': percentage})
    results = sorted(results, key=itemgetter('percentage'), reverse=True)

    result_text, percentage = ocr.compare(text, filepath)
    results.insert(0, {'transformation': 'original', 'original': text, 'result': result_text, 'percentage': percentage})
    return jsonify(results)
Example #8
def get_ocr_steps(original, folder):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], original)
    if filepath is None:
        return redirect(url_for('index'))
    text = request.form.get('text', '')
    steps = utils.get_images('static/img/pipelines/steps/{}'.format(folder))
    steps.sort(key=lambda x: int(x[1].split(')')[0]))
    results = []
    for step in steps:
        result_text, percentage = ocr.compare(text, utils.get_filepath('static/img/pipelines/steps/{}'.format(folder), step[1]))
        results.append({'step': step[1].split('-')[0], 'original': text, 'result': result_text, 'percentage': percentage})

    result_text, percentage = ocr.compare(text, filepath)
    results.insert(0, {'step': 'original', 'original': text, 'result': result_text, 'percentage': percentage})
    return jsonify(results)
Example #9
def get_ocr(image):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], image)
    if filepath is None:
        return redirect(url_for('index'))
    text = request.form.get('text', '')
    pipelines = utils.get_images(app.config['OUTPUT_FOLDER_PIPELINES'])
    pipelines.sort(key=lambda x: int(x[1].split('-')[0]))
    results = []
    for pipeline in pipelines:
        result_text, percentage = ocr.compare(text, utils.get_filepath(app.config['OUTPUT_FOLDER_PIPELINES'], pipeline[1]))
        results.append({'pipeline': pipeline[1].split('-')[0], 'original': text, 'result': result_text, 'percentage': percentage})
    results = sorted(results, key=itemgetter('percentage'), reverse=True)

    result_text, percentage = ocr.compare(text, filepath)
    results.insert(0, {'pipeline': 'original', 'original': text, 'result': result_text, 'percentage': percentage})
    return jsonify(results)
Example #10
def get_metatile_coords_dict_file(state_graph_file, player_img):
    level_info = parse_state_graph_filename(state_graph_file)
    game, level = level_info['game'], level_info['level']
    save_directory = "level_saved_files_%s/metatile_coords_dicts/%s/" % (player_img, game)
    save_file = "%s.pickle" % level
    metatile_coords_dict_file = utils.get_filepath(save_directory, save_file)
    return metatile_coords_dict_file
Example #11
    def get_asp_valid_path(model_str,
                           player_img,
                           answer_set_filename,
                           save=True):
        # Initialize start and goal fact variables
        start_nodes = []
        goal_nodes = []
        is_start_idx = State.prolog_state_contents_is_start_index()
        goal_reached_idx = State.prolog_state_contents_goal_reached_index()

        # Create new graph for model
        graph = nx.Graph()

        # Add nodes from reachable facts
        reachable_facts = Solver.get_facts_as_list(model_str,
                                                   fact_name='reachable')

        for reachable_fact in reachable_facts:
            reachable_contents = Solver.get_fact_contents_as_list(
                reachable_fact)
            reachable_node = str(reachable_contents)
            graph.add_node(reachable_node)
            if reachable_contents[is_start_idx] == '1':
                start_nodes.append(reachable_node)
            if reachable_contents[goal_reached_idx] == '1':
                goal_nodes.append(reachable_node)

        # Check that reachable start and goal states exist
        if len(start_nodes) == 0:
            error_exit('No reachable start states found in model str')
        if len(goal_nodes) == 0:
            error_exit('No reachable goal states found in model str')

        # Add edges from link facts
        link_facts = Solver.get_facts_as_list(model_str, fact_name='link')
        for link_fact in link_facts:
            link_contents = Solver.get_fact_contents_as_list(link_fact)
            src_node = str(link_contents[:len(link_contents) // 2])
            dest_node = str(link_contents[len(link_contents) // 2:])
            graph.add_edge(src_node, dest_node)

        # Check if valid path exists from start to goal
        for start_node in start_nodes:
            for goal_node in goal_nodes:
                valid_path_exists = nx.has_path(graph,
                                                source=start_node,
                                                target=goal_node)
                if valid_path_exists:
                    valid_path = nx.dijkstra_path(graph,
                                                  source=start_node,
                                                  target=goal_node)
                    if save:
                        valid_path_str = " => \n".join(valid_path)
                        valid_path_file = get_filepath(
                            "level_saved_files_%s/generated_level_paths" %
                            player_img, "%s.pickle" % answer_set_filename)
                        write_pickle(valid_path_file, valid_path_str)
                    return valid_path

        return None
Example #12
def stratify(data_info,
             data_path=args.data_path,
             test_size=0.2,
             random_state=42,
             instance_type=args.instance_type,
             instances_folder=args.instances_folder):

    X, _ = get_data(data_info)
    areas = []
    for _, row in data_info.iterrows():
        instance_name = get_fullname(row['name'], row['position'])
        instance_path = get_filepath(data_path,
                                     row['name'],
                                     instances_folder,
                                     instance_name,
                                     file_type=instance_type)
        areas.append(get_area(instance_path))

    labels = get_labels(np.array(areas))

    sss = StratifiedShuffleSplit(n_splits=1,
                                 test_size=test_size,
                                 random_state=random_state)

    return sss.split(X, labels)
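`StratifiedShuffleSplit.split` returns a generator of (train_indices, test_indices) pairs, so the single split configured above can be consumed like this (a sketch under the same DataFrame assumption as `get_data`):

# Hypothetical consumption of the one configured split.
train_idx, test_idx = next(stratify(data_info))
train_df = data_info.iloc[train_idx]
test_df = data_info.iloc[test_idx]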
Example #13
def main(save_filename, unique_metatiles_file, player_img, print_stats):

    print("Calculating states per metatile stats for the given unique_metatiles_file: %s" % unique_metatiles_file)
    start_time = datetime.now()

    save_directory = "level_saved_files_%s/metatile_num_states_dicts/" % player_img
    save_file = "%s.pickle" % save_filename
    metatile_num_states_dict_file = get_filepath(save_directory, save_file)

    unique_metatiles = read_pickle(unique_metatiles_file)
    metatile_num_states_dict = {}

    for metatile in unique_metatiles:
        metatile_str = metatile.to_str()
        metatile_graph = nx.DiGraph(metatile.graph_as_dict)
        num_states = len(metatile_graph.nodes())
        metatile_num_states_dict[metatile_str] = num_states

    write_pickle(metatile_num_states_dict_file, metatile_num_states_dict)

    end_time = datetime.now()
    runtime = str(end_time-start_time)

    if print_stats:
        print(get_metatile_num_states_stats(metatile_num_states_dict))

    print("Runtime: %s\n" % runtime)

    return runtime
Example #14
def get_pipeline(image):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], image)
    if filepath is None:
        return not_found_error()
    utils.delete_images(app.config['OUTPUT_FOLDER_PIPELINES'])
    steps = request.get_json().get('steps')
    if steps is None:
        response = jsonify({
            'success': False,
            'message': 'Field "steps" is required'
        })
        response.status_code = 400
        return response
    processing_lib.pipeline_individual(filepath, steps)
    original = ['/' + filepath, image]
    pipelines = utils.get_images(app.config['OUTPUT_FOLDER_PIPELINES'])
    steps_count = utils.count_folders(app.config['OUTPUT_FOLDER_STEPS'])
    for index in range(1, steps_count + 1):
        if next((x for x in pipelines if int(x[1].split('-')[0]) == index),
                None) is None:
            pipelines.append(('/static/img/fail.gif',
                              '{}-{}'.format(index,
                                             str(uuid.uuid4()).split('-')[0])))
    pipelines.sort(key=lambda x: int(x[1].split('-')[0]))
    response = jsonify({
        'success': True,
        'original': original,
        'pipeline': pipelines
    })
    response.status_code = 200
    return response
Example #15
def steps(original, folder):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], original)
    if filepath is None:
        return redirect(url_for('index'))
    original = ['/' + filepath, original]
    steps = utils.get_images('static/img/pipelines/steps/{}'.format(folder))
    steps.sort(key=lambda x: int(x[1].split(')')[0]))
    return render_template('steps.html', original=original, steps=steps, folder=folder)
Example #16
def get_prolog_file_info(prolog_file):
    player_img, prolog_filename = Solver.parse_prolog_filepath(prolog_file)
    all_prolog_info_file = get_filepath(
        "level_saved_files_%s/prolog_files" % player_img,
        "all_prolog_info.pickle")
    all_prolog_info_map = read_pickle(all_prolog_info_file)
    prolog_file_info = all_prolog_info_map[prolog_filename]
    return prolog_file_info
Example #17
	def on_post_save(self, view):
		if utils.get_language() != "jsf":
			return
		window = sublime.active_window()
		folders = window.folders()
		if not folders:
			return

		folderProyecto = folders[0]
		if not os.path.exists(os.path.join(folderProyecto, "pom.xml")):
			return
		server = utils.get_preference("server")

		folderDeploy = server_folder_deploy[server]
		self.folderDeploy = folderDeploy

		filepath = utils.get_filepath()
		self.filepath = filepath

		if server == "weblogic":
			threading.Thread(target=self.reemplazarTodos).start()
			return

		if server != "jboss":
			folderDeploy = folderDeploy + os.sep + os.listdir(folderDeploy)[0]
			self.folderDeploy = folderDeploy

			folderDeploy = os.path.normpath(folderDeploy)
			print("the deploy folder is: " + folderDeploy)

		# Project name is the first path component below the project folder
		nombreProyecto = filepath.replace(folderProyecto + os.sep, "")
		nombreProyecto = nombreProyecto[:nombreProyecto.find(os.sep)]
		# Saved file's location relative to the "webapp" directory
		fileLocation = filepath[filepath.find("webapp" + os.sep) + 7:]
		print(server)
		print("the project name is: " + nombreProyecto)

		folders = os.listdir(folderDeploy)
		folders = [os.path.join(folderDeploy, x) for x in folders]
		# Try the most recently modified deploy folders first
		folders = sorted(folders, key=os.path.getmtime, reverse=True)
		print(folders)
		for folderS in folders:
			for folder in os.listdir(folderS):
				print(folder)
				if folder.find(nombreProyecto) != -1:
					fileLocation = folderS + os.sep + folder + os.sep + fileLocation
					print("the new file location is: " + fileLocation)
					utils.file_write(fileLocation, utils.file_read(filepath))
					return
				else:
					print("no")
Example #18
def processing(image):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], image)
    if filepath is None:
        return redirect(url_for('index'))
    utils.delete_images(app.config['OUTPUT_FOLDER'])
    processing_lib.individual(filepath)
    original = ['/' + filepath, image]
    transformations = utils.get_images(app.config['OUTPUT_FOLDER'])
    transformations.sort(key=lambda x: x[1])
    return render_template('processing.html', original=original, transformations=transformations)
Example #19
def get_ocr_inidividual(pipeline):
    filepath = utils.get_filepath(app.config['OUTPUT_FOLDER_PIPELINES'],
                                  pipeline)
    if filepath is None:
        return not_found_error()
    text = ocr.extract(filepath)
    results = {'pipeline': pipeline, 'text': text}
    response = jsonify({'success': True, 'results': results})
    response.status_code = 200
    return response
Example #20
    def download_from_cloud_storage(self, filename):
        """
        Download file from cloude storage
        :param filepath:
        :return:
        """
        filepath = get_filepath(filename)
        blob = self.bucket.blob(filename)
        blob.download_to_filename(filepath)

        return filepath
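For context, a sketch of the kind of wrapper class this method could live in (the `CloudStore` name and constructor are hypothetical; `storage.Client().bucket(...)` is the real google-cloud-storage call that yields the `self.bucket` the method relies on):

from google.cloud import storage

class CloudStore:
    def __init__(self, bucket_name):
        # Bucket handle used by download_from_cloud_storage above
        self.bucket = storage.Client().bucket(bucket_name)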
Example #21
def main(push, pull, files, dirs, file_types, push_project, pull_trials,
         pull_processed):

    if not any([push, pull, push_project, pull_trials, pull_processed]):
        error_exit(
            "Must specify a push/pull action. View options with python scp_files.py --help"
        )

    if push_project:
        files_to_transfer = get_files_to_transfer(
            files=[], dirs=PROJECT_DIRS, file_types=PROJECT_FILE_TYPES)
        transfer_files(files_to_transfer, push=True)
        return

    if pull_trials:
        pull_directories(dirs=TRIAL_DIRS)
        return

    if pull_processed is not None:
        if len(pull_processed) <= 1:
            error_exit(
                '--pull_processed args should be in the format <game> <level1> <level2> ...'
            )
        else:
            files_to_transfer = get_processed_level_files(
                player_img='block',
                game=pull_processed[0],
                levels=pull_processed[1:])
            for file in files_to_transfer:
                # get_filepath creates the local directory for each incoming file
                get_filepath(os.path.dirname(file), os.path.basename(file))
            transfer_files(files_to_transfer, push=False)
        return

    if push and pull:
        error_exit('Push and pull are mutually exclusive')

    files_to_transfer = get_files_to_transfer(files=files,
                                              dirs=dirs,
                                              file_types=file_types)
    transfer_files(files_to_transfer, push=push)
Example #22
def get_ocr(image):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], image)
    if filepath is None:
        return not_found_error()
    text = request.get_json().get('text')
    if text is None:
        response = jsonify({
            'success': False,
            'message': 'Field "text" is required'
        })
        response.status_code = 400
        return response
    pipelines = utils.get_images(app.config['OUTPUT_FOLDER_PIPELINES'])
    pipelines.sort(key=lambda x: int(x[1].split('-')[0]))
    results = []
    for pipeline in pipelines:
        result_text, percentage = ocr.compare(
            text,
            utils.get_filepath(app.config['OUTPUT_FOLDER_PIPELINES'],
                               pipeline[1]))
        results.append({
            'pipeline': pipeline[1].split('-')[0],
            'original': text,
            'result': result_text,
            'percentage': percentage
        })
    results = sorted(results, key=itemgetter('percentage'), reverse=True)

    result_text, percentage = ocr.compare(text, filepath)
    results.insert(
        0, {
            'pipeline': 'original',
            'original': text,
            'result': result_text,
            'percentage': percentage
        })
    response = jsonify({'success': True, 'results': results})
    response.status_code = 200
    return response
Example #23
def main(save_filename, metatile_id_map_file, id_metatile_map_file,
         metatile_coords_dict_files, player_img):

    print("Constructing tile_id constraints dictionary...")
    start_time = datetime.now()

    # Create save file path
    metatile_constraints_dir = "level_saved_files_%s/metatile_constraints" % player_img
    metatile_constraints_file = get_filepath(metatile_constraints_dir,
                                             "%s.pickle" % save_filename)

    # Load in files
    metatile_id_map = read_pickle(metatile_id_map_file)
    id_metatile_map = read_pickle(id_metatile_map_file)
    metatile_coords_dicts = [
        read_pickle(file) for file in metatile_coords_dict_files
    ]

    coord_metatiles_dict = get_coord_metatiles_dict(metatile_coords_dicts)
    coord_tile_ids_map = get_coord_tile_ids_map(metatile_id_map,
                                                coord_metatiles_dict)

    tile_id_constraints_dict = {}
    for tile_id, metatile_str in id_metatile_map.items():
        metatile = Metatile.from_str(metatile_str)
        tile_id_constraints_dict[tile_id] = {
            "type": metatile.type,
            "graph": metatile.graph_as_dict,
            "games": metatile.games,
            "levels": metatile.levels,
            "adjacent": {
                TOP: [],
                BOTTOM: [],
                LEFT: [],
                RIGHT: [],
                TOP_LEFT: [],
                BOTTOM_LEFT: [],
                TOP_RIGHT: [],
                BOTTOM_RIGHT: []
            }
        }
    tile_id_constraints_dict = populate_tile_id_constraints_adjacencies(
        tile_id_constraints_dict, coord_tile_ids_map)

    end_time = datetime.now()
    runtime = str(end_time - start_time)

    write_pickle(metatile_constraints_file, tile_id_constraints_dict)
    print("Runtime: %s\n" % runtime)

    return metatile_constraints_file, runtime
Example #24
def get_ocr_steps(original, folder):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], original)
    if filepath is None:
        return not_found_error()
    text = request.get_json().get('text')
    if text is None:
        response = jsonify({
            'success': False,
            'message': 'Field "text" is required'
        })
        response.status_code = 400
        return response
    steps = utils.get_images('static/img/pipelines/steps/{}'.format(folder))
    steps.sort(key=lambda x: int(x[1].split(')')[0]))
    results = []
    for step in steps:
        result_text, percentage = ocr.compare(
            text,
            utils.get_filepath('static/img/pipelines/steps/{}'.format(folder),
                               step[1]))
        results.append({
            'step': step[1].split('-')[0],
            'original': text,
            'result': result_text,
            'percentage': percentage
        })

    result_text, percentage = ocr.compare(text, filepath)
    results.insert(
        0, {
            'step': 'original',
            'original': text,
            'result': result_text,
            'percentage': percentage
        })
    response = jsonify({'success': True, 'results': results})
    response.status_code = 200
    return response
Example #25
    def process_data(self, data_source):
        fp = utils.get_filepath(data_source, 'train')
        data = arff.load(open(fp, 'rb'))
        class_index, header, data_types = utils.parse_attributes(data)
        self.data = []
        self.class_sets = []
        for row in data['data']:
            self.data.append(row[:class_index])
            # Concatenate the label columns into a single label string
            label = reduce(lambda x, y: x + y, row[class_index:])
            self.class_sets.append(utils.str_to_set(label))
        self.num_data = len(self.class_sets)
        self.num_classes = len(label)
        self.data = np.array(self.data)

        fp = utils.get_filepath(data_source, 'test')
        test_data = arff.load(open(fp, 'rb'))
        self.test_data = []
        self.test_class_sets = []
        for row in test_data['data']:
            self.test_data.append(row[:class_index])
            label = reduce(lambda x, y: x + y, row[class_index:])
            self.test_class_sets.append(utils.str_to_set(label))
        self.test_num_data = len(self.test_class_sets)
        self.test_data = np.array(self.test_data)
Example #26
def save_process_runtimes(process_key, process_runtimes):
    all_levels_process_info_file = utils.get_filepath(
        "", "all_levels_process_info.pickle")
    if os.path.exists(all_levels_process_info_file):
        all_levels_process_info = utils.read_pickle(
            all_levels_process_info_file)
    else:
        all_levels_process_info = {}

    if all_levels_process_info.get(process_key) is None:
        all_levels_process_info[process_key] = {}

    for process_step, runtime_str in process_runtimes:
        all_levels_process_info[process_key][process_step] = runtime_str

    utils.write_pickle(all_levels_process_info_file, all_levels_process_info)
Example #27
def pipeline(image):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], image)
    if filepath is None:
        return redirect(url_for('index'))
    if request.method == 'POST':
        utils.delete_images(app.config['OUTPUT_FOLDER_PIPELINES'])
        list_transformations = request.form.get('list_transformations').split(',')
        processing_lib.pipeline(filepath, list_transformations)
    original = ['/' + filepath, image]
    pipelines = utils.get_images(app.config['OUTPUT_FOLDER_PIPELINES'])
    steps_count = utils.count_folders(app.config['OUTPUT_FOLDER_STEPS'])
    for index in range(1, steps_count + 1):
        if next((x for x in pipelines if int(x[1].split('-')[0]) == index), None) is None:
            pipelines.append(('/static/img/fail.gif', '{}-{}'.format(index, str(uuid.uuid4()).split('-')[0])))
    pipelines.sort(key=lambda x: int(x[1].split('-')[0]))
    return render_template('pipeline.html', original=original, pipelines=pipelines)
Example #28
def processing(image):
    filepath = utils.get_filepath(app.config['INPUT_FOLDER'], image)
    if filepath is None:
        return not_found_error()
    utils.delete_images(app.config['OUTPUT_FOLDER'])
    processing_lib.individual(filepath)
    original = ['/' + filepath, image]
    transformations = utils.get_images(app.config['OUTPUT_FOLDER'])
    transformations.sort(key=lambda x: x[1])
    response = jsonify({
        'success': True,
        'original': original,
        'transformations': transformations
    })
    response.status_code = 200
    return response
Example #29
    def get_state_graph_valid_path(assignments_dict,
                                   player_img,
                                   prolog_filename,
                                   answer_set_filename,
                                   save=True):

        # Construct state graph for generated level
        id_metatile_file = "level_saved_files_%s/id_metatile_maps/%s.pickle" % (
            player_img, prolog_filename)
        state_graph = Solver.construct_state_graph(assignments_dict,
                                                   id_metatile_file)
        if save:
            state_graph_file = get_filepath(
                'level_saved_files_%s/enumerated_state_graphs/generated' %
                player_img, '%s.gpickle' % answer_set_filename)
            nx.write_gpickle(state_graph, state_graph_file)

        # Check for valid path from start to goal state
        start_nodes = []
        goal_nodes = []
        for node in state_graph.nodes():
            state = State.from_str(node)
            if state.is_start:
                start_nodes.append(node)
            if state.goal_reached:
                goal_nodes.append(node)
        if len(start_nodes) == 0:
            error_exit("No start states found in generated level state graph")
        if len(goal_nodes) == 0:
            error_exit("No goal states found in generated level state graph")
        for start_node in start_nodes:
            for goal_node in goal_nodes:
                if nx.has_path(state_graph,
                               source=start_node,
                               target=goal_node):
                    return nx.dijkstra_path(state_graph,
                                            source=start_node,
                                            target=goal_node)

        return None
Example #30
def parse_multipart(request: flask.Request) -> Dict:
    """Parses a 'multipart/form-data' upload request
    Args:
        request (flask.Request): The request object.
    Returns:
        Dict containing the non-file form fields plus a 'files' list of
        (filename, file) pairs for each uploaded file.
    """
    formResult = {"files": []}

    # This code will process each non-file field in the form
    data = request.form.to_dict()
    for field in data:
        formResult[field] = data[field]

    # This code will process each file uploaded
    files = request.files.to_dict()
    for item in files.items():
        filename, file = item
        file.save(get_filepath(filename))
        formResult["files"].append(item)

    return formResult
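A quick local exercise of `parse_multipart` via Flask's test request context (a sketch: the route, field names, and payload are hypothetical, and `get_filepath` must resolve to a writable path):

import io
import flask

app = flask.Flask(__name__)
with app.test_request_context(
        '/upload', method='POST',
        data={'field': 'value',
              'report': (io.BytesIO(b'contents'), 'report.txt')}):
    result = parse_multipart(flask.request)
    print(result['field'], [name for name, _ in result['files']])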
Example #31
def arrange_expenditure_data():
    '''
    the function selects files in the datasets dir, normalize the data and creates new csvs
    from them.
    '''
    def to_include(filename):
        # Raw string avoids invalid-escape warnings for \d in the pattern
        return bool(re.match(r'10.*Expend.*\d{8}\.csv$', filename))

    # list all files in datasets dir
    all_files = os.listdir(DATASETS_PATH)
    to_arrange_with_same_logic = filter(to_include, all_files)
    for filename in to_arrange_with_same_logic:
        filepath = get_filepath(filename)
        try:
            df = get_normalized_expenditure_dataframe_for_10(filepath)
        except Exception as e:
            print(filepath)
            raise e

        # save in a file with _copy appended to the original file's name.
        to_file = '{}_copy.csv'.format(filepath.split('.csv')[0])
        df.to_csv(to_file, index=False)