def store_preprocessed_metadata(
        self,
        node_types_number: Optional[int],
        nodes_number: int,
        edge_types_number: Optional[int],
        edges_number: int,
    ):
        """Store the provided metadata.

        Parameters
        --------------------------------
        node_types_number: Optional[int],
            The number of unique node types existing in this graph.
        nodes_number: int,
            The number of unique nodes existing in this graph.
        edge_types_number: Optional[int],
            The number of unique edge types existing in this graph.
        edges_number: int,
            The number of edges existing in this graph.
        """
        compress_json.dump(
            dict(
                node_types_number=node_types_number,
                nodes_number=nodes_number,
                edge_types_number=edge_types_number,
                edges_number=edges_number
            ),
            self.get_preprocessed_graph_metadata_path()
        )
 def save_data(self, compress=False, minify=False):
     """Save the data either compressed, minified or pretty-printed."""
     if compress:
         compress_json.dump(self.chile, "./data/chile.json.gz")
         return
     with open("./data/chile.json", "w") as outfile:
         if minify:
             json.dump(self.chile, outfile)
         else:
             json.dump(self.chile, outfile, indent=4)
Example #3
def compress_all_db():
    """
    Iterate through all available databases
    and compress them one by one
    """
    #take all the files in the DB folder
    #and run compress_data on each of them, in their order of creation
    _db_files = sorted(glob.iglob(
        '{}\\jsons\\DB_*.json'.format(db_folder_path)),
                       key=os.path.getctime)
    #iterate through all dbs
    for _n in range(len(_db_files)):
        print('compressing {} db out of {}'.format(_n + 1, len(_db_files)))
        #take the file, extract its contents and compress them into a new file
        with open(_db_files[_n], encoding="utf8") as json_file:
            compress_json.dump(json.load(json_file),
                               "{}.gz".format(_db_files[_n]))
Example #4
def test_compress_json():
    D = random_string_dict(10, 10)
    key = sha256(D)
    extensions = compress_json.compress_json._DEFAULT_EXTENSION_MAP.keys()
    for ext in extensions:
        path = f"random_dirs/test.json.{ext}"
        compress_json.dump(D, path)
        assert key == sha256(compress_json.load(path))

    shutil.rmtree("random_dirs")

    for ext in extensions:
        path = f"random_dirs/test.json.{ext}"
        compress_json.local_dump(D, path)
        assert key == sha256(compress_json.local_load(path))

    shutil.rmtree("tests/random_dirs")
Example #5
        def on_exit():
            if establishConnection():
                if __version__ != (version := win.session.get_version()):
                    print(
                        f"You can only update the private database if you're using the latest version! (v{version})"
                    )
                else:
                    print('Updating private database...')
                    subprocess.call(
                        [github_db, '-p', win.database.folder, '-g'],
                        shell=True,
                        stdout=sys.stdout)
                    content = retreive_temp_data(win.database.folder)
                    content.update(win.database.all())
                    win.database.store(content)
                    content = win.database.organise()
                    compress_json.dump(content, win.database.filename)

                    subprocess.call([github_db, '-e', win.database.filename],
                                    shell=True)
                    print('Successfully updated!')
def create_JSON(case):
    """"Create JSON with all Data"""
    # Read CSV
    key_file = "Master_Key/master_key.csv"
    key_df = pd.read_csv(key_file)
    key_df = key_df.sort_values(by=["File"])

    # Remove Unacast
    key_df = key_df[
        key_df["File"] !=
        "../../../covid19demographics-fall/unacast/data/2020-04-07 22:39:47.057978.csv"]

    new_files = vlookup(key_df, case)

    out = {"USA": {}, "Intl.": {}}

    intl_keys = ["Iceland", "SouthKorea", "SouthKoreaCensus"]

    # Add all rows to state key
    for state in new_files:
        international = False
        if state in intl_keys:
            out["Intl."][state] = []
            international = True
        else:
            out["USA"][state] = []
        for dic in new_files[state]:
            for key in dic:
                rows = dic[key].to_dict(orient="records")
                if international:
                    out["Intl."][state].extend(rows)
                else:
                    out["USA"][state].extend(rows)

    now = str(datetime.now())

    # Export JSON - works when running on Andrew's PC
    compress_json.dump(out, "../../../covid19demographics/data/data.json.gz")
Example #7
def demo_pipeline(
    image_path: str,
    output_path: str = None,
    cache: bool = False,
    errors_path: str = None,
):
    """Executes demosaicking pipeline on given image.

    Parameters
    ---------------------------
    image_path: str,
        Path from where to load the given image.
    output_path: str = None,
        Path where to save the demosaicked image.
        Use None to not save the image.
    cache: bool = False,
        Whether to skip processing an image that was already processed.
    errors_path: str = None,
        Path where to store the error info.
        Use None to not log the errors and raise them.
    """

    try:
        # Check whether this image has already been cached.
        if cache and output_path is not None and os.path.exists(output_path):
            # If this is the case we skip this image.
            return None

        # Loading the image.
        original = load_image(image_path)
        # If the user hit a keyboard interrupt we just stop.
    except KeyboardInterrupt as e:
        raise e
    # Otherwise we optionally write the encountered exception to disk.
    except Exception as e:
        if errors_path is None:
            raise e
        os.makedirs(errors_path, exist_ok=True)
        compress_json.dump(
            {
                "image-path": image_path,
                "text": str(e),
                "class": str(type(e)),
                "traceback": " ".join(traceback.format_exc().splitlines())
            },
            "{}/{}.json".format(errors_path,
                                os.path.basename(image_path).split(".")[0]),
            json_kwargs=dict(indent=4))
        return None

    if output_path is not None:
        directory_name = os.path.dirname(output_path)
        os.makedirs(directory_name, exist_ok=True)

    image_demo = demosaicking(original, 'menon')

    if output_path is not None:
        # Saving the demosaicked image to the given path.
        cv2.imwrite(  # pylint: disable=no-member
            output_path,
            image_demo)
    return image_demo
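A hypothetical invocation of demo_pipeline (the paths are placeholders, not from the original code); any error is written as an indented JSON file under the errors directory instead of being raised.

demosaicked = demo_pipeline(
    image_path="raw/sample.png",
    output_path="demosaicked/sample.png",
    cache=True,
    errors_path="errors",
)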
Example #8
                       destinations_column="object",
                       directed=True,
                       edge_types_column="edge_label",
                       node_path="../embiggen/pos_train_nodes.tsv",
                       nodes_column="id",
                       node_types_column="category")
completed_graph = time() - start
start_walk = time()
graph.walk(iterations=10,
           length=80,
           min_length=0,
           return_weight=1,
           explore_weight=1,
           change_node_type_weight=1,
           change_edge_type_weight=1)
delta = time() - start
total_walk_time = time() - start_walk

response = {
    "required_time": delta,
    "human_time": naturaldelta(delta),
    "building_graph_required_time": completed_graph,
    "building_graph_required_human_time": naturaldelta(completed_graph),
    "random_walk_time": total_walk_time,
    "random_walk_human_time": naturaldelta(total_walk_time)
}

print(json.dumps(response, indent=4))

compress_json.dump(response, "time_required.json", json_kwargs={"indent": 4})
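A brief follow-up note, not part of the original snippet: json_kwargs only affects how the report is written; reading it back needs no extra arguments.

loaded_response = compress_json.load("time_required.json")
print(loaded_response["human_time"])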
Example #9
input_dir: str = "json"
output_dir: str = "mini-json"
log_file: str = "failed-minification.txt"

files: List[str] = os.listdir(input_dir)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

for file in files:
    print(f"Minifying and Compressing {file}")

    try:
        json_file_path: str = os.path.join(input_dir, file)
        mini_file_path: str = os.path.join(output_dir, f"{file}.lzma")

        input_file: TextIO = open(file=json_file_path, mode="r")
        input_contents_list: List[str] = input_file.readlines()
        input_contents: str = "\n".join(input_contents_list)
        input_file.close()

        input_dict: dict = json.loads(input_contents)

        compress_json.dump(input_dict, mini_file_path)
    except Exception as e:  # JSONDecodeError
        print(f"Failed To Minify {file}!!! Reason: {e}!!! Logging To {log_file}")

        with open(file=log_file, mode="a+") as f:
            f.write(f"{file}; {e}\n")
class Session(QObject):

    logger = pyqtSignal(str, dict)
    show = pyqtSignal()
    close = pyqtSignal()

    def __init__(self, base, parent=None):
        super().__init__(parent)
        self.running = False
        self.shownStats = False
        self.ui = base
        self.database = base.database
        self.cache = base.cache

    def actually_start(self):
        asyncio.run(self.start())

    async def start(self):
        """
        Start the Session.
        """
        self.ui.ui.table.clearContents()
        for i in range(6):
            self.ui.ui.table.setItem(0, i, QTableWidgetItem())
            self.ui.ui.table.setItem(1, i, QTableWidgetItem())
        self.running = True
        self.ui.ui.startButton.setEnabled(False)
        self.ui.ui.stopButton.setEnabled(True)

        self.email = self.ui.ui.emailTassomai.text()
        self.password = self.ui.ui.passwordTassomai.text()
        self.maxQuizes = self.ui.ui.maxQuizes.value()
        self.dailyGoal = self.ui.ui.dailyGoal.isChecked()
        self.bonusGoal = self.ui.ui.bonusGoal.isChecked()
        self.row = self.ui.row
        self.correct = 0
        self.incorrect = 0
        self.quizes = 0
        self.until = 0
        self.until_incorrect = self.ui.ui.randomnessAmount.value()

        if '@' not in self.email:
            self.logger.emit('TYPES=[(#c8001a, BOLD), Invalid email.]', {})
            print("Invalid email.")
            return
        if len(self.password) < 1:
            self.logger.emit('TYPES=[(#c8001a, BOLD), Must include password.]',
                             {})
            print("Must include password.")
            return

        self.timer = time.perf_counter()

        self.cache.store({'email': self.email, 'password': self.password})

        def connect():
            self.logger.emit(
                'TYPES=[(#0066cc, BOLD), Establishing a connection...]', {})
            print("Establishing a connection...")
            r = establishConnection()
            if r:
                self.logger.emit(
                    'TYPES=[(#0c5d09, BOLD), Successfully established connection.]',
                    {'newlinesafter': 2})
                print("Successfully established connection.\n")
            else:
                self.logger.emit(
                    'TYPES=[(#c8001a, BOLD), Unable to establish connection.]',
                    {})
                print("Unable to establish connection.")
                time.sleep(0.5)
                connect()

        connect()

        if __version__ != (version := self.get_version()):
            self.logger.emit(
                'TYPES=[(#c8001a, BOLD), Your Tassomai Automation is outdated! '
                ''
                f'Please update to the newest version v{version} for better performance.]',
                {})
            print(
                f"Your Tassomai Automation is outdated! Please update to the newest version v{version} for better performance."
            )
            return

        subprocess.call([github_db, '-p', self.database.folder, '-g'],
                        shell=True,
                        stdout=sys.stdout)
        content = retreive_temp_data(self.database.folder)
        self.database.store(content)
        content = self.database.organise()
        compress_json.dump(content, self.database.filename)

        self.logger.emit(
            'TYPES=[(BOLD, #000000), Successfully updated local database by fetching the Private Answers Database!]',
            {'newlinesafter': 2})
        print(
            "Successfully updated local database by fetching the Private Answers Database!\n"
        )

        if not self.running:
            return

        self.tassomai = Tassomai(self.database.all())
        await self.tassomai.login(self.email, self.password)  # logging in

        if not self.running:
            return

        self.logger.emit(
            'TYPES=[(#0c5d09, BOLD), Successfully logged into Tassomai.]', {})
        print("Successfully logged into Tassomai.")

        await asyncio.sleep(2.00)

        loop = 1
        try:
            for quiz in range(self.maxQuizes):
                if not self.running:
                    subprocess.call(
                        [github_db, '-p', self.database.folder, '-g'],
                        shell=True,
                        stdout=sys.stdout)
                    content = retreive_temp_data(self.database.folder)
                    content.update(self.tassomai.database)
                    database = {
                        key: value
                        for key, value in sorted(content.items(),
                                                 key=lambda item: item[0])
                    }
                    self.database.store(database)
                    return
                if self.tassomai.is_complete and self.dailyGoal:
                    break
                if self.tassomai.is_bonus_complete and self.bonusGoal:
                    break

                self.logger.emit(
                    f'COLOR=(#0c5d09, Starting up quiz {quiz+1}.)',
                    {'newlinesbefore': 1})
                print(f"\nQuiz {quiz+1}")

                time.sleep(0.50)

                self.quiz_data = await self.tassomai.extract_quiz_data()

                title = self.tassomai.title
                self.logger.emit(title, {
                    'color': '#7214ff',
                    'bold': True,
                    'newlinesafter': 2
                })
                print(f"{title}\n")

                quiz_timer = time.perf_counter()

                for index, question in enumerate(self.quiz_data['questions']):
                    force_incorrect = False
                    if self.ui.ui.randomness.isChecked():
                        if self.until == self.until_incorrect:
                            self.until = 0
                            force_incorrect = True
                    question['text'] = question['text'].replace("  ", " ")
                    for ie, ans in enumerate(question['answers']):
                        question['answers'][ie]['text'] = ans['text'].replace(
                            "  ", " ")
                    question_data, database = await self.tassomai.answer_question(
                        Variables(question, force_incorrect))
                    item = self.ui.ui.table.item(self.row, 0)
                    if item is None:
                        for i in range(6):
                            self.ui.ui.table.setItem(self.row, i,
                                                     QTableWidgetItem())
                        item = self.ui.ui.table.item(self.row, 0)
                    item2 = self.ui.ui.table.item(self.row, 1)
                    item3 = self.ui.ui.table.item(self.row, 2)
                    item4 = self.ui.ui.table.item(self.row, 3)
                    item5 = self.ui.ui.table.item(self.row, 4)
                    item6 = self.ui.ui.table.item(self.row, 5)
                    item.setText(str(loop))
                    item2.setText(str(index + 1))
                    item3.setText(question_data['question'])
                    item4.setText(str(question_data['correct']))
                    item5.setText(str(question_data['time']) + 's')
                    item6.setText(' **OR** '.join(question_data['answer']))
                    self.logger.emit(
                        f'Question {index+1}: {question_data["question"]}', {
                            'color': '#0c5d09',
                            'bold': True
                        })
                    self.logger.emit(
                        f'TYPES=[(BOLD, #0066cc), Correct:] TYPES=[(BOLD, '
                        f'{"#0c5d09" if question_data["correct"] else "#c8001a"}), '
                        f'{question_data["correct"]}]', {})
                    self.row += 1
                    self.ui.row = self.row
                    if question_data['correct']:
                        self.correct += 1
                    else:
                        self.incorrect += 1
                    if self.ui.ui.delay.isChecked():
                        if self.ui.ui.whenDelay.currentText() == "question":
                            if self.ui.ui.amountOfDelay.value(
                            ) == 0 and self.ui.ui.amountOfDelay2.value() == 0:
                                rand = round(random.uniform(1, 4), 2)
                            elif self.ui.ui.amountOfDelay.value(
                            ) == self.ui.ui.amountOfDelay2.value():
                                rand = self.ui.ui.amountOfDelay.value()
                            elif self.ui.ui.amountOfDelay.value(
                            ) != self.ui.ui.amountOfDelay2.value():
                                rand = round(
                                    random.uniform(
                                        self.ui.ui.amountOfDelay.value(),
                                        self.ui.ui.amountOfDelay2.value()), 2)
                            item5.setText(
                                f"{float(question_data['time']) + rand}s")
                            await asyncio.sleep(rand)
                    self.until += 1

                end_time = time.perf_counter() - quiz_timer
                print(f"Completed quiz {loop} in {end_time:0.2f}s")
                self.logger.emit(f"Completed quiz {loop} in {end_time:0.2f}s",
                                 {
                                     'color': '#7214ff',
                                     'bold': True
                                 })
                print(len(self.database.all()), "total questions answered!\n")

                loop += 1

                if not self.running:
                    subprocess.call(
                        [github_db, '-p', self.database.folder, '-g'],
                        shell=True,
                        stdout=sys.stdout)
                    content = retreive_temp_data(self.database.folder)
                    content.update(self.tassomai.database)
                    database = {
                        key: value
                        for key, value in sorted(content.items(),
                                                 key=lambda item: item[0])
                    }
                    self.database.store(database)
                    content = self.database.organise()
                    compress_json.dump(content, self.database.filename)
                    return

                self.quizes += 1

                if self.ui.ui.delay.isChecked():
                    if self.ui.ui.whenDelay.currentText() == "quiz":
                        if self.ui.ui.amountOfDelay.value(
                        ) == 0 and self.ui.ui.amountOfDelay2.value() == 0:
                            rand = round(random.uniform(1, 4), 2)
                        elif self.ui.ui.amountOfDelay.value(
                        ) == self.ui.ui.amountOfDelay2.value():
                            rand = self.ui.ui.amountOfDelay.value()
                        elif self.ui.ui.amountOfDelay.value(
                        ) != self.ui.ui.amountOfDelay2.value():
                            rand = round(
                                random.uniform(
                                    self.ui.ui.amountOfDelay.value(),
                                    self.ui.ui.amountOfDelay2.value()), 2)
                        await asyncio.sleep(rand)

        except Exception:
            logging.error(traceback.format_exc())

        subprocess.call([github_db, '-p', self.database.folder, '-g'],
                        shell=True,
                        stdout=sys.stdout)
        content = retreive_temp_data(self.database.folder)
        content.update(self.tassomai.database)
        database = {
            key: value
            for key, value in sorted(content.items(), key=lambda item: item[0])
        }
        self.database.store(database)
        content = self.database.organise()
        compress_json.dump(content, self.database.filename)

        self.shownStats = True
        self.show_stats()

        self.ui.terminate_session()
    def build_all(self):
        """Build graph retrieval methods."""
        target_directory_path = os.path.join(
            "../bindings/python/ensmallen/datasets",
            self.repository_package_name,
        )
        file_path = "{}.py".format(target_directory_path)

        imports = []

        for graph_data_path in tqdm(
            glob(os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "graph_repositories",
                self.get_formatted_repository_name(),
                "*.json.gz"
            )),
            desc="Building graph retrieval methods for {}".format(self.name),
            leave=False
        ):
            graph_data = compress_json.load(graph_data_path)
            first_graph_version_data = list(graph_data.values())[0]
            graph_name = first_graph_version_data["graph_name"]
            packages_to_import = self.get_imports(
                graph_name, list(graph_data.keys())[-1])
            if packages_to_import:
                imports.append(packages_to_import)

        imports = list(set(imports))

        first_references = list(compress_json.load(glob(os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "graph_repositories",
            self.get_formatted_repository_name(),
            "*.json.gz"
        ))[0]).values())[0]["references"]

        has_unique_references = all(
            list(compress_json.load(path).values())[
                0]["references"] == first_references
            for path in glob(os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "graph_repositories",
                self.get_formatted_repository_name(),
                "*.json.gz"
            ))
        ) and first_references

        file = open(file_path, "w")
        file.write("\n".join([
            "\"\"\"Module providing graphs available from {repository_name}.{references}\"\"\"".format(
                repository_name=self.get_formatted_repository_name(),
                references="\n\n{}\n".format(self.format_references(
                    first_references)) if has_unique_references else ""
            ),
            "from ensmallen import Graph  # pylint: disable=import-error",
            self.get_graph_retrieval_import(),
            *imports,
            "",
            ""
        ]))
        graph_repository_metadata = {}
        for graph_data_path in tqdm(
            glob(os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "graph_repositories",
                self.get_formatted_repository_name(),
                "*.json.gz"
            )),
            desc="Building graph retrieval methods for {}".format(self.name),
            leave=False,
            dynamic_ncols=True
        ):
            graph_data = compress_json.load(graph_data_path)
            first_graph_version_data = list(graph_data.values())[0]
            graph_name = first_graph_version_data["graph_name"]
            graph_method_name = first_graph_version_data["graph_method_name"]
            graph_retrieval_file = self.format_graph_retrieval_file(
                graph_name=graph_name,
                graph_method_name=graph_method_name,
                references=first_graph_version_data["references"],
                versions=list(graph_data.keys()),
                has_unique_references=has_unique_references
            )
            for value in graph_data.values():
                value.pop("references")
            graph_repository_metadata[graph_method_name] = graph_data
            file.write(graph_retrieval_file)

        file.close()
        compress_json.dump(
            graph_repository_metadata,
            "{}.json.gz".format(target_directory_path)
        )
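A hedged follow-up sketch: the file written at the end of build_all maps each graph_method_name to its per-version metadata (with the references removed). The path below is a placeholder mirroring target_directory_path + ".json.gz".

import compress_json

# Hypothetical path; the real one depends on repository_package_name.
graph_repository_metadata = compress_json.load(
    "../bindings/python/ensmallen/datasets/example_repository.json.gz")
for graph_method_name, versions in graph_repository_metadata.items():
    print(graph_method_name, list(versions))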
Example #12
def api_db_load(vend, buy, db_compare=False, compressed=True):
    """
    Make api call, save new db, compare new db with old one and save demand
    Parameters
    ----------
    vend : list
        list that will store vend data.
    buy : list
        list that will store vend data.
    db_compare : TYPE, optional
        if we want to compare all available dbs. The default is False.

    Returns
    -------
    None.

    """
    #find the latest compressed database
    if compressed:
        db_conv_time = last_db_time_get()
        if not db_compare:
            if convert_time(db_conv_time, 'm') > 9:  #9
                print("Old database...")
                #build the request URL
                link = 'https://api.originsro.org/api/v1/market/list?api_key='
                api_key = ''
                #make the call and store the response in a variable
                response = rq.get('{}{}'.format(link, api_key))
                #build the path/name for our new database
                latest_file = '{}\\jsons\\DB_{}.json.gz'.format(
                    db_folder_path, now.strftime("%d-%m-%Y_%H-%M-%S"))
                #save our new database
                compress_json.dump(response.text, latest_file)
                print('compressed db saved')
                #and load it right away
                #actual_db = compress_json.local_load(latest_file)
                #find the second newest DB (i.e. the one preceding the freshly downloaded one)
                previous_db = sorted(glob.iglob(
                    '{}\\jsons\\DB_*.json.gz'.format(db_folder_path)),
                                     key=os.path.getctime)[-2]
                #decompress it
                #previous_db_uncompressed = compress_json.local_load(previous_db)
                #create temporary lists with the data
                #from the previous DB
                pre_vendor_shops = []
                pre_buy_shops = []
                get_vend_data(previous_db, pre_vendor_shops, pre_buy_shops)
                #from the new DB
                print(latest_file)
                get_vend_data(latest_file, vend, buy)
                #fill demand file
                compare_data(pre_vendor_shops, vend)
                compare_data(pre_buy_shops, buy)
                print('demand refreshed')
            else:
                print('Database ok...')
                latest_file = max(glob.glob(
                    '{}\\jsons\\DB_*.json.gz'.format(db_folder_path)),
                                  key=os.path.getctime)
                get_vend_data(latest_file, vend, buy)
        else:
            compare_db()
    else:
        #find the newest available database
        latest_file = max(glob.glob(
            '{}\\jsons\\DB_*.json'.format(db_folder_path)),
                          key=os.path.getctime)
        #Load the DB and update it
        with open(latest_file, encoding="utf8") as json_file:
            #create a convenient reference to the same database
            api_db = json.load(json_file)['generation_timestamp']
            db_time = datetime.datetime.strptime(api_db,
                                                 "%Y-%m-%dT%H:%M:%S.%fZ")
            if not db_compare:
                #if the database is more than 10 minutes old, refresh it
                if convert_time(db_time, 'm') > 10:  #9
                    print("Old database...")
                    link = 'https://api.originsro.org/api/v1/market/list?api_key='
                    api_key = 'r3bkd0q8umxhuj75ahtvwpgkd3yzi3rm'
                    response = rq.get('{}{}'.format(link, api_key))
                    latest_file = '{}\\jsons\\DB_{}.json'.format(
                        db_folder_path, now.strftime("%d-%m-%Y_%H-%M-%S"))
                    #create a new DB named after latest_file
                    with open(latest_file, "w+", encoding="utf8") as new_file:
                        new_file.write(response.text)
                    #create a new, compressed DB
                    #compress_json.dump(response.text, latest_file)
                    #find the second newest file, i.e. the previous DB
                    previous_db = sorted(glob.iglob(
                        '{}\\jsons\\DB_*.json'.format(db_folder_path)),
                                         key=os.path.getctime)[-2]
                    #create temporary lists with the data
                    #from the previous DB
                    pre_vendor_shops = []
                    pre_buy_shops = []
                    get_vend_data(previous_db, pre_vendor_shops, pre_buy_shops)
                    #from the new DB
                    get_vend_data(latest_file, vend, buy)
                    #fill demand file
                    compare_data(pre_vendor_shops, vend)
                    compare_data(pre_buy_shops, buy)
                else:
                    print('Database ok...')
                    get_vend_data(latest_file, vend, buy)
            else:
                compare_db()
Example #13
 def _write_compressed_json(self, filename, content):
     compress_json.dump(
         content,
         os.path.join(self._destination_folder, (filename + ".json.gz")))
def reference_path_example(mode):
    """
    Saves a video of a shortest path follower agent navigating from a start
    position to a goal. Agent follows the ground truth reference path by
    navigating to intermediate viewpoints en route to goal.
    Args:
        mode: 'geodesic_path' or 'greedy'
    """

    show_waypoint_indicators = False
    config = habitat.get_config(
        config_paths="configs/test/habitat_r2r_vln_test.yaml")
    config.defrost()
    config.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
    config.TASK.SENSORS.append("HEADING_SENSOR")
    config.freeze()

    split = 'train'
    vln_data_path = '/home/mirshad7/habitat-lab/data/datasets/vln/mp3d/r2r/v1/' + split + '/' + split + '.json.gz'
    with gzip.open(vln_data_path, "rt") as f:
        deserialized = json.loads(f.read())

    val_ids = {}
    for i in range(len(deserialized['episodes'])):
        val_ids[deserialized['episodes'][i]['episode_id']] = i

    new_data_dict = {}
    new_data_dict['episodes'] = {}
    new_data_dict['instruction_vocab'] = deserialized['instruction_vocab']
    new_data_list = []
    save_fig = False
    steps_dict = {}

    with SimpleRLEnv(config=config) as env:
        print("Environment creation successful")

        # obj_attr_mgr = env.habitat_env._sim.get_object_template_manager()
        # remove_all_objects(env.habitat_env._sim)
        sim_time = 30  # @param {type:"integer"}
        continuous_nav = True  # @param {type:"boolean"}
        if continuous_nav:
            control_frequency = 10  # @param {type:"slider", min:1, max:30, step:1}
            frame_skip = 6  # @param {type:"slider", min:1, max:30, step:1}
        fps = control_frequency * frame_skip
        print("fps = " + str(fps))
        control_sequence = []
        for action in range(int(sim_time * control_frequency)):
            if continuous_nav:
                # allow forward velocity and y rotation to vary
                control_sequence.append({
                    "forward_velocity":
                    random.random() * 2.0,  # [0,2)
                    "rotation_velocity":
                    (random.random() - 0.5) * 2.0,  # [-1,1)
                })
            else:
                control_sequence.append(random.choice(action_names))

                # create and configure a new VelocityControl structure
        vel_control = habitat_sim.physics.VelocityControl()
        vel_control.controlling_lin_vel = True
        vel_control.lin_vel_is_local = True
        vel_control.controlling_ang_vel = True
        vel_control.ang_vel_is_local = True

        vis_ids = []

        collided_trajectories = []
        trajectory_without_collision = True

        for episode in range(len(deserialized['episodes'])):
            counter = 0
            env.reset()
            episode_id = env.habitat_env.current_episode.episode_id
            print(
                f"Agent stepping around inside environment. Episode id: {episode_id}"
            )

            dirname = os.path.join(IMAGE_DIR, "vln_reference_path_example",
                                   mode, "%02d" % episode)
            if os.path.exists(dirname):
                shutil.rmtree(dirname)
            os.makedirs(dirname)

            images = []
            steps = 0
            reference_path = env.habitat_env.current_episode.reference_path + [
                env.habitat_env.current_episode.goals[0].position
            ]

            # manually control the object's kinematic state via velocity integration
            time_step = 1.0 / (30)

            x = []
            y = []
            yaw = []
            vel = []
            omega = []

            continuous_path_follower = ContinuousPathFollower(
                env.habitat_env._sim, reference_path, waypoint_threshold=0.4)
            max_time = 30.0
            done = False
            EPS = 1e-4
            prev_pos = np.linalg.norm(
                env.habitat_env._sim.get_agent_state().position)
            if show_waypoint_indicators:
                for id in vis_ids:
                    sim.remove_object(id)
                vis_ids = setup_path_visualization(env.habitat_env._sim,
                                                   continuous_path_follower)

            while continuous_path_follower.progress < 1.0:
                # print("done",done)
                if done:
                    break
                if counter == 150:
                    counter = 0
                    collided_trajectories.append(
                        env.habitat_env.current_episode.episode_id)
                    trajectory_without_collision = False
                    break
                continuous_path_follower.update_waypoint()

                if show_waypoint_indicators:
                    sim.set_translation(continuous_path_follower.waypoint,
                                        vis_ids[0])

                agent_state = env.habitat_env._sim.get_agent_state()
                pos = np.linalg.norm(
                    env.habitat_env._sim.get_agent_state().position)

                if abs(pos - prev_pos) < EPS:
                    counter += 1
                previous_rigid_state = habitat_sim.RigidState(
                    quat_to_magnum(agent_state.rotation), agent_state.position)

                v, w = track_waypoint(
                    continuous_path_follower.waypoint,
                    previous_rigid_state,
                    vel_control,
                    dt=time_step,
                )

                observations, reward, done, info = env.step(vel_control)
                # print(observations)
                # save_map(observations, info, images)
                prev_pos = pos
                x.append(env.habitat_env._sim.get_agent_state().position[0])
                y.append(-env.habitat_env._sim.get_agent_state().position[2])
                yaw.append(
                    quaternion.as_euler_angles(
                        env.habitat_env._sim.get_agent_state().rotation)[1])
                vel.append(v)
                omega.append(w)
                steps += 1

            if save_fig:  # pragma: no cover
                plt.close()
                plt.subplots(1)
                plt.plot(x, y, "xb", label="input")
                plt.grid(True)
                plt.axis("equal")
                plt.xlabel("x[m]")
                plt.ylabel("y[m]")
                plt.legend()
                pose_title = dirname + 'pose.png'
                plt.savefig(pose_title)

                plt.subplots(1)
                plt.plot(yaw, "-r", label="yaw")
                plt.grid(True)
                plt.legend()
                plt.xlabel("line length[m]")
                plt.ylabel("yaw angle[deg]")
                yaw_title = dirname + 'yaw.png'
                plt.savefig(yaw_title)

                plt.subplots(1)
                plt.plot(vel, "-r", label="vel")
                plt.grid(True)
                plt.legend()
                plt.xlabel("line length[m]")
                plt.ylabel("omega_reference [rad/s^2]")
                vel_title = dirname + 'vel.png'
                plt.savefig(vel_title)

                plt.subplots(1)
                plt.plot(omega, "-r", label="v_reference")
                plt.grid(True)
                plt.legend()
                plt.xlabel("line length[m]")
                plt.ylabel("v_reference [m/s]")
                omega_title = dirname + 'omega.png'
                plt.savefig(omega_title)

                x = []
                y = []
                yaw = []
                vel = []
                omega = []

            if trajectory_without_collision:
                ids = val_ids[episode_id]
                single_data_dict = deserialized['episodes'][ids]
                new_data_list.append(single_data_dict)
            trajectory_without_collision = True
            print(f"Navigated to goal in {steps} steps.")
            steps_dict[episode] = steps
            # images_to_video(images, dirname, str(episode_id), fps = int (1.0/time_step))
            images = []

        steps_path = '/home/mirshad7/habitat-lab/train_steps.json.gz'
        # new_data_dict['episodes'] = new_data_list
        # path = '/home/mirshad7/habitat-lab/data/datasets/vln/mp3d/r2r/robo_vln/train/train.json.gz'
        # compress_json.dump(new_data_dict, path)
        compress_json.dump(steps_dict, steps_path)
        print("collided trajectories:", collided_trajectories)
Example #15
@app.route('/', methods=['GET', 'POST'])
def home():
    global result
    global count
    global search_word
    if request.method == 'POST':
        search_word = request.form["search bar"]
        result = search(search_word)
        count = 1
        return redirect(url_for("result_found"))
    else:
        return render_template("search.html")


if __name__ == '__main__':
    if not (os.path.isfile('comp_data.json.gz')
            and os.access('comp_data.json.gz', os.R_OK)):
        if not (os.path.isfile('data.txt') and os.access('data.txt', os.R_OK)):
            with open("data.txt", "w", encoding='utf8') as jsonfile:
                open_files_and_collect_data()
                collect_yap_data()
                json.dump(data, jsonfile, ensure_ascii=False)

        with open('data.txt', encoding='utf8') as data_json:
            data_load = json.load(data_json)
            compress_json.dump(data_load, "comp_data.json.gz")

    data = compress_json.load("comp_data.json.gz")
    app.run()
Example #16
def image_pipeline(
    image_path: str,
    output_path: str = None,
    blur_bbox_padding: int = 50,
    width: int = 480,
    initial_width: int = 1024,
    thumbnail_width: int = 256,
    hardness: float = 0.7,
    save_steps: bool = False,
    cache: bool = False,
    errors_path: str = None,
):
    """Executes complete pipeline on given image.

    Parameters
    ---------------------------
    image_path: str,
        Path from where to load the given image.
    output_path: str = None,
        Path where to save the processed image.
        Use None to not save the image.
    blur_bbox_padding: int = 50,
        The padding to use around the blur bbox cut.
    width: int = 480,
        The size to resize the image.
    initial_width: int = 1024,
        The initial resize width.
    thumbnail_width: int = 256,
        Width to use for the thumbnails during processing.
    hardness: float = 0.7,
        Hardness to use for the body cut.
    save_steps: bool = False,
        Whether to save the partial steps instead of the processed image.
        This option is useful to debug which parameters are to blame for
        unexpected pipeline behaviour.
        By default, this is False.
    cache: bool = False,
        Whether to skip processing an image that was already processed.
    errors_path: str = None,
        Path where to store the error info.
        Use None to not log the errors and raise them.
    """
    try:
        # Check whether this image has already been cached.
        if cache and output_path is not None and os.path.exists(output_path):
            # If this is the case we skip this image.
            return None

        # Loading the image.
        original = load_image(image_path)

        # Executes perspective correction
        image_perspective = perspective_correction(original)

        # Computing thumbnail
        image_perspective = get_thumbnail(image_perspective,
                                          width=initial_width)

        # Executes blur bbox cut
        image_bbox = blur_bbox(image_perspective, padding=blur_bbox_padding)

        # Determines optimal counter rotation
        image_rotated, angle, x = counter_rotate(image_bbox,
                                                 width=thumbnail_width)

        # Cuts the body lower part
        image_body_cut, _ = get_body_cut(image_bbox,
                                         image_rotated,
                                         angle,
                                         simmetry_axis=x,
                                         width=thumbnail_width,
                                         hardness=hardness)

        # Executes secondary blur bbox cut
        image_body_cut = blur_bbox(image_body_cut, padding=blur_bbox_padding)

        # Final result
        final_result = cv2.cvtColor(  # pylint: disable=no-member
            ace(
                cv2.cvtColor(  # pylint: disable=no-member
                    image_body_cut,
                    cv2.COLOR_GRAY2RGB  # pylint: disable=no-member
                ),
                slope=10),
            cv2.COLOR_RGB2GRAY  # pylint: disable=no-member
        )
    # If the user hit a keyboard interrupt we just stop.
    except KeyboardInterrupt as e:
        raise e
    # Otherwise we optionally write the encountered exception to disk.
    except Exception as e:
        if errors_path is None:
            raise e
        os.makedirs(errors_path, exist_ok=True)
        compress_json.dump(
            {
                "image-path": image_path,
                "text": str(e),
                "class": str(type(e)),
                "traceback": " ".join(traceback.format_exc().splitlines())
            },
            "{}/{}.json".format(errors_path,
                                os.path.basename(image_path).split(".")[0]),
            json_kwargs=dict(indent=4))
        return None

    if output_path is not None:
        directory_name = os.path.dirname(output_path)
        os.makedirs(directory_name, exist_ok=True)

    thumb = get_thumbnail(image_body_cut, width=width)

    if not save_steps:
        if output_path is not None:
            # Saving the resized image to the given path.
            cv2.imwrite(  # pylint: disable=no-member
                output_path,
                thumb)
        return thumb

    fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(15, 10))
    axes = axes.ravel()

    axes[0].imshow(original, cmap="gray")
    axes[0].set_title("Original image")

    axes[1].imshow(image_perspective, cmap="gray")
    axes[1].set_title("Perspective correction")

    axes[2].imshow(image_bbox, cmap="gray")
    axes[2].set_title("Blur BBox image")

    axes[3].imshow(image_rotated, cmap="gray")
    axes[3].set_title("Rotated image")

    axes[4].imshow(image_body_cut, cmap="gray")
    axes[4].set_title("Body cut image")

    axes[5].imshow(final_result, cmap="gray")
    axes[5].set_title("ACE")

    [ax.set_axis_off() for ax in axes.ravel()]
    fig.tight_layout()
    fig.savefig(output_path)
    plt.close(fig)
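A hypothetical usage sketch for image_pipeline (paths are placeholders, not from the original code); with save_steps=True the pipeline writes the 2x3 debug figure to output_path instead of the processed thumbnail.

processed = image_pipeline(
    image_path="scans/plate_001.png",
    output_path="processed/plate_001.png",
    cache=True,
    errors_path="errors",
)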
Example #17
import os
from typing import List, TextIO

import compress_json

input_dir: str = "mini-json"
output_dir: str = "decompressed-json"
log_file: str = "failed-decompression.txt"

files: List[str] = os.listdir(input_dir)

if not os.path.exists(output_dir):
    os.mkdir(output_dir)

for file in files:
    print(f"Decompressing {file}")

    try:
        mini_json_file_path: str = os.path.join(input_dir, file)
        decompressed_file_path: str = os.path.join(output_dir, f"{file[:-5]}")

        input_dict: dict = compress_json.load(mini_json_file_path)

        compress_json.dump(input_dict, decompressed_file_path)
    except Exception as e:  # JSONDecodeError
        print(
            f"Failed To Decompress {file}!!! Reason: {e}!!! Logging To {log_file}"
        )

        with open(file=log_file, mode="a+") as f:
            f.write(f"{file}; {e}\n")
Example #18
def sanitize_graph(graph_data: dict, root: str):
    """Convert all the graphs to a standard format.

    Parameters
    ----------
    graph_data: Dict,
        Information about the graph to sanitize.
    root: str,
        The working folder; all files are read from and written to it.
    """
    kwargs = graph_data["loading_settings"]

    kwargs["edge_path"] = os.path.join(root, graph_data["folder_name"],
                                       graph_data["edge_file"])

    kwargs.setdefault("directed", False)

    directed_dst_path = os.path.join(root, graph_data["folder_name"],
                                     "directed_sanitized.tsv")

    undirected_dst_path = os.path.join(root, graph_data["folder_name"],
                                       "undirected_sanitized.tsv")

    report_path = os.path.join(root, graph_data["folder_name"], "report.json")

    textual_report_path = os.path.join(root, graph_data["folder_name"],
                                       "report.txt")

    if all(
            os.path.exists(p) for p in (directed_dst_path, undirected_dst_path,
                                        report_path, textual_report_path)):
        return

    logger.info("Loading the file %s" % kwargs["edge_path"])
    graph: EnsmallenGraph = EnsmallenGraph.from_unsorted_csv(
        **kwargs, name=graph_data["graph"])
    logger.info("Enabling fast version")
    graph.enable_fast_walk()
    logger.info("Computing metadata")
    if not os.path.exists(report_path):
        logger.info("Computing JSON report")
        report = graph.report()
        compress_json.dump(report, report_path)
    if not os.path.exists(textual_report_path):
        logger.info("Computing textual report")
        textual_report = str(graph)
        with open(textual_report_path, "w") as f:
            f.write(textual_report)

    if not os.path.exists(undirected_dst_path):
        logger.info("Writing the file {}".format(undirected_dst_path))
        graph.dump_edges(
            path=undirected_dst_path,
            header=False,
            sources_column_number=0,
            destinations_column_number=1,
            weights_column_number=2,
            numeric_node_ids=True,
            # We dump the undirected file with directed=True so that both directions of each edge are written to the file.
            directed=True)
    if not os.path.exists(directed_dst_path):
        logger.info("Writing the file {}".format(directed_dst_path))
        graph.dump_edges(
            path=directed_dst_path,
            header=False,
            sources_column_number=0,
            destinations_column_number=1,
            weights_column_number=2,
            numeric_node_ids=True,
            # We dump the directed file with directed=False so that bidirectional edges are not duplicated in the output.
            directed=False)
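Finally, a hypothetical invocation sketch for sanitize_graph: the top-level keys mirror the ones the function reads, while the loading_settings contents and the paths are assumptions rather than values taken from the original code.

sanitize_graph(
    graph_data={
        "graph": "ExampleGraph",         # hypothetical graph name
        "folder_name": "example_graph",  # subfolder of `root`
        "edge_file": "edges.tsv",
        # Extra kwargs forwarded to EnsmallenGraph.from_unsorted_csv; left empty here.
        "loading_settings": {},
    },
    root="/data/graphs",
)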