def sync_dataset(DATASET_URL=DATASET_URL):
    sys.stdout.write("\n> Syncing files at: https://data.world/" +
                     DATASET_URL + " -> ")
    with Spinner():
        api_client = dw.api_client()
        api_client.sync_files(DATASET_URL)
        print("\n")
Example #2
def import_from_Browser(BrowserName, db_path, loader, mute = False):
    """
        Wrapper function that calls and handles the appropriate Bookmarks Loader
        for the given browser.
        Usage includes being passed as an encoder for ChipOps.dfh.JSON.storeObjects()

        # Parameters
        `BrowserName` : Name of the Browser ->  _(str)_
        `db_path` : Path to the Bookmarks' Database of the browser ->  _(str)_
        `loader` : Function handling the importing for the specific case ->  _(function)_

        # Yields
        `Chip` : Objects of the dataChip.Chip class containing bookmark name, url and
                 folder structure data

        # Examples
        >>>>>> JSON.storeObjects("data/chipsTEST.json", import_from_Browser("Google Chrome", __GC_DB, jsonMiner))

    """
    try:
        db_path = os.path.expanduser(db_path)
        if not os.path.exists(db_path):
            raise FileNotFoundError

        if not mute: sys.stdout.write("\n> Importing bookmarks from {} -> ".format(BrowserName))
        with Spinner(mute = mute):
            for bookmark in loader(db_path):
                yield bookmark

    except Exception as e:
        print("\nUnable to import bookmarks from {}".format(BrowserName))
        raise e
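
# Usage sketch: import_from_Browser is a generator, so its output can be
# streamed or collected into a list. The Chrome bookmarks path below is only
# an assumption (the usual location on Linux), and jsonMiner is this project's
# JSON bookmarks loader; the path is expanded inside the function.
chips = list(import_from_Browser("Google Chrome",
                                 "~/.config/google-chrome/Default/Bookmarks",
                                 jsonMiner))
print(len(chips), "bookmarks imported")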
Example #3
def generateImportsfromExports():
    """
        Generator for iterating through combined exports from browsers and medium files

        Yields:
        `dataChip.Primitive_Bookmark`
    """
    sys.stdout.write(
        "\n> Auto-Importing bookmarks: "
    )  # TODO: Add the config setup check before this for paths
    with Spinner():
        b_1, c_1 = GET_BrowserExports(buku=True)
        b_2, c_2 = GET_MediumExports()
    print(c_1 + c_2, "objects imported")

    b_1.extend(b_2)
    yield from b_1
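
# Usage sketch: the generator flattens browser exports and Medium exports into
# a single stream, so callers can simply iterate over it.
for p_bm in generateImportsfromExports():
    print(p_bm)  # each item is a dataChip.Primitive_Bookmark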
Example #4
def generateChipImports():
    """
        Generator for iterating through imports from browsers and medium files

        Yields:
        `dataChip.Chip`
    """
    import parser_Browser as BP  # this import has to be removed
    # if parser_Browser.py is to be run on its own

    sys.stdout.write(
        "\n> Auto-Importing bookmarks: "
    )  # TODO: Add the config setup check before this for paths
    with Spinner():
        if __IMPORT_GC:
            try:
                for chips in BP.import_from_Browser("Google Chrome",
                                                    __GC_DB,
                                                    BP.jsonMiner,
                                                    mute=True):
                    yield chips
            except FileNotFoundError as e:
                print("Issue Encountered: ", e, " > ", __GC_DB)

        if __IMPORT_CR:
            try:
                for chips in BP.import_from_Browser("Chromium",
                                                    __CR_DB,
                                                    BP.jsonMiner,
                                                    mute=True):
                    yield chips
            except FileNotFoundError as e:
                print("Issue Encountered: ", e, " > ", __CR_DB)

        if __IMPORT_MEDIUM:
            try:
                mediumB, _ = GET_MediumExports()
                for p_bm in mediumB:
                    yield Chip(p_bm)
            except FileNotFoundError as e:
                print("Issue Encountered: ", e, " > ", MEDIUM_DIR)
Example #5
def main():
    # create arg parse instance
    parser = argparse.ArgumentParser()
    parser.add_argument('FILE',
                        type=str,
                        help='ciphertext which is used to derive the key')
    parser.add_argument('REFERENCE_DATA',
                        type=str,
                        help='reference data to analyze the ciphertext')

    # parse arguments
    args = parser.parse_args()

    # read file
    ciphertext = IfStream(args.FILE)
    refData = args.REFERENCE_DATA

    # derive key from ciphertext
    with Spinner('Analyzing cipher '):
        key = Cipher.deriveKey(ciphertext.data, refData)

    # print key to stdout
    print(f"The key is: {key}")
def fetch_dataset(DATASET_URL=DATASET_URL):
    """
        Fetches the data.world dataset at the given URL path using dw.load_dataset()

        The load_dataset() function facilitates maintaining copies of datasets on the
        local filesystem. It will download a given dataset's datapackage and store it
        under ~/.dw/cache. When used subsequently, load_dataset() will use the copy
        stored on disk and will work offline, unless it's called with force_update=True
        or auto_update=True.

        force_update=True will overwrite your local copy unconditionally.
        auto_update=True will only overwrite your local copy if a newer version of the dataset is available on data.world.

        Returns
        -------
        `datadotworld.models.dataset.LocalDataset` object

    """
    sys.stdout.write("\n> Fetching bookmarks from: https://data.world/" +
                     DATASET_URL + " -> ")
    with Spinner():
        dataset = dw.load_dataset(DATASET_URL, auto_update=True)
        print("\n")

    if args.verbose:  # assumes a module-level argparse namespace named `args`
        colorama.init(autoreset=True)
        print(
            colorama.Fore.BLACK + colorama.Back.YELLOW +
            "\n Local Dataset Info: " + "---" * 23, "\n")

        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(dataset.describe())
        print("\n", dataset.dataframes)

        print(colorama.Fore.BLACK + colorama.Back.YELLOW + "\n" + "---" * 30)

    return dataset
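
# A minimal sketch of the caching behaviour described in the docstring above,
# assuming the datadotworld package is installed and configured with an API
# token; the "owner/dataset" key is hypothetical.
import datadotworld as dw

ds = dw.load_dataset("owner/dataset")                     # reuse the ~/.dw/cache copy if present (works offline)
ds = dw.load_dataset("owner/dataset", auto_update=True)   # re-download only if data.world has a newer version
ds = dw.load_dataset("owner/dataset", force_update=True)  # always re-download, overwriting the local copy
print(ds.describe())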
Example #7
def run(ERASE_BEFORE_INIT=False,
        EXPORT_FILE_PATH="./temp/browserExport.html",
        mute=False):
    try:
        output = check_output(
            ["buku", "-v"], stderr=PIPE
        )  # shell=True is avoided for security reasons; it would be safe here since no user input is involved
        if output: output = output.decode("ascii").strip()
        if not mute: print("Buku version(" + output + ") Detected!")

        if ERASE_BEFORE_INIT:
            print("\n Erasing Buku Database before Export Initialization")
            call("expect ./bukuOps/bukuErase.sh", shell=True)

        if not mute:
            sys.stdout.write(
                "\n> Auto-Importing bookmarks from all available browsers: ")
            with Spinner():
                call("expect ./bukuOps/bukuAI.sh", shell=True, stdout=DEVNULL)
        else:
            call("expect ./bukuOps/bukuAI.sh", shell=True, stdout=DEVNULL)

        if os.path.exists(EXPORT_FILE_PATH):
            os.remove(EXPORT_FILE_PATH)
        out = check_output(["buku", "-e", EXPORT_FILE_PATH])
        if out: out = out.decode("ascii").strip()

        if not mute:
            print("\n\t Buku Status:", out)
            print("\n")

    except (FileNotFoundError, subprocess.CalledProcessError):
        # a missing buku binary raises FileNotFoundError; a failing buku call raises CalledProcessError
        print("'Buku' Not Found!")
        print(
            "BMM uses Buku as a temporary backend tool for interacting with your browser..."
        )
        print("Please install Buku through: https://github.com/jarun/Buku\n")
Example #8
            node = tree.get_node('->'.join(foldersStack))
            if node.data is None:
                node.data = []
            node.data.append(chip.ID)
            yield chip



if __name__ == '__main__':
    colorama.init(autoreset = True)
    print(colorama.Fore.WHITE + colorama.Back.RED + 'Warning! This script is to be run internally by ' + PROJECT_NAME + ' scripts; direct use might lead to unexpected behaviour\n')

    JSON.storeObjects("data/chipsTEST.json", import_from_Browser("Google Chrome", __GC_DB, jsonMiner))

    sys.stdout.write("\n> Fetching bookmarks from local file system -> ")
    with Spinner():
        print("\nObjects loaded: ", len(JSON.loadObjects("data/chipsTEST.json")))

    print("\n\n")
    tree.show(line_type="ascii-em")

    TreeOctane.to_JSON()