Code Example #1
File: backend.py Project: lliurex/lliuwin
 def run_previous_uninstaller(self):
     if not self.info.previous_uninstaller_path \
     or not os.path.isfile(self.info.previous_uninstaller_path):
         return
     previous_uninstaller = self.info.previous_uninstaller_path.lower()
     uninstaller = self.info.previous_uninstaller_path
     command = [uninstaller, "--uninstall"]
     # Propagate noninteractive mode to the uninstaller
     if self.info.non_interactive:
         command.append("--noninteractive")
     if 0 and previous_uninstaller == self.info.original_exe.lower():
         # This block is disabled as the functionality is achieved via pylauncher
         if self.info.original_exe.lower().startswith(self.info.previous_target_dir.lower()):
             log.debug("Copying uninstaller to a temp directory, so that we can delete the containing directory")
             uninstaller = tempfile.NamedTemporaryFile()
             uninstaller.close()
             uninstaller = uninstaller.name
             copy_file(self.info.previous_uninstaller_path, uninstaller)
         log.info("Launching asynchronously previous uninstaller %s" % uninstaller)
         run_nonblocking_command(command, show_window=True)
         return True
     elif get_file_hash(self.info.original_exe) == get_file_hash(self.info.previous_uninstaller_path):
         log.info("This is the uninstaller running")
     else:
         log.info("Launching previous uninestaller %s" % uninstaller)
         subprocess.call(command)
         # Note: the uninstaller is now non-blocking so we can just as well quit this running version
         # TBD: make this call synchronous by waiting for the children process of the uninstaller
         self.application.quit()
         return True
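
Note: the get_file_hash helper itself does not appear in these listings. A minimal sketch of what such a helper typically looks like, assuming a chunked hashlib read (the function name matches the calls above, but the default algorithm, the chunk size, and the omission of the optional progress argument seen in Code Examples #9 and #10 are assumptions):

import hashlib

def get_file_hash(path, hash_name='md5', chunk_size=1024 * 1024):
    # Read the file in fixed-size chunks so memory use stays bounded
    # even for very large files such as ISO images.
    hasher = hashlib.new(hash_name)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
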
Code Example #2
File: backend.py Project: hakuna-m/wubiuefi
 def run_previous_uninstaller(self):
     if not self.info.previous_uninstaller_path \
     or not os.path.isfile(self.info.previous_uninstaller_path):
         return
     previous_uninstaller = self.info.previous_uninstaller_path.lower()
     uninstaller = self.info.previous_uninstaller_path
     command = [uninstaller, "--uninstall"]
     # Propagate noninteractive mode to the uninstaller
     if self.info.non_interactive:
         command.append("--noninteractive")
     if 0 and previous_uninstaller == self.info.original_exe.lower():
         # This block is disabled as the functionality is achieved via pylauncher
         if self.info.original_exe.lower().startswith(self.info.previous_target_dir.lower()):
             log.debug("Copying uninstaller to a temp directory, so that we can delete the containing directory")
             uninstaller = tempfile.NamedTemporaryFile()
             uninstaller.close()
             uninstaller = uninstaller.name
             copy_file(self.info.previous_uninstaller_path, uninstaller)
         log.info("Launching asynchronously previous uninstaller %s" % uninstaller)
         run_nonblocking_command(command, show_window=True)
         return True
     elif get_file_hash(self.info.original_exe) == get_file_hash(self.info.previous_uninstaller_path):
         log.info("This is the uninstaller running")
     else:
         log.info("Launching previous uninestaller %s" % uninstaller)
         subprocess.call(command)
         # Note: the uninstaller is now non-blocking so we can just as well quit this running version
         # TBD: make this call synchronous by waiting for the children process of the uninstaller
         self.application.quit()
         return True
Code Example #3
File: backend.py Project: lliurex/lliuwin
 def check_metalink(self, metalink, base_url, associated_task=None):
     if self.info.skip_md5_check:
         return True
     url = base_url +"/" + self.info.distro.metalink_md5sums
     metalink_md5sums = downloader.download(url, self.info.install_dir, web_proxy=self.info.web_proxy)
     url = base_url +"/" + self.info.distro.metalink_md5sums_signature
     metalink_md5sums_signature = downloader.download(url, self.info.install_dir, web_proxy=self.info.web_proxy)
     if not verify_gpg_signature(metalink_md5sums, metalink_md5sums_signature, self.info.trusted_keys):
         log.error("Could not verify signature for metalink md5sums")
         return False
     md5sums = read_file(metalink_md5sums)
     log.debug("metalink md5sums:\n%s" % md5sums)
     md5sums = dict([reversed(line.split()) for line in md5sums.replace('*','').split('\n') if line])
     hashsum = md5sums.get(os.path.basename(metalink))
     if not hashsum:
         log.error("Could not find %s in metalink md5sums)" % os.path.basename(metalink))
         return False
     hash_len = len(hashsum)*4
     if hash_len == 160:
         hash_name = 'sha1'
     elif hash_len in [224, 256, 384, 512]:
         hash_name = 'sha' + str(hash_len)
     else:
         hash_name = 'md5'
     hashsum2 = get_file_hash(metalink, hash_name)
     if hashsum != hashsum2:
         log.error("The %s of the metalink does not match (%s != %s)" % (hash_name, hashsum, hashsum2))
         return False
     return True
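
check_metalink infers the hash algorithm from the digest length alone: a hex digest encodes 4 bits per character, so 40 characters mean SHA-1 (160 bits), 56/64/96/128 characters mean SHA-224/256/384/512, and anything else falls back to MD5. The same heuristic, condensed into a standalone sketch (the helper name guess_hash_name is hypothetical):

def guess_hash_name(hex_digest):
    # Each hex character encodes 4 bits of the digest.
    bits = len(hex_digest) * 4
    if bits == 160:
        return 'sha1'
    if bits in (224, 256, 384, 512):
        return 'sha%d' % bits
    return 'md5'  # 128-bit digests and anything unrecognized

assert guess_hash_name('a' * 40) == 'sha1'
assert guess_hash_name('a' * 64) == 'sha256'
assert guess_hash_name('a' * 32) == 'md5'
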
Code Example #4
File: core.py Project: spdx/spdx-npm-build-tool
 def set_creation_info(self):
     ext_doc_ref = ExternalDocumentRef(
         self.code_extra_params["ext_doc_ref"],
         self.code_extra_params["tool_version"],
         Algorithm("SHA1", utils.get_file_hash(self.full_file_path or "")),
     )
     self.spdx_document.add_ext_document_reference(ext_doc_ref)
     self.spdx_document.comment = self.code_extra_params["notice"]
     if self.doc_type == utils.TAG_VALUE:
         self.spdx_document.creation_info.add_creator(
             Tool(
                 self.code_extra_params["tool_name"]
                 + " "
                 + self.code_extra_params["tool_version"]
             )
         )
         self.spdx_document.namespace = self.code_extra_params["notice"]
         self.spdx_document.name = self.code_extra_params["notice"]
     else:
         self.spdx_document.creation_info.add_creator(
             Tool(
                 self.code_extra_params["tool_name_rdf"]
                 + "."
                 + self.code_extra_params["tool_version"]
             )
         )
         self.spdx_document.namespace = self.code_extra_params["tool_name_rdf"]
         self.spdx_document.name = self.code_extra_params["tool_name_rdf"]
     self.spdx_document.creation_info.set_created_now()
     self.spdx_document.creation_info.comment = self.code_extra_params[
         "creator_comment"
     ]
     self.spdx_document.spdx_id = self.code_extra_params["doc_ref"]
Code Example #5
File: backend.py Project: hakuna-m/wubiuefi
 def check_metalink(self, metalink, base_url, associated_task=None):
     if self.info.skip_md5_check:
         return True
     url = base_url +"/" + self.info.distro.metalink_md5sums
     metalink_md5sums = downloader.download(url, self.info.install_dir, web_proxy=self.info.web_proxy)
     url = base_url +"/" + self.info.distro.metalink_md5sums_signature
     metalink_md5sums_signature = downloader.download(url, self.info.install_dir, web_proxy=self.info.web_proxy)
     if not verify_gpg_signature(metalink_md5sums, metalink_md5sums_signature, self.info.trusted_keys):
         log.error("Could not verify signature for metalink md5sums")
         return False
     md5sums = read_file(metalink_md5sums)
     log.debug("metalink md5sums:\n%s" % md5sums)
     md5sums = dict([reversed(line.split()) for line in md5sums.replace('*','').split('\n') if line])
     hashsum = md5sums.get(os.path.basename(metalink))
     if not hashsum:
         log.error("Could not find %s in metalink md5sums)" % os.path.basename(metalink))
         return False
     hash_len = len(hashsum)*4
     if hash_len == 160:
         hash_name = 'sha1'
     elif hash_len in [224, 256, 384, 512]:
         hash_name = 'sha' + str(hash_len)
     else:
         hash_name = 'md5'
     hashsum2 = get_file_hash(metalink, hash_name)
     if hashsum != hashsum2:
         log.error("The %s of the metalink does not match (%s != %s)" % (hash_name, hashsum, hashsum2))
         return False
     return True
Code Example #6
 def save(self):
     file = self.cleaned_data['image']
     file_hash = utils.get_file_hash(file)
     file.name = ''.join((file_hash, '.', file.image.format.lower()))
     name = os.path.join('thomas', 'images', str(self.user.pk), file.name)
     name = default_storage.save(name, file)
     file.url = default_storage.url(name)
     return file
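
Here the upload is stored under its own hash, so identical images deduplicate to a single name regardless of what the user called them. utils.get_file_hash is not shown; a plausible sketch for a Django upload, assuming it hashes the UploadedFile in chunks (the algorithm choice is an assumption):

import hashlib

def get_file_hash(uploaded_file, hash_name='sha1'):
    # UploadedFile.chunks() yields the upload in memory-friendly pieces.
    hasher = hashlib.new(hash_name)
    for chunk in uploaded_file.chunks():
        hasher.update(chunk)
    return hasher.hexdigest()
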
Code Example #7
File: core.py Project: spdx/spdx-npm-build-tool
 def get_package_verification_code(self):
     verificationcode = 0
     filelist = ""
     templist = []
     for item in self.id_scan_results:
         if not utils.should_skip_file(item["FileName"], self.output_file_name):
             templist.append(utils.get_file_hash(item["FileName"]))
     # sort the sha values
     templist.sort()
     for item in templist:
         filelist = "{0}{1}".format(filelist, item)
     verificationcode = hashlib.sha1(filelist.encode())
     return verificationcode.hexdigest()
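
This follows the SPDX 2.x package verification code algorithm: take the SHA-1 of every file in the package, sort the hex digests, concatenate them, and SHA-1 the result. Condensed into a standalone sketch (the helper name is hypothetical):

import hashlib

def package_verification_code(file_sha1s):
    # Sort the per-file digests so the result is independent of file order.
    return hashlib.sha1(''.join(sorted(file_sha1s)).encode()).hexdigest()
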
Code Example #8
def process_file(file_path):
    utils.file_exists(file_path)
    filename, mod_time = utils.get_file_metadata(file_path)
    file_hash = utils.get_file_hash(filename, mod_time)
    if data.logic.file_already_proccessed(file_hash):
        log.info('The file {} has already been processed'.format(filename))
    else:
        log.info('Start processing {}'.format(filename))
        data.logic.create_file_processed(file_hash, filename, mod_time)
        input_records = utils.parse_csv(file_path)
        records_with_candidates = data.logic.infer_candidates(input_records)
        process_rows(file_hash, records_with_candidates)
        log.info('Finished processing {}'.format(filename))
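
Unlike the other examples, this get_file_hash takes the file's name and modification time rather than a path, so the resulting hash identifies a (filename, mtime) pair and the contents are never read. A hypothetical sketch of such a helper:

import hashlib

def get_file_hash(filename, mod_time):
    # Hash metadata, not contents: a re-export with the same name and
    # mtime is treated as already processed.
    key = '{}:{}'.format(filename, mod_time)
    return hashlib.sha256(key.encode('utf-8')).hexdigest()
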
Code Example #9
File: backend.py Project: lliurex/lliuwin
 def check_file(self, file_path, relpath, md5sums, associated_task=None):
     log.debug("  checking %s" % file_path)
     if associated_task:
         associated_task.description = _("Checking %s") % file_path
     relpath = relpath.replace("\\", "/")
     md5line = find_line_in_file(md5sums, "./%s" % relpath, endswith=True)
     if not md5line:
         raise Exception("Cannot find md5 in %s for %s" % (md5sums, relpath))
     reference_hash = md5line.split()[0]
     hash_len = len(reference_hash)*4
     if hash_len == 160:
         hash_name = 'sha1'
     elif hash_len in [224, 256, 384, 512]:
         hash_name = 'sha' + str(hash_len)
     else:
         hash_name = 'md5'
     hash_file = get_file_hash(file_path, hash_name, associated_task)
     log.debug("  %s %s = %s %s %s" % (file_path, hash_name, hash_file, hash_file == reference_hash and "==" or "!=", reference_hash))
     return hash_file == reference_hash
Code Example #10
File: backend.py Project: hakuna-m/wubiuefi
 def check_file(self, file_path, relpath, md5sums, associated_task=None):
     log.debug("  checking %s" % file_path)
     if associated_task:
         associated_task.description = _("Checking %s") % file_path
     relpath = relpath.replace("\\", "/")
     md5line = find_line_in_file(md5sums, "./%s" % relpath, endswith=True)
     if not md5line:
         raise Exception("Cannot find md5 in %s for %s" % (md5sums, relpath))
     reference_hash = md5line.split()[0]
     hash_len = len(reference_hash)*4
     if hash_len == 160:
         hash_name = 'sha1'
     elif hash_len in [224, 256, 384, 512]:
         hash_name = 'sha' + str(hash_len)
     else:
         hash_name = 'md5'
     hash_file = get_file_hash(file_path, hash_name, associated_task)
     log.debug("  %s %s = %s %s %s" % (file_path, hash_name, hash_file, hash_file == reference_hash and "==" or "!=", reference_hash))
     return hash_file == reference_hash
Code Example #11
File: index.py Project: Lifka/thor-av-multiscanner
async def upload_file():
    if request.method != 'POST':
        return json.dumps({"status": "error", "message": "No POST request"})

    files = await request.files
    if 'file' not in files:
        return json.dumps({
            "status": "error",
            "message": "Incorrect request. It was expected to receive the file using POST."
        })
    file = files["file"]
    if not file.filename:
        return json.dumps({"status": "error", "message": "Empty file."})

    file_path = save_file(file, app.config["VAULT"])
    print("[upload_file] Sent file -> {}: {}".format(file_path, file))
    return json.dumps({
        "status": "success",
        "hash": '{}'.format(get_file_hash(file_path))
    })
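
A client-side sketch of exercising the endpoint above with requests, assuming the route is served at http://localhost:5000/upload (the URL and file name are placeholders):

import requests

with open('sample.bin', 'rb') as f:
    resp = requests.post('http://localhost:5000/upload', files={'file': f})
print(resp.json())  # e.g. {"status": "success", "hash": "..."}
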
Code Example #12
def test_get_file_hash_function():
    assert get_file_hash(
        picture_with_exif_not_in_filename
    ) == "f83ab5b8387d67b537cbf07ce6259d4a754b63d2043059c9e67b28cb6cc8ada7c37001e1af5007313d521989ccfb128d54aefc9b58e9b09790e09375eb31b544"
Code Example #13
def main():

    #
    # start_dir = get_dir("forrás")
    # dest_dir = get_dir("cél")

    start_dir = "./teszt_video"
    dest_dir = convert_relative_path_to_absolute("./videos")

    dest_dir_no_exif = create_no_exif_data_dir(dest_dir)

    #print(daily_config_filename, dest_dir)
    file_hash_in_dest_dir = get_hash_of_files_from_file(daily_config_filename, dest_dir)

    #print(file_hash_in_dest_dir, get_count_of_files_in_dir(dest_dir), len(file_hash_in_dest_dir))

    # sys.exit()
    file_counter_for_same_name = 0
    file_counter_dest_dir_no_exif = 0
    file_counter_dest_dir = 0
    file_count_deleted = 0

    for root, dirs, files in os.walk(start_dir, topdown=True):
        for name in files:

            original_name = name
            original_name_with_path = os.path.join(root, name)
            filename, filename_ext = os.path.splitext(name)
            filename = clean_file_name(filename)

            if filename_ext.lower() in FILE_EXTENSIONS:

                video_date_converted = get_video_exif_info(original_name_with_path)

                if video_date_converted:
                    filename = set_exif_info_in_filename(video_date_converted, filename)
                    target_dir = dest_dir
                else:
                    target_dir = dest_dir_no_exif
                    error_message("\nAz EXIF információ nem elérhető... " + original_name)

                new_name = filename + filename_ext

                #print(new_name)

                file_hash = get_file_hash(original_name_with_path)

                # print("HASH elenőrzés: ", file_hash in file_hash_in_dest_dir)

                if file_hash in file_hash_in_dest_dir:
                    warning_message("Már létezik ugyanilyen tartalmú fájl" \
                        + "a célkönyvtárban, ezért törlöm a forráskönyvtárban...")
                    file_count_deleted += 1
                    os.remove(original_name_with_path)
                else:
                    # Move the source file to the destination directory
                    try:

                        if os.path.isfile(os.path.join(target_dir, new_name)):
                            # a file with this name already exists and its contents differ,
                            # so the file name also gets a timestamp
                            warning_message("A file with this name already exists and the contents of the two differ...")
                            filename = set_timestamp_in_filename(filename)
                            new_name = filename + filename_ext
                            file_counter_for_same_name += 1

                        message = " ".join(["Áthelyezés: ", original_name_with_path, os.path.join(target_dir, new_name)])
                        info_message(message)
                        if target_dir == dest_dir:
                            file_counter_dest_dir += 1
                        else:
                            file_counter_dest_dir_no_exif += 1

                        file_hash_in_dest_dir.add(file_hash)
                        os.rename(original_name_with_path, os.path.join(target_dir, new_name))
                    except Exception:
                        print("An error occurred with the following file: ", os.path.join(target_dir, new_name))
                        continue

    if file_hash_in_dest_dir:
        save_file_hash_to_file(daily_config_filename, file_hash_in_dest_dir)

    summary_report(file_counter_dest_dir, file_counter_dest_dir_no_exif, file_counter_for_same_name, file_count_deleted)
Code Example #14
def main():
    """
    A forrás könyvtárban lévő jpg fájlokat áthelyezi a cél könyvtárba úgy, hogy az exif információval
    rendelkező fájlok esetében a fájl nevébe beírja a készítés dátumát is.

    Fájlnévütközés esetén - ha a két fájl tartalma eltér -  a fájl nevét kiegészíti az aktuális időből
     készített időbélyeggel a felülírás elkerülése érdekében.

    A fájlok neveit optimalizálja:
    - lecseréli a magyar ékezetes karaktereket ékezet nélkülire (angol ábc)
    - eltávolítja szóközöket, pontokat és egyéb speciális jeleket

    """

    start_dir = get_dir("forrás")
    dest_dir = get_dir("cél")
    dest_dir_no_exif = create_no_exif_data_dir(dest_dir)

    file_hash_in_dest_dir = get_hash_of_files_from_file(
        daily_config_filename, dest_dir)

    file_counter_for_same_name = 0
    file_counter_dest_dir_no_exif = 0
    file_counter_dest_dir = 0
    # file_counter_bad_image = 0
    file_count_deleted = 0

    for root, dirs, files in os.walk(start_dir, topdown=True):
        for name in files:

            original_name = name
            original_name_with_path = os.path.join(root, name)
            filename, filename_ext = os.path.splitext(name)
            filename = clean_file_name(filename)

            # extract the EXIF information from jpg files

            if filename_ext.lower() in FILETYPES:
                # try:

                image_date_converted = get_image_exif_info(
                    original_name_with_path)

                if image_date_converted:
                    filename = set_exif_info_in_filename(
                        image_date_converted, filename)
                    target_dir = dest_dir
                else:
                    target_dir = dest_dir_no_exif
                    error_message("\nAz EXIF információ nem elérhető... " +
                                  original_name)

                new_name = filename + filename_ext

                print(new_name)

                file_hash = get_file_hash(original_name_with_path)

                if file_hash in file_hash_in_dest_dir:
                    warning_message("Már létezik ugyanilyen tartalmú fájl\
                                    a célkönyvtárban, ezért törlöm a forráskönyvtárban..."
                                    )
                    file_count_deleted += 1
                    os.remove(original_name_with_path)
                else:
                    # A forrás állomány áthelyezése a célkönyvtárba
                    try:

                        if os.path.isfile(os.path.join(target_dir, new_name)):
                            # a file with this name already exists and its contents differ,
                            # so the file name also gets a timestamp
                            warning_message(
                                "A file with this name already exists and the contents of the two differ..."
                            )
                            filename = set_timestamp_in_filename(filename)
                            new_name = filename + filename_ext
                            file_counter_for_same_name += 1

                        message = " ".join([
                            "Áthelyezés: ", original_name_with_path,
                            os.path.join(target_dir, new_name)
                        ])
                        info_message(message)
                        if target_dir == dest_dir:
                            file_counter_dest_dir += 1
                        else:
                            file_counter_dest_dir_no_exif += 1

                        file_hash_in_dest_dir.add(file_hash)
                        os.rename(original_name_with_path,
                                  os.path.join(target_dir, new_name))
                    except Exception:
                        print("An error occurred with the following file: ",
                              os.path.join(target_dir, new_name))
                        continue

    if file_hash_in_dest_dir:
        save_file_hash_to_file(daily_config_filename, file_hash_in_dest_dir)

    summary_report(file_counter_dest_dir, file_counter_dest_dir_no_exif,
                   file_counter_for_same_name, file_count_deleted)
Code Example #15
File: core.py Project: spdx/spdx-npm-build-tool
    def create_spdx_document(self):
        """
        Write identifier scan results as SPDX Tag/value or RDF.
        """
        logging.basicConfig(level=logging.INFO)
        logging.info("Creating spdx document")
        self.get_output_file()
        self.spdx_document = Document(
            version=Version(2, 1),
            data_license=License.from_identifier(
                self.code_extra_params["lic_identifier"]
            ),
        )
        self.set_creation_info()
        if isdir(self.path_or_file):
            input_path = self.path_or_file
        else:
            input_path = dirname(self.path_or_file)

        package = self.spdx_document.package = Package(
            download_location=NoAssert(), version=self.get_package_version()
        )
        self.set_package_info(package)
        all_files_have_no_license = True
        all_files_have_no_copyright = True
        file_license_list = []
        file_license_ids = []
        if utils.is_dir(self.path_or_file):
            for idx, file_data in enumerate(self.id_scan_results):
                file_data_instance = open(file_data["FileName"], "r")
                if not utils.should_skip_file(
                    file_data["FileName"], self.output_file_name
                ):
                    name = file_data["FileName"].replace(self.path_or_file, ".")
                    file_entry = File(
                        name=name,
                        chk_sum=Algorithm(
                            "SHA1", utils.get_file_hash(file_data["FileName"]) or ""
                        ),
                    )
                    spdx_license = None
                    if self.doc_type == utils.TAG_VALUE:
                        spdx_license = License.from_identifier(file_data["SPDXID"])
                    else:
                        licenseref_id = "SPDXID-Doc-Generator-" + file_data["SPDXID"]
                        file_license_ids.append(licenseref_id)
                        if licenseref_id in file_license_ids:
                            spdx_license = ExtractedLicense(licenseref_id)
                        spdx_license.name = NoAssert()
                        comment = "N/A"
                        spdx_license.comment = comment
                        text = NoAssert()
                        if not text:
                            text = comment
                        spdx_license.text = text
                        self.spdx_document.add_extr_lic(spdx_license)
                        package.add_lics_from_file(spdx_license)
                    file_entry.add_lics(spdx_license)
                    file_license_list.append(spdx_license)
                    file_entry.conc_lics = NoAssert()
                    file_entry.copyright = SPDXNone()
                    file_entry.spdx_id = self.code_extra_params["file_ref"].format(
                        idx + 1
                    )
                    package.add_file(file_entry)
            if self.doc_type == utils.TAG_VALUE:
                for spdx_license in list(set(file_license_list)):
                    package.add_lics_from_file(spdx_license)

        if len(package.files) == 0:
            if self.doc_type == utils.TAG_VALUE:
                self.output_file.write(
                    "# No results for package '{}'.\n".format(package.name)
                )
            else:
                self.output_file.write(
                    "<!-- No results for package '{}'. -->\n".format(package.name)
                )

        if self.doc_type == utils.TAG_VALUE:
            from spdx.writers.tagvalue import write_document  # NOQA
        else:
            from spdx.writers.rdf import write_document  # NOQA

        if package.files:
            spdx_output = io.StringIO()
            if self.doc_type == utils.TAG_VALUE:
                write_document(self.spdx_document, spdx_output, validate=False)
                logging.info("SPDX Tag-Value Document created successfully.")
            else:
                # spdx_output = io.BytesIO()
                write_document(self.spdx_document, spdx_output, validate=False)
                logging.info("SPDX RDF Document created successfully.")
            result = spdx_output.getvalue()
            if self.doc_type == utils.TAG_VALUE:
                result = result.encode("utf-8")
            self.output_file.write(result)