def test_hashfile(self):
    """hashfile() on a file's contents must equal the blob id the repo assigns.

    Fix: the original used ``tempfile.mkstemp()[1]``, discarding the open
    OS-level file descriptor (leaking it) and never deleting the temp file.
    We now wrap the descriptor so it is closed, and unlink the file even if
    hashing raises.
    """
    data = "bazbarfoo"
    handle, tempfile_path = tempfile.mkstemp()
    try:
        # fdopen adopts the descriptor from mkstemp, so closing the
        # file object closes the fd too — no leak.
        with os.fdopen(handle, "w") as fh:
            fh.write(data)
        hashed_sha1 = hashfile(tempfile_path)
    finally:
        # Always remove the temp file, even on failure.
        os.unlink(tempfile_path)
    written_sha1 = self.repo.create_blob(data)
    self.assertEqual(hashed_sha1, written_sha1)
def test_hashfile(self):
    """hashfile() on a file's contents must equal the blob id the repo assigns.

    Fix: ``tempfile.mkstemp()[1]`` threw away the open file descriptor that
    mkstemp returns (a descriptor leak) and left the temp file on disk.
    Reopen via os.fdopen so the fd is closed with the file object, and
    unlink the path when done.
    """
    data = "bazbarfoo"
    fd, tempfile_path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as fh:
            fh.write(data)
        hashed_sha1 = hashfile(tempfile_path)
    finally:
        # Clean up the temp file regardless of the outcome above.
        os.unlink(tempfile_path)
    written_sha1 = self.repo.create_blob(data)
    self.assertEqual(hashed_sha1, written_sha1)
def test_hashfile(self):
    """Verify hashfile() agrees with the repository's own blob hashing."""
    payload = "bazbarfoo"
    fd, path = tempfile.mkstemp()
    # Adopt the descriptor returned by mkstemp so it is properly closed.
    with os.fdopen(fd, 'w') as stream:
        stream.write(payload)
    computed = hashfile(path)
    os.unlink(path)
    expected = self.repo.create_blob(payload)
    assert computed == expected
def find_fs_blob_in_repository(self, path):
    """Return the data blob stored in this repository for the file at *path*.

    The file's contents are hashed the same way git would hash them; if a
    blob with that id exists in the repository it is returned.

    Raises:
        DataBlobNotFoundError: when *path* is not a regular file (e.g. a
            directory or a missing path) or no matching blob is stored.
    """
    if not os.path.isfile(path):
        # Directories and nonexistent paths can never map to a blob.
        raise DataBlobNotFoundError("Could not find data blob in repository")
    # Hash the file to obtain the blob id git would assign this content.
    blob_id = pygit2.hashfile(path)
    if blob_id not in self:
        raise DataBlobNotFoundError("Could not find data blob in repository")
    return self[blob_id]
def test_hashfile(self):
    """hashfile() of a written temp file must match create_blob of the same data."""
    content = "bazbarfoo"
    descriptor, temp_path = tempfile.mkstemp()
    # Wrap the raw descriptor so closing the file object also closes the fd.
    with os.fdopen(descriptor, 'w') as out:
        out.write(content)
    file_hash = hashfile(temp_path)
    os.unlink(temp_path)
    blob_hash = self.repo.create_blob(content)
    self.assertEqual(file_hash, blob_hash)
def process_file(self, path: str) -> Dict:
    """Register the file at *path* with the remote service and return its response.

    Looks up the current export job, sniffs the file's MIME type from an
    open descriptor, and submits a ``fileInput`` mutation carrying the
    file's metadata (name, size, project-relative path, git blob hash).
    """
    job = crud.job.get(self.db, self.job_id)
    assert job
    assert self.project_details
    # Detect the MIME type from the open descriptor rather than the name.
    with open(path) as file_handle:
        content_type = magic.from_descriptor(file_handle.fileno(), mime=True)
    file_input = {
        "filename": os.path.basename(path),
        "spaceId": self.project_details["space_id"],
        "size": os.path.getsize(path),
        "projectPath": os.path.relpath(path, job.path),
        # hashfile yields the git blob id for this file's contents.
        "gitHash": str(pygit2.hashfile(path)),
        "completed": False,
        "contentType": content_type,
    }
    return wikifactory_api_request(
        file_mutation, job.export_token, {"fileInput": file_input}, "file.file"
    )
def import_song(filepath, song_directory):
    """Import a song file into the database.

    Parses the LaTeX song file at *filepath*, creates (or reuses) the
    matching Artist and Song rows, resolves the song's language code, and
    stores the file's git object hash on the Song model.

    Fix: ``language_code`` was previously assigned only inside the
    ``len(data['languages']) > 0`` branch, so a song file declaring no
    language raised NameError at the ``get_or_create`` call below. It now
    defaults to ''.
    """
    import random  # used below to seed a temporary unique slug

    data = parsetex(filepath)
    LOGGER.info("Processing " + pprint.pformat(data['titles'][0]))

    artist_name = data['args']['by']
    artist_slug = slugify(artist_name)
    artist_model, created = Artist.objects.get_or_create(
        slug=artist_slug,
        defaults={'name': artist_name}
    )
    if not created and artist_model.name != artist_name:
        # Two different spellings collapsed to the same slug — flag it.
        LOGGER.warning(
            "*** Artist name differs though slugs are equal : "
            + artist_name + " / " + artist_model.name)

    if len(data['languages']) > 1:
        LOGGER.warning("*** Multiple languages in song file; we though"
                       " only support one. Picking any.")
    # Default to '' so the value is always bound, even when the song
    # declares no language at all (previously a NameError).
    language_code = ''
    if len(data['languages']) > 0:
        language_name = data["languages"].pop()
        # Match the declared language name (case-insensitively) against
        # the configured LANGUAGES choices; fall back to ''.
        language_code = next(
            (x for x in LANGUAGES
             if x[1].lower() == language_name.lower()),
            ('', '')
        )[0]
        if language_code == '':
            LOGGER.warning("*** No code found for language : '"
                           + language_name + "'")

    song_title = data['titles'][0]
    song_slug = slugify(song_title)
    object_hash = git.hashfile(filepath)
    filepath_rel = os.path.relpath(filepath, song_directory)

    # For some reason - probably after having interrupted the generation -
    # insertion fails because slug is empty, and there is already an empty
    # one. Assign a random value here; it is overwritten just below.
    song_model, created = Song.objects.get_or_create(
        title=song_title,
        artist=artist_model,
        defaults={
            'title': song_title,
            'language': language_code,
            'file_path': filepath_rel,
            'slug': ('%06x' % random.randrange(16 ** 6))
        })
    if created:
        # Disambiguate the slug if another song already claimed it.
        if Song.objects.filter(slug=song_slug).exists():
            song_slug += '-' + str(song_model.id)
        song_model.slug = song_slug
    else:
        LOGGER.info("-> Already exists.")

    artist_model.save()
    song_model.object_hash = object_hash
    song_model.save()