def __init__(self, sender_settings):
    """Set up the sender: restrictions, destination list and today's upload counter.

    :param sender_settings: configuration object providing restrictions and destinations
    """
    log = get_logger_for(self)
    self.restrictions = _SenderRestriction(sender_settings)
    # deep-copied so later mutations don't leak back into the settings object
    self.destinations = deepcopy(sender_settings.destinations)
    # recover the byte count already uploaded today so limits survive restarts
    with get_session() as session:
        uploaded_bytes = FilesDestinations.get_bytes_uploaded_in_date(session, self.destinations)
    self.bytes_uploaded_today = uploaded_bytes
    log.info("According to the logs, it were already uploaded today %d bytes for destinations %s",
             self.bytes_uploaded_today, self.destinations)
def _get_files_container_by_name(self, file_name):
    """Return the FilesContainer row whose file_name matches, or None when absent.

    Lazily creates the session resource on first use.
    """
    if not self._session_resource:
        self._session_resource = get_session()
    # try wraps the whole with-block so the session __exit__ still sees the
    # NoResultFound, exactly as in the original control flow
    try:
        with self._session_resource as session:
            query = session.query(FilesContainer)
            return query.filter(FilesContainer.file_name == file_name).one()
    except NoResultFound:
        return None
def main():
    """Entry point of the upload verifier: checks DB version, then runs the
    mail and mega verifiers over every configured destination.

    Usage: prog <config_file>
    Exits with status 1 on missing argument or DB schema mismatch.
    """
    # noinspection PyUnresolvedReferences
    import log_configuration
    if len(sys.argv) < 2:
        log.error("Usage: %s <config_file>", sys.argv[0])
        exit(1)
    conf = Configuration(sys.argv[1])
    with get_session() as session:
        db_version = get_db_version(session)
        if db_version != 3:
            log.error("Invalid database version (%d). 3 expected", db_version)
            session.close()
            exit(1)
        session.close()
    mail_verifier = mail.Verifier()
    mega_verifier = mega.Verifier()

    # FIX: parameters renamed (was "signal", which shadowed the signal module);
    # FIX: print statement replaced by print() call (works on Python 2 and 3)
    def signal_handler(sig, frame):
        print("Abort signal received!!!!")
        mail_verifier.stop()
        mega_verifier.stop()

    signal.signal(signal.SIGINT, signal_handler)
    for mail_conf in conf.mail_confs:
        mail_verifier.verify(mail_conf)
    for mega_conf in conf.mega_confs:  # FIX: typo "meaga_conf"
        mega_verifier.verify(mega_conf)
    mail_verifier.close()
    mega_verifier.close()
def main():
    """Entry point of the sender: validates the DB schema, wires the circuits
    app with a PipelineFlusher, fires one NewInputPath event per input path
    and runs the application until the pipeline drains.

    Usage: prog <config_file> <input path> [<input path> ...]
    Exits with status 1 on missing arguments or DB schema mismatch.
    """
    if len(sys.argv) < 3:
        log.error("Usage: %s <config_file> <input path> [<input path> ...]", sys.argv[0])
        exit(1)
    settings = Settings(sys.argv[1])
    with get_session() as session:
        db_version = get_db_version(session)
        if db_version != 3:
            # FIX: lazy logger arguments instead of eager %-formatting,
            # consistent with the other log calls in this project
            log.error("Invalid database version (%d). 3 expected", db_version)
            session.close()
            exit(1)
        app = App(settings, session)
        workers.manager.register_app(app)
        in_files = sys.argv[2:]
        # the flusher shuts the pipeline down once all inputs are consumed
        PipelineFlusher(remaining_inputs=len(in_files)).register(app)
        # load files to read
        for file_path in in_files:
            event = events.NewInputPath(file_path)
            event.complete = True
            app.fire(event)
        session.close()
    if settings.debugging.enabled:
        from fcb.utils.debugging import configure_signals
        configure_signals()
        app += Debugger()
    app.run()
    log.debug("finished processing")
def init(self):
    """Prepare this component: create its logger and acquire the DB session resource."""
    self.log = get_logger_for(self)
    self._session_resource = get_session()
def __init__(self, mail_dst):
    """Store the mail destination to verify and set up DB access and logging.

    :param mail_dst: mail destination configuration whose uploads will be verified
    """
    self._session_resource = get_session()
    self._log = get_logger_for(self)
    self._mail_dst = mail_dst
    # None until the first verification round has run
    self._last_checked_date = None
def _log_in_db(self, block):
    """Persist a sent block: its container row, destination associations and
    every contained file (or file fragment) in a single transaction.

    :param block: sent block; reads latest_file_info, content_file_infos and,
                  when present, cipher_key, send_destinations and
                  destinations_verif_data
    """
    if not self._session_resource:
        self._session_resource = get_session()
    with self._session_resource as session:
        session.autoflush = False  # to avoid IntegrityError raised during testing
        sent_file_info = block.latest_file_info
        # a new container has been saved
        file_container = FilesContainer(
            sha1=sent_file_info.sha1,
            file_name=sent_file_info.basename,
            encryption_key=block.cipher_key if hasattr(block, 'cipher_key') else '',
            container_size=sent_file_info.size
        )
        session.add(file_container)
        '''
        FIXME we need the container id because file_destination is not getting it
        (not working example of SQLAlchemy)
        '''
        session.flush()  # get container id
        # associate destinations to the container
        for destination in block.send_destinations if hasattr(block, 'send_destinations') else []:
            file_destination = FilesDestinations()
            file_destination.destination = Destination.get_or_add(session, destination)
            # FIXME according to the example in SQLAlchemy, this shouldn't be needed
            file_destination.file_containers_id = file_container.id
            if hasattr(block, 'destinations_verif_data') and destination in block.destinations_verif_data:
                file_destination.verification_info = block.destinations_verif_data[destination]
            file_container.files_destinations.append(file_destination)
        # save/update each file in the container
        for file_info in block.content_file_infos:
            uploaded_file_fragment_number = 0
            if hasattr(file_info, 'fragment_info'):  # check if it is a fragment
                uploaded_file_fragment_number = file_info.fragment_info.fragment_num
                uploaded_file = \
                    self._get_uploaded_file(
                        session=session,
                        file_info=file_info.fragment_info.file_info,
                        fragment_count=file_info.fragment_info.fragments_count)
                # save a new fragment for the file
                file_fragment = FileFragment(
                    fragment_sha1=file_info.sha1,
                    fragment_name=file_info.upath,
                    fragment_number=file_info.fragment_info.fragment_num
                )
                uploaded_file.fragments.append(file_fragment)
            else:  # not fragmented file
                uploaded_file = self._get_uploaded_file(session=session, file_info=file_info)
            session.flush()  # if uploaded_file has no id, we need one
            file_in_container_assoc = FilesInContainers(
                uploaded_file_fragment_number=uploaded_file_fragment_number,
                uploaded_files_id=uploaded_file.id
            )
            file_in_container_assoc.container_file = file_container
            file_container.fragments.append(file_in_container_assoc)
        session.commit()
def untransform_from_db(files):
    """Untransform (decode) each given file using the cipher key stored in the DB.

    :param files: iterable of file paths previously uploaded and logged in the database
    """
    with get_session() as session:
        for file_path in files:
            # FIX: bind file_path as a default argument. The original late-binding
            # lambda would resolve to the loop's *last* path if the getter were
            # ever invoked after the iteration advanced.
            cipher_key_getter = lambda path=file_path: _get_key_from_db(session, path)
            untransform(in_filename=file_path, cipher_key_getter=cipher_key_getter)
        session.close()
def delete_unverified_uploads(destinations):
    """
    :param destinations: list of Destination.destination

    For each Destination.destination:
        Deletes all FilesDestinations where the destination is not verified
        Deletes each FilesContainer in the deleted FilesDestinations if not present in
            a non deleted FilesDestinations
        Deletes each FileFragment if corresponds to a FilesInContainers for a FilesContainer
            deleted and not in a non deleted FilesContainer
        Deletes each UploadedFile if corresponds to a FilesInContainers for a FilesContainer
            deleted and not in a non deleted FilesContainer and/or has no more FileFragment
            in non deleted FilesContainer
    """
    with get_session() as session:
        # TODO use triggers or cascades to delete relations
        for destination in destinations:
            _log.info("Deleting unverified uploads for destination %s", destination)
            # get unverified FilesDestinations for the configured mail_conf
            files_destinations_q = session.query(FilesDestinations)\
                .filter(
                    FilesDestinations.verification_info.is_(None),
                    FilesDestinations.destinations_id == (
                        select([Destination.id]).
                        where(Destination.destination == destination).
                        as_scalar()))
            files_destinations = files_destinations_q.all()
            if not files_destinations:
                continue

            # get FilesContainer.id for containers which are not associated to another destination
            fd1 = aliased(FilesDestinations)
            fd2 = aliased(FilesDestinations)
            files_container_ids_to_delete = [
                f.file_containers_id
                for f in session.query(fd1.file_containers_id)
                .filter(fd1.file_containers_id.in_([fd.file_containers_id for fd in files_destinations]))
                .filter(~exists().where(
                    and_(fd1.file_containers_id == fd2.file_containers_id,
                         fd1.destinations_id != fd2.destinations_id)))
                .all()
            ]

            # will delete all FilesInContainers for containers to be deleted.
            # FIXME could be done in cascade
            files_in_container_q = session.query(FilesInContainers)\
                .filter(FilesInContainers.file_containers_id.in_(files_container_ids_to_delete))

            # get files (and fragments) only present in containers to delete (can be deleted also)
            fic1 = aliased(FilesInContainers)
            fic2 = aliased(FilesInContainers)
            files_to_delete = session.query(fic1)\
                .filter(fic1.file_containers_id.in_(files_container_ids_to_delete))\
                .filter(~exists().where(
                    and_(
                        # same file/fragment
                        fic1.uploaded_files_id == fic2.uploaded_files_id,
                        fic1.uploaded_file_fragment_number == fic2.uploaded_file_fragment_number,
                        # in other container
                        ~fic2.file_containers_id.in_(files_container_ids_to_delete)
                    )))\
                .all()

            # delete fragments
            # FIXME needs to be optimized (using placeholders or something)
            for file_id, fragment_number in \
                    [(f.uploaded_files_id, f.uploaded_file_fragment_number)
                     for f in files_to_delete if f.uploaded_file_fragment_number > 0]:
                session.query(FileFragment)\
                    .filter(FileFragment.file_id == file_id,
                            FileFragment.fragment_number == fragment_number)\
                    .delete(synchronize_session='fetch')

            # delete uploaded files without fragments
            whole_file_ids = [f.uploaded_files_id
                              for f in files_to_delete if f.uploaded_file_fragment_number == 0]
            if whole_file_ids:
                session.query(UploadedFile)\
                    .filter(UploadedFile.id.in_(whole_file_ids))\
                    .delete(synchronize_session='fetch')

            # delete uploaded files with all their fragments deleted. FIXME optimize
            fragmented_file_ids = [f.uploaded_files_id
                                   for f in files_to_delete if f.uploaded_file_fragment_number > 0]
            if fragmented_file_ids:
                session.query(UploadedFile)\
                    .filter(UploadedFile.id.in_(fragmented_file_ids),
                            ~exists().where(FileFragment.file_id == UploadedFile.id))\
                    .delete(synchronize_session='fetch')

            session.query(FilesContainer)\
                .filter(FilesContainer.id.in_(files_container_ids_to_delete))\
                .delete(synchronize_session='fetch')
            files_in_container_q.delete(synchronize_session='fetch')
            files_destinations_q.delete(synchronize_session='fetch')
        session.commit()