def upload_page():
    """Handle a solution-archive upload for the logged-in user.

    Returns a JSON payload with the exercise ids that were matched and
    missed, or an error response:
    404 (user missing), 413 (too big), 422 (no file), 400 (upload error).
    """
    user_id = current_user.id
    user = User.get_or_none(User.id == user_id)
    if user is None:  # should never happen
        return fail(404, 'User not found.')

    # content_length is None when the client sent no Content-Length
    # header; comparing None with an int raises TypeError, so guard it.
    content_length = request.content_length
    if content_length is not None and content_length > MAX_UPLOAD_SIZE:
        return fail(
            413,
            f'File is too big. {MAX_UPLOAD_SIZE // 1000000}MB allowed.',
        )

    file: Optional[FileStorage] = request.files.get('file')
    if file is None:
        return fail(422, 'No file was given.')
    try:
        matches, misses = upload.new(user, file)
    except UploadError as e:
        log.debug(e)
        return fail(400, str(e))
    except FileSizeError as e:
        log.debug(e)
        return fail(413, str(e))

    return jsonify({
        'exercise_matches': matches,
        'exercise_misses': misses,
    })
def new(
    user_id: int,
    course_id: int,
    file: FileStorage,
) -> Tuple[List[int], List[int]]:
    """Upload every exercise solution found inside *file*.

    Returns ``(matches, misses)`` — the exercise numbers that were
    stored successfully and those that failed.  If *every* exercise
    failed with an error, raises ``UploadError`` carrying all of them.
    """
    matches: List[int] = []
    misses: List[int] = []
    errors: List[Union[UploadError, AlreadyExists]] = []

    for exercise_number, files, solution_hash in Extractor(file):
        try:
            upload_solution(
                course_id=course_id,
                exercise_number=exercise_number,
                files=files,
                solution_hash=solution_hash,
                user_id=user_id,
            )
        except (UploadError, AlreadyExists) as failure:
            log.debug(failure)
            errors.append(failure)
            misses.append(exercise_number)
        else:
            matches.append(exercise_number)

    # Only escalate when nothing at all was uploaded.
    if errors and not matches:
        raise UploadError(errors)
    return matches, misses
def _extract(archive: ZipFile, filename: str, dirname: str = '') -> File:
    """Read one member of *archive* and wrap it as a ``File``.

    The member's bytes are decoded as UTF-8 (bad bytes replaced, NUL
    characters dropped) and *dirname* is stripped from the front of the
    stored path.

    BUG FIX: both f-strings had lost their ``{filename}`` placeholder
    (they literally logged/stored ``(unknown)``); restored to match the
    sibling image-aware ``_extract`` in this file.
    """
    with archive.open(filename) as current_file:
        log.debug(f'Extracting from archive: {filename}')
        code = current_file.read()
        decoded = code.decode('utf-8', errors='replace').replace('\x00', '')
        # Drop the leading directory prefix from the stored path.
        filename = filename[len(dirname):]
        return File(path=f'/{filename}', code=decoded)
def __iter__(self) -> Iterator[Tuple[int, List[File]]]:
    """Yield ``(exercise_id, files)`` pairs from every capable extractor.

    Each registered subclass is tried in turn; any subclass that
    reports it can extract the payload contributes its exercises.
    """
    for subclass in self.__class__.__subclasses__():
        log.debug(f'Trying extractor: {subclass.__name__}')
        candidate = subclass(to_extract=self.to_extract)
        if not candidate.can_extract():
            continue
        yield from candidate.get_exercises()
def _clean(cls, code: Union[Sequence, str]) -> Tuple[int, str]:
    """Parse the upload header of *code*.

    Returns ``(exercise_id, code_text)`` when the first line matches
    the expected upload title, and ``(0, '')`` otherwise.
    """
    first_line, code_text = cls._split_header(code)
    title_match = cls.UPLOAD_TITLE.fullmatch(first_line)
    if title_match is None:
        log.debug(f'Unmatched title: {first_line}')
        return 0, ''
    return int(title_match.group(1)), code_text
def __iter__(self) -> Iterator[Tuple[int, List[File], str]]:
    """Yield ``(exercise_id, files, content_hash)`` triples.

    Every subclass able to handle the payload contributes its
    exercises; the hash is computed from the files' string form.
    """
    for subclass in self.__class__.__subclasses__():
        log.debug(f'Trying extractor: {subclass.__name__}')
        candidate = subclass(to_extract=self.to_extract)
        if candidate.can_extract():
            for exercise_id, files in candidate.get_exercises():
                yield exercise_id, files, hashing.by_content(str(files))
def _split_header(cls, code: CodeFile) -> Tuple[str, str]:
    """Split *code* into its title line and the remaining code text.

    Leading/trailing ``#`` markers and whitespace are trimmed first;
    underscores in the title are turned into spaces, and both parts
    are stripped.
    """
    as_text = cast(str, cls._convert_to_text(code))
    trimmed = as_text.strip('#' + string.whitespace)
    # partition('\n') behaves like the original find()-based slicing:
    # with no newline, the whole text is the title and the body is ''.
    title, _newline, body = trimmed.partition('\n')
    title = title.strip().replace('_', ' ')
    body = body.strip()
    log.debug(f'Upload title: {title}')
    return title, body
def _extract(archive: ZipFile, filename: str, dirname: str = '') -> File:
    """Read one member of *archive* and wrap it as a ``File``.

    Image files (by extension) are base64-encoded; everything else is
    decoded as UTF-8 with bad bytes replaced and NULs dropped.
    *dirname* is stripped from the front of the stored path.

    BUG FIX: the log f-string had lost its ``{filename}`` placeholder
    (it literally logged ``(unknown)``); restored.
    """
    with archive.open(filename) as current_file:
        log.debug(f'Extracting from archive: {filename}')
        code = current_file.read()
        if filename.rpartition('.')[-1].lower() in ALLOWED_IMAGES_EXTENSIONS:
            # NOTE(review): b64encode returns bytes while the text branch
            # yields str — confirm File.code accepts both types downstream.
            decoded = base64.b64encode(code)
        else:
            decoded = code.decode(
                'utf-8', errors='replace',
            ).replace('\x00', '')
        # Drop the leading directory prefix from the stored path.
        filename = filename[len(dirname):]
        return File(path=f'/{filename.lower()}', code=decoded)
def _get_file_type(self, code: str) -> Tuple[str, str]:
    """Determine the file type declared in *code*'s header line.

    Returns ``(code_without_header, file_type)`` when a type line is
    present (falling back to the default type for disallowed
    extensions), or ``(code, default_type)`` when it is not.
    """
    type_line, code_lines = self._split_header(code)
    matched = self.TYPE_LINE_PREFIX.fullmatch(type_line)
    if matched is None:
        log.debug('No file type defined.')
        return code, self.DEFAULT_FILE_TYPE

    file_type = matched.group(1)
    if file_type not in ALLOWED_EXTENSIONS:
        # Unknown extension — fall back to the default type.
        file_type = self.DEFAULT_FILE_TYPE
    log.debug(f'File type: {file_type}.')
    return code_lines, file_type
def new(user: User, file: FileStorage) -> Tuple[List[int], List[int]]:
    """Upload every exercise found in *file* for *user*.

    Raises ``AlreadyExists`` when the exact same archive (by hash) was
    uploaded before.  Returns ``(matches, misses)`` of exercise ids.
    """
    solution_hash = hashing.by_file(file)
    if _is_uploaded_before(user, solution_hash):
        raise AlreadyExists('You try to reupload an old solution.')

    matches: List[int] = []
    misses: List[int] = []
    for exercise_id, files in Extractor(file):
        # Both the DB write and the auto checks may raise; a failure in
        # either counts as a miss for this exercise.
        try:
            saved = _upload_to_db(exercise_id, user, files, solution_hash)
            _run_auto_checks(saved)
        except (UploadError, AlreadyExists) as failure:
            log.debug(failure)
            misses.append(exercise_id)
        else:
            matches.append(exercise_id)
    return matches, misses
def create_new(
    cls,
    solution: Solution,
) -> 'SharedSolution':
    """Create a SharedSolution for *solution* with a collision-free URL.

    Generates random share URLs until one is not already taken, then
    persists and returns the new record.
    """
    new_url = generate_string(
        min_len=10, max_len=11, allow_punctuation=False,
    )
    while cls.get_or_none(cls.shared_url == new_url) is not None:
        # BUG FIX: 'trying again.' was passed as a second positional
        # argument to log.debug, so logging treated it as a %-format
        # arg and the text was never rendered; merged into one message.
        log.debug(
            f'Collision with creating link to {solution.id} solution, '
            'trying again.',
        )
        new_url = generate_string(
            min_len=10, max_len=11, allow_punctuation=False,
        )
    return cls.create(shared_url=new_url, solution=solution)
def new(user: User, file: FileStorage) -> Tuple[List[int], List[int]]:
    """Upload every exercise found in *file* for *user*.

    Returns ``(matches, misses)`` of exercise ids.  Raises
    ``UploadError`` carrying all collected errors when *every*
    exercise failed.
    """
    matches: List[int] = []
    misses: List[int] = []
    errors: List[Union[UploadError, AlreadyExists]] = []

    for exercise_id, files, solution_hash in Extractor(file):
        # Both the DB write and the auto checks may raise; a failure in
        # either counts as a miss for this exercise.
        try:
            saved = _upload_to_db(exercise_id, user, files, solution_hash)
            _run_auto_checks(saved)
        except (UploadError, AlreadyExists) as failure:
            log.debug(failure)
            errors.append(failure)
            misses.append(exercise_id)
        else:
            matches.append(exercise_id)

    if errors and not matches:
        raise UploadError(errors)
    return matches, misses