def test_delete_note_with_attachments(self, app, client, fake_auth):
    """Deleting a note also deletes both of its attachments."""
    fake_auth.login(coe_advisor_uid)
    fixtures_dir = f"{app.config['BASE_DIR']}/fixtures"
    note = _api_note_create(
        app,
        client,
        author_id=AuthorizedUser.get_id_per_uid(coe_advisor_uid),
        sids=[coe_student['sid']],
        subject='My little dog Lassie packed her bags and went out on to the porch',
        body='Then my little dog Lassie, she sailed off to the moon',
        attachments=[
            f'{fixtures_dir}/mock_advising_note_attachment_1.txt',
            f'{fixtures_dir}/mock_advising_note_attachment_2.txt',
        ],
    )
    attachment_ids = [attachment['id'] for attachment in note.get('attachments')]
    assert len(attachment_ids) == 2
    for id_ in attachment_ids:
        assert NoteAttachment.find_by_id(id_)
    # Log in as admin and delete the note.
    fake_auth.login(admin_uid)
    response = client.delete(f"/api/notes/delete/{note['id']}")
    assert response.status_code == 200
    # Both attachment rows must be gone along with the note.
    for id_ in attachment_ids:
        assert not NoteAttachment.find_by_id(id_)
def test_update_note_with_attachments(self, app, client, coe_advising_note_with_attachment, fake_auth):
    """Update a note: delete existing attachment and add a new one."""
    fake_auth.login(coe_advising_note_with_attachment.author_uid)
    base_dir = app.config['BASE_DIR']
    note_id = coe_advising_note_with_attachment.id
    attachment_id = coe_advising_note_with_attachment.attachments[0].id
    filename = 'mock_advising_note_attachment_2.txt'
    # Fix: build the fixture path from `filename` (was a garbled literal) so the
    # uploaded attachment matches the name asserted below.
    path_to_new_attachment = f'{base_dir}/fixtures/{filename}'
    updated_note = self._api_note_update(
        app,
        client,
        note_id=note_id,
        subject=coe_advising_note_with_attachment.subject,
        body=coe_advising_note_with_attachment.body,
        attachments=[path_to_new_attachment],
        delete_attachment_ids=[attachment_id],
    )
    # The note should now carry exactly one attachment: the new one.
    assert note_id == updated_note['attachments'][0]['noteId']
    assert len(updated_note['attachments']) == 1
    assert filename == updated_note['attachments'][0]['displayName']
    assert filename == updated_note['attachments'][0]['filename']
    assert updated_note['attachments'][0]['id'] != attachment_id
    # Verify db
    attachments = NoteAttachment.find_by_note_id(note_id)
    assert len(attachments) == 1
    assert filename in attachments[0].path_to_attachment
    assert not NoteAttachment.find_by_id(attachment_id)
def create(cls, author_uid, author_name, author_role, author_dept_codes, sid, subject, body, topics=(), attachments=()):
    """Create and persist a note with optional topics and attachments.

    Topics are normalized (whitespace vacuumed, then titleized) before being
    attached; each attachment bundle supplies 'name' and 'byte_stream'.
    Commits the session and refreshes the search index before returning.
    """
    note = cls(author_uid, author_name, author_role, author_dept_codes, sid, subject, body)
    for raw_topic in topics:
        normalized = titleize(vacuum_whitespace(raw_topic))
        note.topics.append(NoteTopic.create_note_topic(note, normalized, author_uid))
    for bundle in attachments:
        attachment = NoteAttachment.create_attachment(
            note=note,
            name=bundle['name'],
            byte_stream=bundle['byte_stream'],
            uploaded_by=author_uid,
        )
        note.attachments.append(attachment)
    db.session.add(note)
    std_commit()
    cls.refresh_search_index()
    return note
def _add_attachment(cls, note, attachment):
    """Append one uploaded file to the note and bump its updated_at stamp.

    `attachment` is a bundle dict with 'name' and 'byte_stream' keys.
    """
    new_attachment = NoteAttachment.create(
        note_id=note.id,
        name=attachment['name'],
        byte_stream=attachment['byte_stream'],
        uploaded_by=note.author_uid,
    )
    note.attachments.append(new_attachment)
    note.updated_at = utc_now()
def get_boa_attachment_stream(attachment_id):
    """Return download metadata for a BOA-created note attachment.

    :param attachment_id: either the id of a NoteAttachment row or an
        already-loaded NoteAttachment instance — callers in this file pass
        both forms, so accept either (duck-typed on `path_to_attachment`).
    :return: dict with the user-facing 'filename' and an S3 byte 'stream',
        or None when the attachment cannot be found.
    """
    if hasattr(attachment_id, 'path_to_attachment'):
        # Caller already resolved the row; don't look it up again.
        attachment = attachment_id
    else:
        attachment = NoteAttachment.find_by_id(attachment_id)
    if not attachment:
        return None
    return {
        'filename': attachment.get_user_filename(),
        'stream': s3.stream_object(app.config['DATA_LOCH_S3_ADVISING_NOTE_BUCKET'], attachment.path_to_attachment),
    }
def _add_attachments_to_notes(attachments, author_uid, note_ids):
    """Bulk-attach each uploaded file to every note id, in 10k-row chunks.

    Each attachment is pushed to S3 once; the resulting path is then inserted
    into note_attachments for every note via json_populate_recordset.
    """
    # One timestamp for the whole batch, formatted for Postgres.
    created_at = utc_now().strftime('%Y-%m-%d %H:%M:%S')
    chunk_size = 10000
    for bundle in attachments:
        s3_path = NoteAttachment.put_attachment_to_s3(
            name=bundle['name'],
            byte_stream=bundle['byte_stream'],
        )
        for start in range(0, len(note_ids), chunk_size):
            query = """
                INSERT INTO note_attachments (created_at, note_id, path_to_attachment, uploaded_by_uid)
                SELECT created_at, note_id, path_to_attachment, uploaded_by_uid
                FROM json_populate_recordset(null::note_attachments, :json_dumps);
            """
            rows = [
                {
                    'created_at': created_at,
                    'note_id': note_id,
                    'path_to_attachment': s3_path,
                    'uploaded_by_uid': author_uid,
                }
                for note_id in note_ids[start:start + chunk_size]
            ]
            db.session.execute(query, {'json_dumps': json.dumps(rows)})
def download_attachment(attachment_id):
    """Stream a note attachment (legacy or BOA-created) as a file download.

    Non-integer ids denote legacy attachments. For BOA attachments on private
    notes, access is denied unless the current user may view private notes.
    Returns 404 HTML when the attachment or its stream is unavailable.
    """
    is_legacy = not is_int(attachment_id)
    id_ = attachment_id if is_legacy else int(attachment_id)
    if is_legacy:
        stream_data = get_legacy_attachment_stream(id_)
    else:
        attachment = NoteAttachment.find_by_id(id_)
        note = attachment and attachment.note
        if note and note.is_private and not current_user.can_access_private_notes:
            raise ForbiddenRequestError('Unauthorized')
        # Fix: pass the id, not the row — get_boa_attachment_stream expects an
        # attachment_id and performs its own find_by_id lookup.
        stream_data = get_boa_attachment_stream(id_)
    if not stream_data or not stream_data['stream']:
        return Response('Sorry, attachment not available.', mimetype='text/html', status=404)
    r = Response(stream_data['stream'])
    r.headers['Content-Type'] = 'application/octet-stream'
    # RFC 5987 filename* form so non-ASCII filenames survive the header.
    encoding_safe_filename = urllib.parse.quote(stream_data['filename'].encode('utf8'))
    r.headers['Content-Disposition'] = f"attachment; filename*=UTF-8''{encoding_safe_filename}"
    return r
def get_zip_stream(filename, notes, student):
    """Build a streaming zip of a student's notes: one CSV plus attachments.

    :param filename: base name (no extension) for the CSV inside the zip.
    :param notes: note feed dicts; private-note bodies/attachments are omitted
        unless the current user may access private notes.
    :param student: dict with at least 'sid' and optional first/last name.
    :return: a zipstream.ZipFile whose entries are generated lazily.
    """
    app_timezone = pytz.timezone(app.config['TIMEZONE'])

    def iter_csv():
        def csv_line(_list):
            # Encode one CSV row; StringIO needs no explicit close.
            # (Fix: removed unreachable close() after the return.)
            csv_output = io.StringIO()
            csv.writer(csv_output).writerow(_list)
            return csv_output.getvalue().encode('utf-8')

        yield csv_line([
            'date_created',
            'student_sid',
            'student_name',
            'author_uid',
            'author_csid',
            'author_name',
            'subject',
            'topics',
            'attachments',
            'body',
            'is_private',
            'late_change_request_action',
            'late_change_request_status',
            'late_change_request_term',
            'late_change_request_course',
        ])
        # Backfill author names/uids from CalNet for notes missing them.
        supplemental_calnet_advisor_feeds = get_calnet_users_for_csids(
            app,
            list(set(note['author']['sid'] for note in notes if note['author']['sid'] and not note['author']['name'])),
        )
        for note in notes:
            calnet_author = supplemental_calnet_advisor_feeds.get(note['author']['sid'])
            if calnet_author:
                calnet_author_name = calnet_author.get('name') \
                    or join_if_present(' ', [calnet_author.get('firstName'), calnet_author.get('lastName')])
                calnet_author_uid = calnet_author.get('uid')
            else:
                calnet_author_name = None
                calnet_author_uid = None
            # strptime expects a timestamp without timezone; ancient date-only legacy notes get a bogus time appended.
            timestamp_created = f"{note['createdAt']}T12:00:00" if len(note['createdAt']) == 10 else note['createdAt'][:19]
            datetime_created = pytz.utc.localize(datetime.strptime(timestamp_created, '%Y-%m-%dT%H:%M:%S'))
            date_local = datetime_created.astimezone(app_timezone).strftime('%Y-%m-%d')
            e_form = note.get('eForm') or {}
            omit_note_body = note.get('isPrivate') and not current_user.can_access_private_notes
            yield csv_line([
                date_local,
                student['sid'],
                join_if_present(' ', [student.get('first_name', ''), student.get('last_name', '')]),
                (note['author']['uid'] or calnet_author_uid),
                note['author']['sid'],
                (note['author']['name'] or calnet_author_name),
                note['subject'],
                '; '.join([t for t in note['topics'] or []]),
                '' if omit_note_body else '; '.join([a['displayName'] for a in note['attachments'] or []]),
                '' if omit_note_body else note['body'],
                note.get('isPrivate'),
                e_form.get('action'),
                e_form.get('status'),
                term_name_for_sis_id(e_form.get('term')),
                f"{e_form['sectionId']} {e_form['courseName']} - {e_form['courseTitle']} {e_form['section']}"
                if e_form.get('sectionId') else None,
            ])

    z = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_DEFLATED)
    # Fix: derive the CSV entry name from the `filename` parameter (was a garbled literal).
    csv_filename = f'{filename}.csv'
    z.write_iter(csv_filename, iter_csv())
    if notes:
        # Track entry names already used so duplicate attachment names get ' (n)' suffixes.
        all_attachment_filenames = {csv_filename}
        for note in notes:
            if not note.get('isPrivate') or current_user.can_access_private_notes:
                for attachment in note['attachments'] or []:
                    is_legacy_attachment = not is_int(attachment['id'])
                    id_ = attachment['id'] if is_legacy_attachment else int(attachment['id'])
                    if is_legacy_attachment:
                        stream_data = get_legacy_attachment_stream(id_)
                    else:
                        # Fix: pass the id — get_boa_attachment_stream performs
                        # its own find_by_id lookup.
                        stream_data = get_boa_attachment_stream(id_)
                    if stream_data:
                        attachment_filename = stream_data['filename']
                        basename, extension = path.splitext(attachment_filename)
                        suffix = 1
                        while attachment_filename in all_attachment_filenames:
                            attachment_filename = f'{basename} ({suffix}){extension}'
                            suffix += 1
                        all_attachment_filenames.add(attachment_filename)
                        z.write_iter(attachment_filename, stream_data['stream'])
    return z