def keyword_form_valid(self, formset):
    for form in formset:
        word = form['keyword'].data
        definition = form['definition'].data
        id = form['id'].data
        # If the user has deleted an existing keyword
        if not word and not definition and id:
            try:
                keyword_object = Keyword.objects.get(id=id)
                keyword_object.delete()
            except (ValueError, ObjectDoesNotExist):
                pass
        # otherwise get or create a keyword
        elif word or definition:
            try:
                keyword_object = Keyword.objects.get(id=id)
            except (ValueError, ObjectDoesNotExist):
                keyword_object = Keyword()
                NoteKarmaEvent.create_event(self.request.user, self.get_object(),
                                            NoteKarmaEvent.CREATED_KEYWORD)
            keyword_object.note = self.get_object()
            keyword_object.word = word
            keyword_object.definition = definition
            keyword_object.unreviewed = False
            keyword_object.save()

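# Illustrative sketch only (not part of the original source): keyword_form_valid
# reads the raw .data of the bound 'keyword', 'definition' and 'id' fields, so
# the formset it receives could be built from a plain form along these lines.
# KeywordForm and KeywordFormSet are hypothetical names used for illustration.
from django import forms
from django.forms.formsets import formset_factory


class KeywordForm(forms.Form):
    keyword = forms.CharField(required=False)
    definition = forms.CharField(required=False)
    id = forms.IntegerField(required=False, widget=forms.HiddenInput)

KeywordFormSet = formset_factory(KeywordForm, extra=1)
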
def process_note_flag_events(request_user, note):
    # Take a point away from person flagging this note
    if request_user.is_authenticated():
        NoteKarmaEvent.create_event(request_user, note, NoteKarmaEvent.GIVE_FLAG)
    # If this is the 6th time this note has been flagged,
    # punish the uploader
    if note.flags == 6 and note.user:
        NoteKarmaEvent.create_event(note.user, note, NoteKarmaEvent.GET_FLAGGED)

def process_downloaded_note(request_user, note):
    """Record that somebody has downloaded a note"""
    if request_user.is_authenticated() and request_user != note.user:
        NoteKarmaEvent.create_event(request_user, note, NoteKarmaEvent.DOWNLOADED_NOTE)
    if request_user.is_authenticated() and note.user and note.user != request_user:
        NoteKarmaEvent.create_event(note.user, note, NoteKarmaEvent.HAD_NOTE_DOWNLOADED)

def process_note_thank_events(request_user, note):
    # Give points to the person who uploaded this note
    if note.user != request_user and note.user:
        NoteKarmaEvent.create_event(note.user, note, NoteKarmaEvent.THANKS)
    # If note thanks exceeds a threshold, create a Mechanical
    # Turk task to get some keywords for it
    if note.thanks == KEYWORD_MTURK_THRESHOLD:
        submit_extract_keywords_hit.delay(note)

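# Illustrative sketch only: the process_* helpers above expect the requesting
# user and a Note instance and record karma purely as a side effect. A "thank"
# endpoint might invoke them roughly like this; the view name and the way the
# Note is looked up are assumptions, not part of the original source.
from django.http import HttpResponse


def thank_note(request, pk):
    note = Note.objects.get(pk=pk)
    note.thanks += 1
    note.save()
    process_note_thank_events(request.user, note)
    return HttpResponse(status=204)
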
def find_orphan_notes(sender, **kwargs):
    user = kwargs['user']
    s = kwargs['request'].session
    uploaded_note_urls = s.get(ANONYMOUS_UPLOAD_URLS, [])
    for uploaded_note_url in uploaded_note_urls:
        try:
            note = Note.objects.get(fp_file=uploaded_note_url)
            note.user = user
            note.save()
            NoteKarmaEvent.create_event(user, note, NoteKarmaEvent.UPLOAD)
        except (ObjectDoesNotExist, MultipleObjectsReturned):
            mapping = UserUploadMapping.objects.create(fp_file=uploaded_note_url,
                                                       user=user)
            mapping.save()

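# Wiring sketch (an assumption, not shown in the original): find_orphan_notes
# reads kwargs['user'] and kwargs['request'], which matches the arguments that
# Django's user_logged_in signal sends, so it is presumably connected as a
# receiver along these lines.
from django.contrib.auth.signals import user_logged_in

user_logged_in.connect(find_orphan_notes)
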
def convert_raw_document(raw_document, user=None):
    """Upload a raw document to Google Drive and get a Note back."""
    fp_file = raw_document.get_file()

    # extract some properties from the document metadata
    filename = raw_document.name
    print "this is the mimetype of the document to check:"
    mimetype = raw_document.mimetype
    print mimetype
    print ""

    # A special case for Evernotes
    if raw_document.mimetype == 'text/enml':
        raw_document.mimetype = 'text/html'

    original_content = fp_file.read()

    # Include mimetype parameter if there is one to include
    extra_flags = {'mimetype': raw_document.mimetype} if raw_document.mimetype \
        else {}
    media = MediaInMemoryUpload(original_content, chunksize=1024*1024,
                                resumable=True, **extra_flags)

    service = build_api_service()

    # upload to google drive
    file_dict = upload_to_gdrive(service, media, filename, mimetype=mimetype)

    # download from google drive
    content_dict = download_from_gdrive(service, file_dict, mimetype=mimetype)

    # this should have already happened; let's see why it hasn't
    raw_document.is_processed = True
    raw_document.save()

    note = raw_document.convert_to_note()

    # Cache the uploaded file's URL
    note.gdrive_url = file_dict['alternateLink']
    note.text = content_dict['text']

    # Extract HTML from the appropriate place
    html = ''
    if raw_document.mimetype == PDF_MIMETYPE:
        html = pdf2html(original_content)
    elif raw_document.mimetype in PPT_MIMETYPES:
        html = pdf2html(content_dict['pdf'])
    elif 'html' in content_dict and content_dict['html']:
        html = content_dict['html']

    if html:
        html = sanitizer.data_uris_to_s3(html)
        NoteMarkdown.objects.create(note=note, html=html)

    # If we know the user who uploaded this,
    # associate them with the note
    if user and not user.is_anonymous():
        note.user = user
        NoteKarmaEvent.create_event(user, note, NoteKarmaEvent.UPLOAD)
    else:
        try:
            mapping = UserUploadMapping.objects.get(fp_file=raw_document.fp_file)
            note.user = mapping.user
            note.save()
            NoteKarmaEvent.create_event(mapping.user, note, NoteKarmaEvent.UPLOAD)
        except (ObjectDoesNotExist, MultipleObjectsReturned):
            logger.info("Zero or multiple mappings found with fp_file " +
                        raw_document.fp_file.name)

    # Finally, save whatever data we got back from google
    note.save()

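# Usage sketch only: convert_raw_document talks to the Google Drive API, so it
# is the kind of work usually handed off to a background worker (the codebase
# already uses Celery, e.g. submit_extract_keywords_hit). Nothing below is from
# the original source; the task name and the id-based lookups are assumptions,
# and RawDocument's import path depends on the project layout.
from celery import shared_task
from django.contrib.auth.models import User


@shared_task
def process_raw_document(raw_document_id, user_id=None):
    # RawDocument is assumed to be the model behind the raw_document argument
    raw_document = RawDocument.objects.get(pk=raw_document_id)
    user = User.objects.get(pk=user_id) if user_id is not None else None
    convert_raw_document(raw_document, user=user)
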