def basicTokenizer(task):
	task_tag = "NLP ADDRESS PARSER"
	print "\n\n************** %s [START] ******************\n" % task_tag
	print "TOKENIZING TEXT DOCUMENT at %s" % task.doc_id
	task.setStatus(412)

	from lib.Worker.Models.uv_document import UnveillanceDocument

	from conf import DEBUG
	from vars import ASSET_TAGS

	doc = UnveillanceDocument(_id=task.doc_id)
	if doc is None:
		print "DOC IS NONE"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		return

	txt = None
	if hasattr(task, "txt_file"):
		txt = doc.loadFile(task.txt_file)
	else:
		import os
		try:
			txt_path = doc.getAssetsByTagName(ASSET_TAGS['TXT_JSON'])[0]['file_name']
			txt = doc.loadFile(os.path.join(doc.base_path, txt_path))
		except Exception as e:
			if DEBUG: print e
	
	if txt is None:
		print "TEXT FILE IS NONE"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		return
Example #2
def parse_zipped_j3m(uv_task):
	task_tag = "PARSING ZIPPED J3M"
	print "\n\n************** %s [START] ******************\n" % task_tag
	print "parsing zipped j3m asset at %s" % uv_task.doc_id
	uv_task.setStatus(302)
	
	import os
	from lib.Worker.Models.uv_document import UnveillanceDocument
	
	from conf import DEBUG
	from vars import ASSET_TAGS
	
	media = UnveillanceDocument(_id=uv_task.doc_id)
	if media is None:
		print "DOC IS NONE"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		uv_task.fail()
		return
	
	from conf import ANNEX_DIR
	if hasattr(uv_task, "j3m_name"):
		j3m_name = uv_task.j3m_name
	else:
		j3m_name = os.path.join(media.base_path, "j3m_raw.gz")
	
	if not media.getFile(j3m_name):
		print "NO J3M.GZ at %s" % j3m_name
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		uv_task.fail()
		return
	
	from cStringIO import StringIO
	from lib.Worker.Utils.funcs import getFileType, unGzipBinary
	from vars import MIME_TYPES
	
	j3m = media.loadFile(j3m_name)
	j3m_type = getFileType(j3m, as_buffer=True)
	
	if j3m_type == MIME_TYPES['gzip']:
		j3m = unGzipBinary(j3m)
	
	if j3m is None or getFileType(j3m, as_buffer=True) != MIME_TYPES['json']:
		print "THIS IS NOT A J3M (type %s)" % j3m_type
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		uv_task.fail(status=412)
		return

	asset_path = "j3m_raw.json"
	media.addAsset(j3m, asset_path, as_literal=False)

	uv_task.put_next([
		"J3M.j3mify.j3mify",
		"J3M.massage_j3m.massageJ3M",
		"PGP.verify_signature.verifySignature",
		"J3M.verify_visual_content.verifyVisualContent"
	])

	uv_task.routeNext(inflate={'j3m_name' : asset_path})	
	uv_task.finish()
	print "\n\n************** %s [END] ******************\n" % task_tag
def evaluate_JSON_media(uv_task):
    task_tag = "EVALUATE JSON MEDIA"

    print "\n\n************** %s [START] ******************\n" % task_tag
    uv_task.setStatus(302)

    import os
    from json import loads

    from lib.Worker.Models.uv_document import UnveillanceDocument
    from conf import ANNEX_DIR, DEBUG

    doc = UnveillanceDocument(_id=uv_task.doc_id)

    try:
        if DEBUG:
            print doc.emit()
    except Exception as e:
        print e

    content = None

    try:
        content = loads(doc.loadFile(doc.file_name))
    except Exception as e:
        error_msg = "could not load content at all: %s" % e

        print error_msg
        print "\n\n************** %s [ERROR] ******************\n" % task_tag

        uv_task.fail(message=error_msg)
        return

    # the document qualifies only if every one of its top-level keys
    # appears in this expected field set
    mention_set = ["mimeType", "headers", "parts", "body", "filename"]

    if len(set(content.keys()).intersection(mention_set)) == len(
            content.keys()):
        from lib.Worker.Models.dl_FD_mention import FoxyDoxxingMention
        doc = FoxyDoxxingMention(inflate=doc.emit())
    else:
        error_msg = "document not really usable."

        print error_msg
        print "\n\n************** %s [ERROR] ******************\n" % task_tag

        uv_task.fail(message=error_msg)
        return

    doc.addCompletedTask(uv_task.task_path)

    from vars import MIME_TYPE_TASKS

    uv_task.task_queue = [uv_task.task_path]

    try:
        uv_task.task_queue.extend(MIME_TYPE_TASKS[doc.mime_type])
        uv_task.routeNext()
    except Exception as e:
        error_msg = "cannot get task queue for mime type %s: %s" % (
            doc.mime_type, e)

        print error_msg
        print "\n\n************** %s [WARN] ******************\n" % task_tag

    print "\n\n************** %s [END] ******************\n" % task_tag
    uv_task.finish()
def evaluateText(task):
	task_tag = "TEXT EVALUATION"
	print "\n\n************** %s [START] ******************\n" % task_tag
	print "evaluating text at %s" % task.doc_id
	task.setStatus(302)
	
	from lib.Worker.Models.uv_document import UnveillanceDocument
	from conf import DEBUG
	from vars import MIME_TYPE_TASKS
	
	document = UnveillanceDocument(_id=task.doc_id)	
	"""
		limited choices: json, pgp, or txt
	"""

	if hasattr(task, "text_file"):
		content = document.loadAsset(task.text_file)
	else:
		content = document.loadFile(document.file_name)	
	
	if content is None:
		print "no text to evaluate :("
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		task.fail()
		return
	
	new_mime_type = None
	import json
	try:
		json_txt = json.loads(content)
		new_mime_type = "application/json"
		
		print "THIS IS JSON"
	except Exception as e:
		print "NOT JSON: %s" % e
	
	task_path = None	
	if new_mime_type is not None:
		document.mime_type = new_mime_type
		document.save()
		
		if document.mime_type in MIME_TYPE_TASKS.keys():
			task_path = MIME_TYPE_TASKS[document.mime_type][0]
	else:
		try:
			from lib.Core.Utils.funcs import cleanLine
			from vars import ASSET_TAGS
			
			txt_json = []
			txt_pages = []
			line_count = 0
			
			# this is arbitrary
			MAX_LINES_PER_PAGE = 80
			
			for line in content.splitlines():
				txt_pages.append(cleanLine(line))
				line_count += 1
				
				if line_count == MAX_LINES_PER_PAGE:
					txt_json.append(" ".join(txt_pages))
					txt_pages = []
					line_count = 0

			txt_json.append(" ".join(txt_pages))

			document.total_pages = len(txt_json)
			document.save()
						
			asset_path = document.addAsset(txt_json, "doc_texts.json", as_literal=False,
				description="jsonified text of original document, segment by segment",
				tags=[ASSET_TAGS['TXT_JSON']])

			from lib.Worker.Models.uv_text import UnveillanceText
			uv_text = UnveillanceText(inflate={
				'media_id' : document._id,
				'searchable_text' : txt_json,
				'file_name' : asset_path
			})
			
			document.text_id = uv_text._id
			document.save()
		except Exception as e: 
			if DEBUG:
				print "ERROR HERE GENERATING DOC TEXTS:"
				print e
	
	document.addCompletedTask(task.task_path)
	task.finish()
	task.routeNext()
	print "\n\n************** %s [END] ******************\n" % task_tag
Example #5
def basicTokenizer(task):
	task_tag = "NLP TOKENIZER"
	print "\n\n************** %s [START] ******************\n" % task_tag
	print "TOKENIZING TEXT DOCUMENT at %s" % task.doc_id
	task.setStatus(412)

	from lib.Worker.Models.uv_document import UnveillanceDocument

	from conf import DEBUG
	from vars import ASSET_TAGS

	doc = UnveillanceDocument(_id=task.doc_id)
	if doc is None:
		print "DOC IS NONE"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		return

	txt = None
	
	from json import loads
	if hasattr(task, "txt_file"):
		txt = loads(doc.loadFile(task.txt_file))
	else:
		import os
		try:
			txt_path = doc.getAssetsByTagName(ASSET_TAGS['TXT_JSON'])[0]['file_name']
			txt = loads(doc.loadFile(os.path.join(doc.base_path, txt_path)))
		except Exception as e:
			if DEBUG: print e
	
	if txt is None:
		print "TEXT FILE IS NONE"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		return
		
	from lib.Worker.Models.cp_nlp_server import CompassNLPServer
	nlp_server = CompassNLPServer()
	tokenized = nlp_server.sendNLPRequest({
		'method' : 'tokenize',
		'txt' : txt
	})
	
	if tokenized is None:
		print "COULD NOT TOKENIZE."
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		return
	
	if DEBUG:
		print "here is res"
		print type(tokenized)
		
	asset_path = doc.addAsset(tokenized, "core_nlp_tokenized.json", as_literal=False,
		description="tokenized output from Stanford Core NLP",
		tags=[ASSET_TAGS['TOKENS_NLP']])

	if asset_path is None or not doc.addFile(asset_path, None, sync=True): 
		print "COULD NOT SAVE ASSET."
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		return
	
	doc.addCompletedTask(task.task_path)
	task.finish()
	print "\n\n************** %s [END] ******************\n" % task_tag
Example #6
def addressParser(task):
    task_tag = "NLP ADDRESS PARSER"
    print "\n\n************** %s [START] ******************\n" % task_tag
    print "EXTRACTING ADDRESSES FROM TEXT DOCUMENT at %s" % task.doc_id
    task.setStatus(302)

    from lib.Worker.Models.uv_document import UnveillanceDocument

    from conf import DEBUG
    from vars import ASSET_TAGS

    doc = UnveillanceDocument(_id=task.doc_id)
    if doc is None:
        print "DOC IS NONE"
        print "\n\n************** %s [ERROR] ******************\n" % task_tag
        task.fail()
        return

    txt = None
    if hasattr(task, "txt_file"):
        txt = doc.loadFile(task.txt_file)
    else:
        import os

        try:
            txt_path = doc.getAssetsByTagName(ASSET_TAGS["TXT_JSON"])[0]["file_name"]
            txt = doc.loadFile(os.path.join(doc.base_path, txt_path))
        except Exception as e:
            if DEBUG:
                print e

    if txt is None:
        print "TEXT FILE IS NONE"
        print "\n\n************** %s [ERROR] ******************\n" % task_tag
        task.fail()
        return

    import re

    # script from https://code.google.com/p/ebcode/ -> ebdata.tar.gz -> ebdata/nlp/addresses.py

    # Regex notes:
    #   * This is *not* a case-insensitive regex, because we assume
    #     capitalized words are special (street names).
    #   * All data matched by capturing parentheses is concatenated together, so
    #     if you don't want to include something in the resulting string, don't
    #     capture it.

    # STREET_NAME is a fragment of a regular expression that is used in several
    # places in our "real" regular expression (ADDRESSES_RE) below. The one tricky
    # thing about it is that it includes a "CAPTURE_START" placeholder instead of
    # a capturing opening parenthesis. This lets us create two versions of the
    # regex -- STREET_NAME_CAPTURE and STREET_NAME_NOCAPTURE.

    STREET_NAME = r"""
		# Here, we define some common false positives and tell the regex to ignore them.
		(?!
			[Aa][Ss][Ss][Oo][Cc][Ii][Aa][Tt][Ee][Dd]\ [Pp][Rr][Ee][Ss][Ss] # associated press
			|
			[Uu][Nn][Ii][Vv][Ee][Rr][Ss][Ii][Tt][Yy]\ [Oo][Ff]             # university of
		)
		# DIRECTION
		%(CAPTURE_START)s
			(?:
				[NSEWnsew]\.?
				|
				(?:
					[Nn][Oo][Rr][Tt][Hh] |
					[Ss][Oo][Uu][Tt][Hh] |
					[Ee][Aa][Ss][Tt] |
					[Ww][Ee][Ss][Tt] |
					[Nn][Oo][Rr][Tt][Hh][Ee][Aa][Ss][Tt] |
					[Nn][Oo][Rr][Tt][Hh][Ww][Ee][Ss][Tt] |
					[Ss][Oo][Uu][Tt][Hh][Ee][Aa][Ss][Tt] |
					[Ss][Oo][Uu][Tt][Hh][Ww][Ee][Ss][Tt]
				)
				|
				(?:
					N\.?W | S\.?W | N\.?E | S\.?E
				)\.?
			)
			\ +                                        # space (but not newline)
		)?
		(?:
			# STREET NAME
			%(CAPTURE_START)s
				# Numbered street names with a suffix ("3rd", "4th").
				\d+(?:st|ST|nd|ND|rd|RD|th|TH|d|D)

				|

				# Or, numbered street names without a suffix ("3", "4")
				# but with a street type.
				\d+
				(?=
					\ +
					(?:Ave|Avenue|Blvd|Boulevard|Bvd|Cir|Circle|Court|Ct|Dr|Drive|
					   Lane|Ln|Parkway|Pkwy|Place|Plaza|Pl|Plz|Point|Pt|Pts|Rd|Rte|
					   Sq|Sqs|Street|Streets|St|Sts|Terrace|Ter|Terr|Trl|Way|Wy
					)
					\b
				)

				|

				# Or, street names that don't start with numbers.
				(?:
					# Optional prefixes --
					# "St", as in "St Louis"
					# "Dr. Martin", as in "Dr. Martin Luther King"
					(?:
						[Ss][Tt]\.?
						|
						[Dd][Rr]\.?\ [Mm][Aa][Rr][Tt][Ii][Nn]
					)
					\ +
				)?
				(?:
					Mass\.(?=\ +[Aa]ve)  # Special case: "Mass." abbr. for "Massachusetts Ave."
										 # Needs to be special-cased because of the period.
					|
					(?:Avenue|Ave\.?)\ +[A-Z]       # Special case: "Avenue X"
					|
					[A-Z][a-z][A-Za-z]*  # One initial-capped word
					|
					[A-Z]\b              # Single-letter street name (e.g., K St. in DC)
					(?!\.\w)             # Avoid '20 U.S.A.'
				)
			)
			(?:
				# Here, we list the options with street suffixes first, so that
				# the suffix abbreviations are treated as the last part of the
				# street name, to avoid overeagerly capturing "123 Main St. The".
				%(CAPTURE_START)s
					\ +(?:Ave|Blvd|Bvd|Cir|Ct|Dr|Ln|Pkwy|Pl|Plz|Pt|Pts|Rd|Rte|Sq|Sqs|St|Sts|Ter|Terr|Trl|Wy)\.
					|
					\ +[A-Z][a-z][A-Za-z]*\ (?:Ave|Blvd|Bvd|Cir|Ct|Dr|Ln|Pkwy|Pl|Plz|Pt|Pts|Rd|Rte|Sq|Sqs|St|Sts|Ter|Terr|Trl|Wy)\.
					|
					(?:,?\ Jr\.?,?|\ +[A-Z][a-z][A-Za-z]*){2}\ +(?:Ave|Blvd|Bvd|Cir|Ct|Dr|Ln|Pkwy|Pl|Plz|Pt|Pts|Rd|Rte|Sq|Sqs|St|Sts|Ter|Terr|Trl|Wy)\.
					|
					(?:,?\ Jr\.?,?|\ +[A-Z][a-z][A-Za-z]*){3}\ +(?:Ave|Blvd|Bvd|Cir|Ct|Dr|Ln|Pkwy|Pl|Plz|Pt|Pts|Rd|Rte|Sq|Sqs|St|Sts|Ter|Terr|Trl|Wy)\.
					|
					(?:,?\ Jr\.?,?|\ +[A-Z][a-z][A-Za-z]*){4}\ +(?:Ave|Blvd|Bvd|Cir|Ct|Dr|Ln|Pkwy|Pl|Plz|Pt|Pts|Rd|Rte|Sq|Sqs|St|Sts|Ter|Terr|Trl|Wy)\.
					|
					(?:,?\ Jr\.?,?|\ +[A-Z][a-z][A-Za-z]*){5}\ +(?:Ave|Blvd|Bvd|Cir|Ct|Dr|Ln|Pkwy|Pl|Plz|Pt|Pts|Rd|Rte|Sq|Sqs|St|Sts|Ter|Terr|Trl|Wy)\.
					|
					(?:,?\ Jr\.?,?|\ +[A-Z][a-z][A-Za-z]*){1,5}
				)?
				# OPTIONAL POST-DIR
				(?:
					# Standard post-dir format
					%(CAPTURE_START)s
						,?\s(?:N\.?E|S\.?E|N\.?W|S\.?W|N|S|E|W)\.?
					)
					# Avoid greedily capturing more letters, like
					# '123 Main St, New England' to '123 Main St, N'
					(?![A-Za-z])

					|

					# Or, a special-case for DC quadrants, to find stuff like:
					# "600 H Street in NE Washington"
					# "600 H Street in the NE quadrant"
					# "600 H Street in northeast DC"

					# Note that this is NOT captured, so that it's excluded from
					# the final output.
					,?
					\s in
					%(CAPTURE_START)s
						\s
					)
					(?:
						(?:the|far) \s
					)?

					%(CAPTURE_START)s
						(?:NE|SE|NW|SW|[Nn]ortheast|[Ss]outheast|[Nn]orthwest|[Ss]outhwest)
						(?=
							\s (?:quadrant|D\.?C\.?|Washington)
						)
					)
				)?
			)?
		)
	"""

    STREET_NAME_CAPTURE = STREET_NAME % {"CAPTURE_START": "("}

    STREET_NAME_NOCAPTURE = STREET_NAME % {"CAPTURE_START": "(?:"}
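
    # Illustrative sketch (not part of the original extractor): the
    # %(CAPTURE_START)s placeholder above lets one template yield both a
    # capturing and a non-capturing variant of the same fragment, which is
    # how ADDRESSES_RE below captures street names in some positions while
    # merely consuming them in others.
    if DEBUG:
        _demo = r"%(CAPTURE_START)s\d+)"  # hypothetical fragment, for illustration only
        print _demo % {"CAPTURE_START": "("}    # -> (\d+)
        print _demo % {"CAPTURE_START": "(?:"}  # -> (?:\d+)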

    ADDRESSES_RE = re.compile(
        r"""(?x)
		(?<!-|/|:|,|\.|\$) # These various characters are not allowed before an address/intersection.
		\b

		# Ignore things that look like dates -- e.g., "21 May 2009".
		# This is a problem e.g. in cases where there's a May Street.
		(?!
			\d+\s+
			(?:January|February|March|April|May|June|July|August|September|October|November|December)
			,?\s+
			\d\d\d\d
		)

		# Ignore intersections that are prefixed by "University of", like
		# "University of Texas at Austin". This is a common false positive.
		(?<!
			[Uu][Nn][Ii][Vv][Ee][Rr][Ss][Ii][Tt][Yy]\s[Oo][Ff]\s
		)

		(?:
			# SEGMENT ("FOO BETWEEN BAR AND BAZ")
			(?:
				%(STREET_NAME_CAPTURE)s (,?\ + between \ +) %(STREET_NAME_CAPTURE)s (,?\ + and \ +) %(STREET_NAME_CAPTURE)s
				|
				%(STREET_NAME_CAPTURE)s (,?\ + from \ +) %(STREET_NAME_CAPTURE)s (,?\ + to \ +) %(STREET_NAME_CAPTURE)s
			)

			|

			# BLOCK/ADDRESS
			(?:
				(
					(?:
						(?:\d+|[Ff][Ii][Rr][Ss][Tt])[-\ ]
							(?:(?:[Nn][Oo][Rr][Tt][Hh]|[Ss][Oo][Uu][Tt][Hh]|[Ee][Aa][Ss][Tt]|[Ww][Ee][Ss][Tt])\ )?
						[Bb][Ll][Oo][Cc][Kk]\ [Oo][Ff]
						|
						\d+\ *-\ *\d+
						|
						\d+
					)
					\ +
				)
				%(STREET_NAME_CAPTURE)s

				# ignore the intersection in parenthesis so that it's not picked
				# up as a separate location. We do this by consuming the string
				# but *not* capturing it.
				(?:
					\ +
					\(?
					between
					\ +
					%(STREET_NAME_NOCAPTURE)s
					\ +
					and
					\ +
					%(STREET_NAME_NOCAPTURE)s
					\)?
				)?
			)

			|

			# INTERSECTION
			(?:
				# Common intersection prefixes. They're included here so that the
				# regex doesn't include them as part of the street name.
				(?:
					(?:
						[Nn]ear |
						[Aa]t |
						[Oo]n |
						[Tt]o |
						[Aa]round |
						[Ii]ntersection\ of |
						[Cc]orner\ of |
						[Aa]rea\ of |
						[Aa]reas?\ surrounding |
						vicinity\ of |
						ran\ down |
						running\ down |
						crossed
					)
					\ +
				)?
				\b
				(?:%(STREET_NAME_CAPTURE)s)
				(\ +)
				(
					(?:
						[Aa][Nn][Dd] |
						[Aa][Tt] |
						[Nn][Ee][Aa][Rr] |
						& |
						[Aa][Rr][Oo][Uu][Nn][Dd] |
						[Tt][Oo][Ww][Aa][Rr][Dd][Ss]? |
						[Oo][Ff][Ff] |
						(?:[Jj][Uu][Ss][Tt]\ )?(?:[Nn][Oo][Rr][Tt][Hh]|[Ss][Oo][Uu][Tt][Hh]|[Ee][Aa][Ss][Tt]|[Ww][Ee][Ss][Tt])\ [Oo][Ff] |
						(?:[Jj][Uu][Ss][Tt]\ )?[Pp][Aa][Ss][Tt]
					)
					\ +
				)
				(?:%(STREET_NAME_CAPTURE)s)
			)
		)

		# OPTIONAL CITY SUFFIX
		(?:
			(?:
				,?\s+in |
				,
			)
			\s+

			# CITY NAME
			(
				[A-Z][a-z][A-Za-z]*                   # One initial-capped word
				(?:
					,?\ Jr\.?,?
					|
					\ [A-Z][a-z][A-Za-z]*
					|
					-[A-Za-z]+                        # Hyphenated words (e.g. "Croton-on-Hudson" in NY)
				){0,4}  # Initial-capped words
			)
		)?
		"""
        % {"STREET_NAME_CAPTURE": STREET_NAME_CAPTURE, "STREET_NAME_NOCAPTURE": STREET_NAME_NOCAPTURE}
    )
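    # parse_addresses() is supplied by the surrounding module and is not shown
    # in this example. A minimal sketch of such a helper, assuming it simply
    # concatenates each match's captured groups (an illustration, not the
    # project's implementation):
    #
    #   def parse_addresses(text, regex):
    #       return ["".join(g for g in m.groups() if g) for m in regex.finditer(text)]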

    addresses = parse_addresses(txt, ADDRESSES_RE)

    if addresses is None:
        print "COULD NOT EXTRACT ADDRESSES."
        print "\n\n************** %s [ERROR] ******************\n" % task_tag
        task.fail()
        return

    asset_path = doc.addAsset(
        addresses,
        "addresses.json",
        as_literal=False,
        description="addresses output from Everyblock address extractor",
        tags=[ASSET_TAGS["ADDRESSES_NLP"], ASSET_TAGS["CP_ENTITIES"]],
    )

    if asset_path is None or not doc.addFile(asset_path, None, sync=True):
        print "COULD NOT SAVE ASSET."
        print "\n\n************** %s [ERROR] ******************\n" % task_tag
        task.fail()
        return

    doc.addCompletedTask(task.task_path)
    task.routeNext()
    print "\n\n************** %s [END] ******************\n" % task_tag
    task.finish()
Example #7
def evaluateTextFile(task):
	task_tag = "EVALUATING TEXT FILE"
	print "\n\n************** %s [START] ******************\n" % task_tag
	print "evaluating text file at %s" % task.doc_id
	task.setStatus(302)
		
	from lib.Worker.Models.uv_document import UnveillanceDocument
	
	from conf import DEBUG
	from vars import ASSET_TAGS
	
	media = UnveillanceDocument(_id=task.doc_id)
	if media is None:
		print "DOC IS NONE"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		task.fail()
		return
	
	if not media.queryFile(media.file_name):
		print "NO DOCUMENT CONTENT"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		task.fail()
		return
	
	content = media.loadFile(media.file_name)
	if content is None:
		print "NO DOCUMENT CONTENT"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		task.fail()
		return
	
	from lib.Core.Utils.funcs import b64decode
	un_b64 = b64decode(content)
	
	# We have removed base 64-ing from the log files...
	if un_b64 is None:
		un_b64 = content

	if un_b64 is not None:
		from lib.Worker.Utils.funcs import getFileType
		from vars import MIME_TYPES, MIME_TYPE_MAP
		
		un_b64_mime_type = getFileType(un_b64, as_buffer=True)
		if DEBUG:
			print "MIME TYPE: %s" % un_b64_mime_type
		
		if un_b64_mime_type not in [MIME_TYPES['pgp'], MIME_TYPES['wildcard']]:
			err_msg = "MIME TYPE NOT USABLE"
			print err_msg
			print "\n\n************** %s [ERROR] ******************\n" % task_tag
			task.fail(status=412, message=err_msg)
			return
		
		media.addAsset(un_b64, "%s.pgp" % media.file_name, description="un-b64'ed pgp asset")
		media.addCompletedTask(task.task_path)

		message_sentinel = "-----BEGIN PGP MESSAGE-----"
		if un_b64[0:len(message_sentinel)] == message_sentinel:
			task.put_next("PGP.decrypt.decrypt")
			task.routeNext(inflate={
				'pgp_file' : ".data/%s/%s.pgp" % (media._id, media.file_name)
			})

	task.finish()
	print "\n\n************** %s [END] ******************\n" % task_tag
Example #8
def generatePageMap(uv_task):
	task_tag = "PAGE MAPPER"
	print "\n\n************** %s [START] ******************\n" % task_tag
	print "MAPPING PAGES FROM TEXT DOCUMENT at %s" % uv_task.doc_id
	uv_task.setStatus(302)
	
	from lib.Worker.Models.uv_document import UnveillanceDocument

	from conf import DEBUG
	from vars import ASSET_TAGS

	doc = UnveillanceDocument(_id=uv_task.doc_id)
	if doc is None:
		print "DOC IS NONE"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		uv_task.fail()
		return

	import os, json

	# initialize these so the None-check below works even if an asset lookup fails
	pages = None
	bow = None

	try:
		page_path = doc.getAssetsByTagName(ASSET_TAGS['TXT_JSON'])[0]['file_name']
		pages = json.loads(doc.loadFile(os.path.join(doc.base_path, page_path)))
	except Exception as e:
		if DEBUG: print e
	
	try:
		bow_path = doc.getAssetsByTagName(ASSET_TAGS['BOW'])[0]['file_name']
		bow = json.loads(doc.loadFile(os.path.join(doc.base_path, bow_path)))
	except Exception as e:
		if DEBUG: print e
	
	if pages is None or bow is None:
		print "NO PAGES OR BAG OF WORDS"
		print "\n\n************** %s [ERROR] ******************\n" % task_tag
		uv_task.fail()
		return
	
	# for each page, count occurrences of every unique bag-of-words term
	# that is not a stopword
	from numpy import intersect1d, setdiff1d
	from conf import getConfig
	
	if hasattr(uv_task, "stopwords"):
		stopwords = uv_task.stopwords
	else:	
		stopwords = os.path.join(getConfig('nlp_server.path'), "stopwords.json")
	
	try:
		with open(stopwords, 'rb') as S:
			if hasattr(uv_task, "stopwords_lang"):
				lang = uv_task.stopwords_lang
			else:
				lang = "english"
			
			stopwords = json.loads(S.read())[lang]
				
	except Exception as e:
		print "NO STOPWORDS...\n%s" % e
		print "\n\n************** %s [WARN] ******************\n" % task_tag
	
	page_map = []
	
	print "STOPWORDS: (len %d)\nTOP:\n%s\n" % (len(stopwords), stopwords[:10])
	print "BAG OF WORDS: (len %d)\nTOP:\n%s\n" % (len(bow), bow[:10])
	
	use_words = [w for w in setdiff1d(bow, stopwords).tolist() if len(w) > 1]	
	print "SIFTING BAG OF WORDS (old len: %d, new len: %d)" % (len(bow), len(use_words))
	
	global_info = {}

	for i, p in enumerate(pages):
		if p is None: continue
		
		page_bow = p.lower().split(" ")
		words = intersect1d(use_words, page_bow).tolist()
		if len(words) == 0: continue
		
		map = []
		frequency_max = 0

		for word in words:
			word_info = { 'word' : word, 'count' : page_bow.count(word) }
			
			map.append(word_info)
			if word_info['count'] > frequency_max: frequency_max = word_info['count']

			if word not in global_info.keys():
				global_info[word] = 0

			global_info[word] += word_info['count']
		
		page_map.append({ 'index' : i, 'map' : map, 'frequency_max' : frequency_max })
	
	if len(page_map) > 0:
		global_info['uv_page_map'] = page_map
		asset_path = doc.addAsset(global_info, "page_map.json", as_literal=False,
			description="word frequencies, page-by-page", tags=[ASSET_TAGS['PAGE_MAP']])
				
		if asset_path is None or not doc.addFile(asset_path, None, sync=True):
			print "COULD NOT SAVE ASSET."
			print "\n\n************** %s [ERROR] ******************\n" % task_tag
			uv_task.fail()
			return
	
	doc.addCompletedTask(uv_task.task_path)
	uv_task.routeNext()
	
	print "\n\n************** %s [END] ******************\n" % task_tag
	uv_task.finish()