Example #1
def prepare_request_fields(results):
	if current_user.is_anonymous():
		return map(lambda r: {     
			  "id":           r.id, \
			  "text":         helpers.clean_text(r.text), \
			  "date_received": helpers.date(r.date_received or r.date_created), \
			  "department":   r.department_name(), \
			  "status":       r.status, \
			  # The following two attributes are defined as model methods,
			  # and not regular SQLAlchemy attributes.
			  "contact_name": r.point_person_name(), \
			  "solid_status": r.solid_status()
			   }, results)
	else:
		return map(lambda r: {     
			  "id":           r.id, \
			  "text":         helpers.clean_text(r.text), \
			  "date_received": helpers.date(r.date_received or r.date_created), \
			  "department":   r.department_name(), \
			  "requester":    r.requester_name(), \
			  "due_date":     format_date(r.due_date), \
			  "status":       r.status, \
			  # The following two attributes are defined as model methods,
			  # and not regular SQLAlchemy attributes.
			  "contact_name": r.point_person_name(), \
			  "solid_status": r.solid_status()
			   }, results)
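A small usage sketch, assuming the Flask-Login context that supplies current_user and an iterable of Request rows (the wrapper function and variable names below are illustrative, not part of the project); list() is used because map() is lazy on Python 3:

import json

def requests_as_json(results):
    # Illustrative caller; 'results' is a SQLAlchemy query of Request rows.
    fields = list(prepare_request_fields(results))
    # Anonymous visitors get the trimmed field set (no "requester" or "due_date");
    # authenticated staff get the full one.
    return json.dumps({"objects": fields})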
Example #2
def prepare_request_fields(results):
    if current_user.is_anonymous():
        return map(lambda r: {
           "id":           r.id, \
           "text":         helpers.clean_text(r.text), \
           "date_received": helpers.date(r.date_received or r.date_created), \
           "department":   r.department_name(), \
           "status":       r.status, \
           # The following two attributes are defined as model methods,
           # and not regular SQLAlchemy attributes.
           "contact_name": r.point_person_name(), \
           "solid_status": r.solid_status()
            }, results)
    else:
        return map(lambda r: {
           "id":           r.id, \
           "text":         helpers.clean_text(r.text), \
           "date_received": helpers.date(r.date_received or r.date_created), \
           "department":   r.department_name(), \
           "requester":    r.requester_name(), \
           "due_date":     format_date(r.due_date), \
           "status":       r.status, \
           # The following two attributes are defined as model methods,
           # and not regular SQLAlchemy attributes.
           "contact_name": r.point_person_name(), \
           "solid_status": r.solid_status()
            }, results)
Example #3
def etl_job():
    # Load the AWS credentials for the job from the local secrets file.
    with open('/home/ubuntu/ti_etl/secrets.json') as secrets_file:
        data = json.load(secrets_file)
    logger = helpers.setup_logging()
    s3_client = boto3.client(
        's3',
        aws_access_key_id=data['aws_access_key_id'],
        aws_secret_access_key=data['aws_secret_access_key'])
    s3_resource = boto3.resource(
        's3',
        aws_access_key_id=data['aws_access_key_id'],
        aws_secret_access_key=data['aws_secret_access_key'])
    # Collect every object key in the bucket, then point 'obj' at the zip
    # archive named by helpers.zip_file_name().
    keys = []
    resp = s3_client.list_objects_v2(Bucket='dealer-churn-analysis')
    for obj in resp['Contents']:
        keys.append(obj['Key'])
    for key in keys:
        names = key.split("/")
        obj = s3_resource.Bucket('dealer-churn-analysis').Object(
            helpers.zip_file_name())
    file_name = 'praxis/etl/logs/log_{file}.txt'.format(file=helpers.date())
    obj_log = s3_resource.Bucket('dealer-churn-analysis').Object(file_name)
    # Download the archive into memory and open it as a zip file.
    buffer = io.BytesIO(obj.get()["Body"].read())
    zip_file = zipfile.ZipFile(buffer, 'r')
    logger.info("Name of csv in zip file :%s", zip_file.namelist())
    logs = ""
    for name_of_zipfile in zip_file.namelist():
        zip_open = pd.read_csv(zip_file.open(name_of_zipfile))
        # Rebuild the audit-column frame for every CSV so its length always
        # matches the file currently being loaded.
        dataframe = pd.DataFrame()
        dataframe['created_at'] = pd.Series([datetime.datetime.now()] *
                                            len(zip_open))
        dataframe['last_updated_at'] = pd.Series([datetime.datetime.now()] *
                                                 len(zip_open))
        zip_open = pd.concat([dataframe, zip_open], axis=1)
        zip_open = zip_open.dropna()
        table_name = "{name}_table".format(
            name=name_of_zipfile.replace('.csv', ''))
        #print (zip_open)
        try:
            zip_open.to_sql(name=name_of_zipfile.replace('.csv', ''),
                            con=database.db_connection(),
                            if_exists='append',
                            index=False)
        except SQLAlchemyError as sqlalchemy_error:
            print(sqlalchemy_error)
            logs = '\n{table_name}\n{error}\n{logs}'.format(
                logs=logs, error=sqlalchemy_error, table_name=table_name)
            logger.error(" %s", sqlalchemy_error)
        database.db_connection().execute('SET FOREIGN_KEY_CHECKS=1;')
    end_time = datetime.datetime.now()
    logger.info("End time of program : %s", end_time)
    logs = '{logs} \nstart_time : {start_time} \nend_time : {end_time}'.format(
        start_time=helpers.start_time(), logs=logs, end_time=end_time)
    print(logs)
    obj_log.put(Body=logs)
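For reference, etl_job() reads only two keys out of the secrets file, so a minimal /home/ubuntu/ti_etl/secrets.json would look like the following (placeholder values):

{
    "aws_access_key_id": "AKIAEXAMPLEKEYID",
    "aws_secret_access_key": "exampleSecretAccessKeyValue"
}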
Example #4
def fetch_requests():
	"""
	Ultra-custom API endpoint for serving up requests.
	Supports limit, search, and page parameters and returns JSON whose
	'objects' field holds the list of results.
	"""
	user_id = get_user_id()
	# Initialize database query
	results = db.session.query(Request)

	# Filters!
	results = filter_department(department_name = request.args.get('department'), results = results)
	results = filter_search_term(search_input = request.args.get('search'), results = results)
	results = filter_status(request.args.get('is_closed'), results = results)
	# due soon should only be an option for open requests
	results = filter_due_soon(due_soon = request.args.get('due_soon'), results = results)
	# overdue should be mutually exclusive with due soon, and should only be an option for open requests
	results = filter_overdue(overdue = request.args.get('overdue'), results = results)

	# Filters for agency staff only:
	if user_id:
		results = filter_my_requests(my_requests = request.args.get('my_requests'), results = results, user_id = user_id)
		results = filter_requester_name(requester_name = request.args.get('requester_name'), results = results)

	# min_date_created = datetime.strptime('May 1 2014', '%b %d %Y')
	# max_date_created = datetime.strptime('May 20 2014', '%b %d %Y')
	min_date_created = None
	max_date_created = None
	if min_date_created and max_date_created:
		results = results.filter(Request.date_created >= min_date_created).filter(Request.date_created <= max_date_created)

	# min_due_date = datetime.strptime('May 15 2014', '%b %d %Y')
	# max__due_date = datetime.strptime('May 20 2014', '%b %d %Y')
	min_due_date = None
	max_due_date = None
	if min_due_date and max_due_date:
		results = results.filter(Request.due_date >= min_due_date).filter(Request.due_date <= max_due_date)

	# Sorting!
			
	sort_by = request.args.get('sort_by') 
	if sort_by and sort_by != '':
		ascending = request.args.get('ascending')
		app.logger.info("\n\nAscending? %s" % ascending)
		app.logger.info("\n\nSort by? %s" % sort_by)
		if ascending == "true":
			results = results.order_by((getattr(Request, sort_by)).asc())
		else:
			results = results.order_by((getattr(Request, sort_by)).desc())
	results = results.order_by(Request.id.desc())


	# Pagination!

	page_number  = request.args.get('page') or 1
	page_number = int(page_number)

	# Cast to int so the offset arithmetic works when 'limit' arrives as a
	# query-string value.
	limit  = int(request.args.get('limit') or 15)
	offset = limit * (page_number - 1)


	# Execute query
	more_results = False
	num_results = results.count()
	start_index = 0
	end_index = 0

	if num_results != 0:
		start_index = (page_number - 1) * limit
		if start_index == 0:
			start_index = 1
		if num_results > (limit * page_number):
			more_results = True
			end_index = start_index + 14
		else:
			end_index = num_results

	results = results.limit(limit).offset(offset).all()

	# TODO([email protected]): This map is pretty kludgy, we should be detecting columns and auto
	# magically making them fields in the JSON objects we return.
	results = map(lambda r: { "id":           r.id, \
							  "text":         r.text, \
							  "date_created": helpers.date(r.date_received or r.date_created), \
							  "department":   r.department_name(), \
							  "requester":   r.requester_name(), \
							  "due_date":    format_date(r.due_date), \
							  # The following two attributes are defined as model methods,
							  # and not regular SQLAlchemy attributes.
							  "contact_name": r.point_person_name(), \
							  "solid_status": r.solid_status(), \
							  "status":       r.status
	   }, results)

	matches = {
		"objects": results,
		"num_results": num_results,
		"more_results": more_results,
		"start_index": start_index,
		"end_index": end_index
		}
	response = anyjson.serialize(matches)
	return Response(response, mimetype = "application/json")
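One way to exercise this endpoint is Flask's test client; the route path below is hypothetical and would have to match whatever @app.route decorates fetch_requests, but the query-string names are exactly the ones read from request.args above:

with app.test_client() as client:
	resp = client.get('/custom/request', query_string={
		'department': 'Police',      # filter_department
		'search': 'permit',          # filter_search_term
		'sort_by': 'date_created',   # any sortable Request column
		'ascending': 'true',
		'page': 1,
		'limit': 15,
	})
	data = anyjson.deserialize(resp.get_data(as_text=True))
	# Keys mirror the 'matches' dict assembled above.
	assert set(data) == {'objects', 'num_results', 'more_results',
						 'start_index', 'end_index'}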
Example #5
def fetch_requests():
	"""
	Ultra-custom API endpoint for serving up requests.
	Supports limit, search, and page parameters and returns JSON whose
	'objects' field holds the list of results.
	"""
	user_id = get_user_id()
	results = db.session.query(Request)

	# Filters!
	results = filter_department(department_name = request.args.get('department'), results = results)
	results = filter_search_term(search_input = request.args.get('search_term'), results = results)

	# Accumulate status filters
	status_filters = []

	if str(request.args.get('open')).lower() == 'true':
		status_filters.append(Request.open)

	if str(request.args.get('closed')).lower() == 'true':
		status_filters.append(Request.closed)

	date_format = '%m/%d/%Y'

	min_request_date = request.args.get('min_request_date')
	max_request_date = request.args.get('max_request_date')
	if min_request_date and max_request_date:
		min_request_date = datetime.strptime(min_request_date, date_format)
		max_request_date = datetime.strptime(max_request_date, date_format)
		results = results.filter(and_(Request.date_created >= min_request_date, Request.date_created <= max_request_date))
		app.logger.info('Request Date Bounding. Min: {0}, Max: {1}'.format(min_request_date, max_request_date))

	min_due_date = request.args.get('min_due_date')
	max_due_date = request.args.get('max_due_date')
	if min_due_date and max_due_date:
		min_due_date = datetime.strptime(min_due_date, date_format)
		max_due_date = datetime.strptime(max_due_date, date_format)
		results = results.filter(and_(Request.due_date >= min_due_date, Request.due_date <= max_due_date))
		app.logger.info('Due Date Bounding. Min: {0}, Max: {1}'.format(min_due_date, max_due_date))

	# Filters for agency staff only:
	if user_id:
		if str(request.args.get('due_soon')).lower() == 'true':
			status_filters.append(Request.due_soon)

		if str(request.args.get('overdue')).lower() == 'true':
			status_filters.append(Request.overdue)

		# Where am I the Point of Contact?
		if str(request.args.get('mine_as_poc')).lower() == 'true':
				results = results.filter(Request.id == Owner.request_id) \
								 .filter(Owner.user_id == user_id) \
								 .filter(Owner.is_point_person == True)

		# Where am I just a Helper?
		if str(request.args.get('mine_as_helper')).lower() == 'true':
				results = results.filter(Request.id == Owner.request_id) \
								 .filter(Owner.user_id == user_id) \
								 .filter(Owner.active == True)
		# Filter based on requester name
		requester_name = request.args.get('requester_name')
		if requester_name and requester_name != "":
			results = results.join(Subscriber, Request.subscribers).join(User).filter(func.lower(User.alias).like("%%%s%%" % requester_name.lower()))
			
	# Apply the set of status filters to the query.
	# Using 'or', they're non-exclusive!
	results = results.filter(or_(*status_filters))

	app.logger.info(status_filters)
	app.logger.info(str(results.statement.compile(dialect=postgresql.dialect())))

	sort_by = request.args.get('sort_column') 

	if sort_by and sort_by != '':
		ascending = request.args.get('sort_direction')
		app.logger.info("Sort Direction: %s" % ascending)
		app.logger.info("Sort Column: %s" % sort_by)
		if ascending == "asc":
			results = results.order_by((getattr(Request, sort_by)).asc())
		else:
			results = results.order_by((getattr(Request, sort_by)).desc())
	results = results.order_by(Request.id.desc())

	page_number = int(request.args.get('page_number') or 1)
	limit = int(request.args.get('limit') or 15)
	offset = limit * (page_number - 1)
	app.logger.info("Page Number: {0}, Limit: {1}, Offset: {2}".format(page_number, limit, offset))

	# Execute query
	more_results = False
	num_results = results.count()
	start_index = 0
	end_index = 0

	if num_results != 0:
		start_index = (page_number - 1) * limit
		if start_index == 0:
			start_index = 1
		if num_results > (limit * page_number):
			more_results = True
			end_index = start_index + 14
		else:
			end_index = num_results

	results = results.limit(limit).offset(offset).all()

	# TODO([email protected]): This map is pretty kludgy, we should be detecting columns and auto
	# magically making them fields in the JSON objects we return.
	results = map(lambda r: {     
		  "id":           r.id, \
		  "text":         helpers.clean_text(r.text), \
		  "date_created": helpers.date(r.date_received or r.date_created), \
		  "department":   r.department_name(), \
		  "requester":    r.requester_name(), \
		  "due_date":     format_date(r.due_date), \
		  "status":       r.status, \
		  # The following two attributes are defined as model methods,
		  # and not regular SQLAlchemy attributes.
		  "contact_name": r.point_person_name(), \
		  "solid_status": r.solid_status()
		   }, results)

	matches = {
		"objects": 		results,
		"num_results": 	num_results,
		"more_results": more_results,
		"start_index": 	start_index,
		"end_index": 	end_index
		}
	response = anyjson.serialize(matches)
	return Response(response, mimetype = "application/json")
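This revision renames several of the query parameters from Example #4 and adds explicit date bounds; a sketch of a filtered call (route again hypothetical), using the names this version actually reads:

params = {
	'search_term': 'permit',           # was 'search' in Example #4
	'open': 'true',                    # status filters are OR'd together
	'due_soon': 'true',                # only honoured for logged-in agency staff
	'min_request_date': '05/01/2014',  # parsed with the '%m/%d/%Y' format
	'max_request_date': '05/20/2014',
	'sort_column': 'date_created',     # was 'sort_by'
	'sort_direction': 'asc',
	'page_number': 2,
	'limit': 15,
}
with app.test_client() as client:
	resp = client.get('/custom/request', query_string=params)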