def edit_project():
    organization_email = get_jwt_identity()
    project_id = request.form.get('project_id')
    # check if the project is already in the database
    project = Project.query.filter_by(project_id=project_id).first()
    if project:
        if organization_email != project.organization_id:
            return jsonify("Project does not belong to this account")
        project.project_name = request.form.get('project_name')
        project.project_short_description = request.form.get('project_short_description')
        project.project_description = request.form.get('project_description')
        project.project_goal = request.form.get('project_goal')
        project.project_city = request.form.get('project_city')
        project.project_state = request.form.get('project_state')
        project.project_country = request.form.get('project_country')
        project.newspaper_id = request.form.get('newspaper_id')
        url = ''
        # picture upload: prefer an uploaded file, otherwise fall back to a link in the form
        if 'project_picture_link' in request.files:
            f = request.files['project_picture_link']
            filename_split = secure_filename(f.filename).split('.')
            filename = filename_split[0] + str(project_id) + '.' + filename_split[1]
            s3.put_object(ACL='public-read', Bucket='newspark-charity-data', Key=filename, Body=f)
            project.project_picture_link = 'https://newspark-charity-data.s3.amazonaws.com/' + filename
            url = 'https://newspark-charity-data.s3.amazonaws.com/' + filename
        elif 'project_picture_link' in request.form:
            project.project_picture_link = request.form.get('project_picture_link')
            url = request.form.get('project_picture_link')
        db.session.commit()
        # Remove the edited versions from the cache
        sql_query = '''select article_link from articles
                       where project_id1={0} or project_id2={0} or project_id3={0}
                          or project_id4={0} or project_id5={0} or project_id6={0};'''.format(project_id)
        conn = db.engine.connect().connection
        df = pd.read_sql(sql_query, conn)
        conn.close()
        unique_articles = list(df['article_link'].unique())
        for article_link in unique_articles:
            if redis.exists(article_link):
                redis.delete(article_link)
        return jsonify("Success")
    else:
        return jsonify("Initiative Does Not Exist")
def upload_image():
    if request.method == 'POST':
        # check that the post request has the file part
        if 'file' not in request.files:
            flash('No file part.', 'danger')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser may
        # submit an empty part without a filename
        if file.filename == '':
            flash('No selected file.', 'danger')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # reject uploads whose filename collides with an existing upload
            if file.filename in [filepath.split('/')[-1] for filepath in get_upload_images()]:
                flash('Filename already exists.', 'danger')
                return redirect(request.url)
            filename = secure_filename(file.filename)
            filepath = os.path.join('/tmp', filename)
            file.save(filepath)
            s3.put_object(ACL='public-read',
                          Body=open(filepath, 'rb'),
                          Bucket='georgeleeh-blog',
                          Key='static/images/uploads/' + filename)
            flash('Image uploaded successfully.', 'success')
            return redirect(url_for('home'))
    return render_template('upload_image.html')
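# `allowed_file` and `get_upload_images` are referenced in upload_image above but not
# defined here. A minimal sketch of a typical allowed_file check, shown only as an
# assumption about what that helper does; the extension whitelist is hypothetical.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

def allowed_file(filename):
    # accept only filenames with an extension in the whitelist
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS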
def get_donations_from_publisher_csv():
    """
    Returns the URL to a CSV with the donation logs
    :param: JWT token for an owner
    :return: JSON object with the URL
    """
    username = get_jwt_identity()
    exists = Owner.query.filter_by(username=username).first()
    if exists:
        sql_query = '''select donation_date_time, newspaper_article_title, newspaper_article_link,
                              amount_donated, project_name, organization_name, donations.newspaper_id
                       from donations, projects, organizations
                       where donations.project_id=projects.project_id
                         and donations.organization_id=organizations.email
                         and donations.newspaper_id in
                             (select publisher_id from Owning where owner_id='{}');'''.format(username)
        conn = db.engine.connect().connection
        donation_df = pd.read_sql(sql_query, conn)
        csv_buffer = StringIO()
        donation_df.to_csv(csv_buffer)
        filename = username + '_payment_logs.csv'
        s3.put_object(Bucket='newspark-charity-data', Key=filename, Body=csv_buffer.getvalue())
        return jsonify({"url": s3.generate_presigned_url('get_object',
                                                         Params={'Bucket': 'newspark-charity-data',
                                                                 'Key': filename},
                                                         ExpiresIn=300)})
    return jsonify({})
def add_charity_legal_record():
    email = request.form.get('email')
    now = datetime.now().strftime("%Y-%m-%d, %H:%M")
    bucket = "newspark-legal"
    file_name = "charities/legal_records.json"
    legal_records = json.load(s3.get_object(Bucket=bucket, Key=file_name)['Body'])
    legal_records[email] = {'version': 1, 'date_time': now}
    s3.put_object(Bucket=bucket, Key=file_name, Body=json.dumps(legal_records))
    return jsonify("Success")
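# A hedged companion sketch: reading back the record that add_charity_legal_record
# writes. The function name is an assumption; it loads the same JSON object from the
# newspark-legal bucket and returns None when no record exists for the email.
def get_charity_legal_record(email):
    bucket = "newspark-legal"
    file_name = "charities/legal_records.json"
    legal_records = json.load(s3.get_object(Bucket=bucket, Key=file_name)['Body'])
    return legal_records.get(email)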
def save_new_match(match, last_id):
    info = collect_new_info(match, last_id)
    BUCKET = 'aurate-sku'
    from app import s3
    response = s3.get_object(Bucket=BUCKET, Key='easypost_reference_match')
    previous_data = pickle.loads(response['Body'].read())
    previous_data['shipments'].update(info['shipments'])
    previous_data['last_id'] = info['last_id']
    s3.put_object(Body=pickle.dumps(previous_data), Bucket=BUCKET, Key='easypost_reference_match')
def upload_file(filename, filedata):
    bin_data = b64decode(filedata)
    try:
        s3.put_object(ACL='public-read', Body=bin_data, Bucket=BUCKET_NAME, Key=filename)
    except Exception as e:
        raise ValueError(e)
    file_url = 'https://%s.s3.amazonaws.com/%s' % (BUCKET_NAME, filename)
    return file_url
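# A minimal usage sketch for upload_file, assuming BUCKET_NAME and the `s3` client are
# already configured in this module. The helper name and the local-path handling are
# hypothetical, not part of the original code.
from base64 import b64encode

def upload_local_file_example(path):
    # read a local file, base64-encode it, and hand it to upload_file,
    # which decodes it again before uploading to S3
    with open(path, 'rb') as fh:
        encoded = b64encode(fh.read()).decode('ascii')
    return upload_file(path.split('/')[-1], encoded)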
def add_project():
    organization_email = get_jwt_identity()
    project_name = request.form.get('project_name')
    project_short_description = request.form.get('project_short_description')
    project_description = request.form.get('project_description')
    project_goal = request.form.get('project_goal')
    project_city = request.form.get('project_city')
    project_state = request.form.get('project_state')
    project_country = request.form.get('project_country')
    newspaper_id = request.form.get('newspaper_id')
    project_raised = 0

    # upload the project picture to S3
    f = request.files['project_picture_link']
    filename_split = secure_filename(f.filename).split('.')
    filename = filename_split[0] + str(project_name) + '.' + filename_split[1]
    s3.put_object(ACL='public-read', Bucket='newspark-charity-data', Key=filename, Body=f)
    project_picture_link = 'https://newspark-charity-data.s3.amazonaws.com/' + filename

    project = Project(
        organization_id=organization_email,
        project_name=project_name,
        project_short_description=project_short_description,
        project_description=project_description,
        project_picture_link=project_picture_link,
        project_goal=project_goal,
        project_city=project_city,
        project_state=project_state,
        project_country=project_country,
        project_raised=project_raised,
        newspaper_id=newspaper_id,
        removed=False
    )
    db.session.add(project)
    db.session.commit()

    # notify the team by email
    organization = Organization.query.filter_by(email=organization_email).first()
    msg = Message()
    msg.subject = 'Added Project'
    msg.body = 'A new project has been added in newspark. ' \
               'Below is a general overview of the organization:\n' \
               'Organization Email: {} Organization Name: {}\n'.format(organization_email,
                                                                       organization.organization_name)
    msg.recipients = ['*****@*****.**', '*****@*****.**', '*****@*****.**',
                      '*****@*****.**', '*****@*****.**', '*****@*****.**']
    msg.sender = "*****@*****.**"
    mail.send(msg)
    return jsonify("Success")
def get_articles_csv():
    """
    Returns the URL to a CSV with the article logs
    :param: JWT token for an owner
    :return: JSON object with the URL if the owner is found, 'Account Not Found' otherwise
    """
    username = get_jwt_identity()
    exists = Owner.query.filter_by(username=username).first()
    if exists:
        sql_query = '''select donations.donation_id, donations.amount_donated, articles.article_link,
                              articles.article_title, articles.widget_status, publisher_id
                       from donations
                       left join articles on donations.newspaper_article_link=articles.article_link
                       where articles.publisher_id in
                           (select publisher_id from Owning where owner_id='{}');'''.format(username)
        conn = db.engine.connect().connection
        df = pd.read_sql(sql_query, conn)
        # aggregate per-article revenue and donation counts
        all_articles = []
        unique_articles = list(df['article_link'].unique())
        for article_link in unique_articles:
            temp = df[df.article_link == article_link]
            dic = {}
            dic['publisher_name'] = temp['publisher_id'].iloc[0]
            dic['article_link'] = article_link
            dic['article_title'] = temp['article_title'].iloc[0]
            dic['widget_status'] = bool(temp['widget_status'].iloc[0])
            dic['revenue'] = int(temp['amount_donated'].sum())
            dic['amount_donations'] = len(temp)
            all_articles.append(dic)
        df_csv = pd.DataFrame(all_articles)
        csv_buffer = StringIO()
        df_csv.to_csv(csv_buffer)
        filename = username + '_article_logs.csv'
        s3.put_object(Bucket='newspark-charity-data', Key=filename, Body=csv_buffer.getvalue())
        return jsonify({"url": s3.generate_presigned_url('get_object',
                                                         Params={'Bucket': 'newspark-charity-data',
                                                                 'Key': filename},
                                                         ExpiresIn=300)})
    else:
        return jsonify('Account Not Found')
def get_payment_csv():
    """
    Returns the URL to a CSV with the payment logs
    :param: JWT token for an organization
    :return: JSON object with the URL
    """
    current_org = get_jwt_identity()
    organization = Organization.query.filter_by(email=current_org).first()
    query1 = Donation.query.filter_by(organization_id=current_org).all()
    serialize_query1 = []
    for p in query1:
        log = p.serialize()
        project_id = log['project_id']
        log['project_name'] = Project.query.filter_by(project_id=project_id).first().project_name
        del log['donation_id']
        del log['project_id']
        del log['organization_id']
        log['donor_name'] = 'NOT AVAILABLE'
        log['donor_email'] = 'NOT AVAILABLE'
        log['publisher_name'] = log.pop('newspaper_id')
        log['article_link'] = log.pop('newspaper_article_link')
        serialize_query1.append(log)
    donation_df = pd.DataFrame(serialize_query1)
    csv_buffer = StringIO()
    donation_df.to_csv(csv_buffer)
    filename = organization.organization_name + '_payment_logs.csv'
    s3.put_object(Bucket='newspark-charity-data', Key=filename, Body=csv_buffer.getvalue())
    return jsonify({"url": s3.generate_presigned_url('get_object',
                                                     Params={'Bucket': 'newspark-charity-data',
                                                             'Key': filename},
                                                     ExpiresIn=300)})
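# Hedged client-side sketch: the presigned URLs returned by the CSV endpoints above
# expire after 300 seconds, so a caller would fetch them promptly. The `requests`
# dependency and the function name are assumptions, not part of this module.
import requests

def download_csv(presigned_url, destination):
    response = requests.get(presigned_url, timeout=30)
    response.raise_for_status()
    with open(destination, 'wb') as fh:
        fh.write(response.content)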
def clear_s3_buckets(clear_payment, clear_articles, clear_matching):
    if clear_payment:
        s3.put_object(Bucket='newspark-payment', Key='routers.json', Body=json.dumps({}))
        print("cleared payment")
    if clear_articles:
        s3.put_object(Bucket='newspark-matching-data', Key='articles.json', Body=json.dumps({}))
        print("cleared articles")
    if clear_matching:
        s3.put_object(Bucket='newspark-matching-data', Key='matching.json', Body=json.dumps({}))
        print("cleared matching")
    return None
def dump_inventory_positions(inventory):
    # save inventory to s3
    from app import s3
    s3.put_object(Body=pickle.dumps(inventory), Bucket=BUCKET, Key='ryby_inventory')
def dump_updated_sku(inventory):
    # save inventory to s3
    from app import s3
    s3.put_object(Body=pickle.dumps(inventory), Bucket=BUCKET, Key='ryby_updated_sky')
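# A possible counterpart to the two dump helpers above: reading a pickled snapshot
# back from S3. The function name is an assumption; it reuses the same `s3` client,
# BUCKET, and key that dump_inventory_positions writes to.
def load_inventory_positions():
    from app import s3
    response = s3.get_object(Bucket=BUCKET, Key='ryby_inventory')
    # pickle.loads reverses the pickle.dumps call in dump_inventory_positions
    return pickle.loads(response['Body'].read())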