def refresh_files(drive_service, folder_path, drive_folder_id):
    """Mirror a local folder into a Google Drive folder.

    Updates Drive files whose MD5 differs from the local copy, uploads
    local files missing from Drive, and deletes Drive files that no
    longer exist locally.

    Args:
        drive_service: authenticated Google Drive API service object.
        folder_path: path of the local directory to mirror.
        drive_folder_id: id of the target Drive folder.
    """
    drive_folder = get_files_in_drive_folder(drive_service, drive_folder_id)
    os_folder = get_files_in_os_folder(folder_path)

    def find_file(file_name):
        # Linear scan of the Drive listing for an exact name match.
        for item in drive_folder:
            if file_name == item['name']:
                return item
        return None

    for item in os_folder:
        drive_file = find_file(item)
        if drive_file:
            # Compare checksums so unchanged files are not re-uploaded.
            # Context manager fixes the leaked handle from the previous
            # open(...).read() pattern.
            with open(os.path.join(folder_path, item), 'rb') as fh:
                md5_checksum = hashlib.md5(fh.read()).hexdigest()
            if md5_checksum != drive_file.get('md5Checksum'):
                logging.info(f'Update {drive_file["name"]}')
                media = MediaFileUpload(os.path.join(folder_path, item),
                                        mimetype=drive_file.get('mimeType'))
                drive_service.files().update(fileId=drive_file.get('id'),
                                             media_body=media,
                                             fields='id').execute()
        else:
            # New local file: create it on Drive.
            upload_file(drive_service, os.path.join(folder_path, item),
                        drive_folder_id)

    # Remove Drive files that have no local counterpart.
    for item in drive_folder:
        if item['name'] not in os_folder:
            logging.info(f'Delete {item["name"]}')
            drive_service.files().delete(fileId=item['id']).execute()
def geocode_collissions_by_year(event, context):
    """Geocode collision records for the year given in the event.

    Optionally narrows the table to 'killed' or 'injured' records first,
    uploads the geocoded result as CSV, and returns a Lambda-style 200
    response echoing the input event.
    """
    year = event['year']
    requested_filter = event['filter']
    data = {'table': get_data_for_year(year)}

    event_filter_name = ''
    if requested_filter:
        # Apply the matching predicate; the filter name becomes part of
        # the output file name.
        if requested_filter == 'killed':
            data['table'] = analysis.filter_table_func(data['table'],
                                                       filters.killed)
        if requested_filter == 'injured':
            data['table'] = analysis.filter_table_func(data['table'],
                                                       filters.injured)
        event_filter_name = '{}_'.format(requested_filter)

    data['data'] = load_data.geocode(data['table'])
    upload.upload_file(
        data, 'data',
        'accidents_{}{}_geocoded.csv'.format(event_filter_name, year))

    body = {
        "message": "Go Serverless v1.0! Your function executed successfully!",
        "input": event,
    }
    return {
        "statusCode": 200,
        "body": json.dumps(body),
    }
def upload_done_file(params):
    """Signal the backend that the sequence upload is complete.

    Touches a local ``DONE`` marker file if needed, uploads it with the
    given upload parameters, and deletes the local copy afterwards.
    """
    done_marker = 'DONE'
    print("Upload a DONE file to tell the backend that the sequence is all uploaded and ready to submit.")
    if not os.path.exists(done_marker):
        # Create an empty marker file; append mode avoids truncating one
        # that appears between the check and the open.
        with open(done_marker, 'a'):
            pass
    upload_file(done_marker, **params)
    # The marker exists only to be uploaded; remove the local copy.
    os.remove(done_marker)
def pages_content_page(_id):
    """Admin editor for the page with id ``_id``.

    On POST, rebuilds the page document from the submitted form — name,
    file, it/en urls, titles, descriptions and the per-language labelled
    content (type-3 labels carry file uploads) — and writes it back to
    MongoDB. Always re-renders the editor with the stored document.
    Requires an authenticated user; aborts with 401 otherwise.
    """
    if g.my_id is None:
        abort(401)
    if request.method == 'POST':
        page = g.db.pages.find_one({'_id': ObjectId(_id)})
        # Each label contributes two 'label_it*' form fields, hence // 2.
        # Floor division keeps the count usable with range() on Python 3.
        len_of_label = len(
            [x for x in request.form if x.startswith('label_it')]) // 2
        # == instead of `is`: identity comparison against an int literal
        # only works by CPython small-int caching, not by contract.
        if g.my['rank'] == 10:
            page['name'] = request.form['name']
            page['file'] = request.form['name_file']
            url_it = request.form['url_it']
            url_en = request.form['url_en']
            page['url'] = {'it': url_it, 'en': url_en}
            page['input_label'] = [
                int(request.form['input_label_' + str(i)])
                for i in range(len_of_label)
            ]
            title_it = request.form['title_it']
            title_en = request.form['title_en']
            description_it = request.form['description_it']
            description_en = request.form['description_en']
            page['title'] = {'it': title_it, 'en': title_en}
            page['description'] = {'it': description_it,
                                   'en': description_en}
        # NOTE(review): content rebuilding is assumed to apply to every
        # POST (rank 10 only gates the metadata above) — confirm against
        # the original indentation.
        page['content'] = {'it': [], 'en': []}
        for i in range(len_of_label):
            label = 'label_it_' + str(i)
            print(label)
            if 'label_it_name_0' in request.form:
                label = request.form['label_it_name_' + str(i)]
                alias = request.form['alias_it_name_' + str(i)]
            if page['input_label'][i] == 3:
                # Type-3 labels carry an uploaded file instead of text.
                name_file = upload_file('it_' + str(i), 'page')
                page['content']['it'].append({'label': label,
                                              'alias': alias,
                                              'value': name_file})
            else:
                page['content']['it'].append(
                    {'label': label,
                     'alias': alias,
                     'value': request.form['label_it_' + str(i)]})
        for i in range(len_of_label):
            label = 'label_en_' + str(i)
            if 'label_en_name_0' in request.form:
                label = request.form['label_en_name_' + str(i)]
                alias = request.form['alias_en_name_' + str(i)]
            if page['input_label'][i] == 3:
                name_file = upload_file('en_' + str(i), 'page')
                page['content']['en'].append({'label': label,
                                              'alias': alias,
                                              'value': name_file})
            else:
                page['content']['en'].append(
                    {'label': label,
                     'alias': alias,
                     'value': request.form['label_en_' + str(i)]})
        g.db.pages.update({'_id': ObjectId(_id)}, page)
    page_content = g.db.pages.find_one({'_id': ObjectId(_id)})
    return render_template('admin/pages_content.html',
                           page=page_content,
                           _id=_id)
def upload_candidates():
    """Render the candidate-upload form; on POST, ingest the upload."""
    if request.method != 'POST':
        return render_template('upload_candidates.html')
    # upload_file saves the file to the local filesystem and then loads
    # it into MySQL.
    upload_file(request, session, mysql)
    flash('Candidates Added', 'success')
    return redirect(url_for('uploaded_file'))
def traffic_collissions(event, context):
    """Load collision data and export per-year CSVs (all accidents and
    fatal-only) for every year after 2016 up to the current year; returns
    a Lambda-style 200 response echoing the input event."""
    data = {}
    data = load_data.load_data(data)
    # Derived tables; currently consumed only by the commented-out
    # analysis block below.
    year_data = load_data.add_year_column(data)
    hour_data = load_data.add_full_hour_date(data)
    overall_hour_data = load_data.add_hour_column(data)
    current_year = datetime.date.today().year
    for year in range(current_year, 2016, -1):
        killed_key = 'killed_{}'.format(year)
        data[year] = analysis.data_for_year(data['table'], str(year))
        data[killed_key] = analysis.filter_table_func(data[year],
                                                      filters.killed)
        upload.upload_file(data, killed_key,
                           'accidents_killed_{}.csv'.format(year))
        upload.upload_file(data, year, 'accidents_{}.csv'.format(year))
    '''
    charge_2017 = analysis.sum_counts_group(data[2017], 'charge_desc')
    charge_year = analysis.sum_counts_group(data['table'].group_by('year'), 'charge_desc')
    charge = analysis.sum_counts_group(data['table'], 'charge_desc')
    street_name_2017 = analysis.sum_counts_group(data[2017],'street_name')
    street_name_year = analysis.sum_counts_group(data['table'].group_by('year'),'street_name')
    street_name = analysis.sum_counts_group(data['table'],'street_name')
    groupped_data = analysis.year_sum_counts(year_data)
    year_police_beat_data = analysis.year_police_beat_sum_counts(year_data)
    full_hour_data = analysis.sum_counts_by_full_hour(hour_data)
    overall_hour_data_analysis = analysis.sum_counts_by_hour(overall_hour_data)
    upload.killed_injured_year_police_beat(year_police_beat_data)
    upload.accidents(year_data)
    upload.killed_injured_year(groupped_data)
    upload.full_hour(full_hour_data)
    upload.hour_data(overall_hour_data_analysis)
    upload.upload_table(charge_year, 'year_charge_desc.csv')
    upload.upload_table(charge_2017, 'charge_desc_2017.csv')
    upload.upload_table(charge, 'charge_desc.csv')
    upload.upload_table(street_name_year, 'year_street_name.csv')
    upload.upload_table(street_name_2017, 'street_name_2017.csv')
    upload.upload_table(street_name, 'street_name.csv')
    '''
    body = {
        "message": "Go Serverless v1.0! Your function executed successfully!",
        "input": event,
    }
    return {
        "statusCode": 200,
        "body": json.dumps(body),
    }
def upload_to_dropbox( title ):
    """Upload the zip archive named after *title* to Dropbox.

    HTTP errors from the upload are deliberately swallowed (best effort).
    """
    zip_name = utf8( u'%s.zip' % title )
    try:
        # SECURITY NOTE(review): the account and password are hardcoded
        # here; they should be moved to configuration / a secrets store.
        upload_file( zip_name, '998 temporary', zip_name,
                     '*****@*****.**', '|+17=lx3Il8zMz9]' )
    except mechanize.HTTPError:
        # Best-effort upload: ignore HTTP failures.
        pass
def generate_video():
    """Render today's JPEG frames into a 1080p AVI time-lapse with
    mencoder, then upload it and delete the local copy."""
    filename = now.strftime("%Y-%m-%d")
    output_avi = path + filename + ".avi"
    frame_glob = "mf://" + path + "*.jpg"
    # mencoder: mpeg4 @ 8 Mbit/s, 16:9, scaled to 1920x1080, 10 fps input.
    cmd = ("mencoder -nosound -ovc lavc -lavcopts "
           "vcodec=mpeg4:aspect=16/9:vbitrate=8000000 "
           "-vf scale=1920:1080 -o " + output_avi +
           " -mf type=jpeg:fps=10 " + frame_glob)
    os.system(cmd)
    print("generate done")
    logger.debug("Success!")
    remote_dir = ("/" + camera_location + "/" + camera_name +
                  "/Time-Lapse/Daily/")
    upload.upload_file(path, filename + ".avi", remote_dir,
                       filename + ".avi",
                       file_description="Daily Timelapse",
                       new_file_revision=False,
                       delete_after_upload=True,
                       notify=True)
    logger.debug("Upload done")
def upload_done_file(params):
    """Tell the backend the sequence is complete by uploading a DONE
    marker; the empty marker is created on demand and removed after."""
    notice = (
        "Upload a DONE file to tell the backend that the sequence is all "
        "uploaded and ready to submit."
    )
    print(notice)
    marker_missing = not os.path.exists('DONE')
    if marker_missing:
        # Touch an empty DONE file.
        open("DONE", 'a').close()
    upload_file("DONE", **params)
    os.remove("DONE")
def main():
    """Fetch today's AZ cost report from Gmail, upload it, and load the
    parsed subscription/service/cost rows into the database."""
    imap_url = "imap.gmail.com"
    current_date = datetime.datetime.today().strftime('%d-%b-%Y')
    report_path = os.path.join(os.environ["FOLDER"],
                               f"AZ_Cost_Report_{current_date}.xls")
    # Pull the report attachment for today from the inbox.
    mailbox = connection_to_inbox(imap_url)
    get_attachement(mailbox, current_date)
    upload_file(report_path)
    # Parse the spreadsheet and persist each section.
    services, subs, cost = process_raw_data(report_path)
    with DB() as db:
        db.insert_subscriptions(subs)
        db.insert_services(services)
        db.insert_cost(cost)
def test_upload_success(self):
    """Test multiple upload: push two pronunciation videos in sequence."""
    uploads = [
        ('What are you up to. Pronunciation and Intonation',
         path_in_medialib('what_are_you_up_to?_0.4_en-US.mp4')),
        ('How are you doing. Pronunciation and Intonation',
         path_in_medialib('how_are_you_doing?_0.4_en-US.mp4')),
    ]
    # Build both video objects first, then upload them in order,
    # mirroring the original construct-then-upload sequence.
    videos = [YoutubeVideo(title=t, file=p) for t, p in uploads]
    for video in videos:
        upload_file(video)
def make_grayscale():
    """Download the image at the posted URL, convert it to grayscale,
    upload the result to S3 and return its URL.

    Expects JSON body: {"url": "<image url>"}.
    Returns JSON: {"grayscaledImage": "<s3 url>"}.
    """
    req_data = request.get_json()
    url = req_data['url']
    # get the filename and extension from url (last path segment)
    file_name = url.rsplit('/', 1)[-1]
    # create temporary directory, image filename, and temp image
    dirpath = tempfile.mkdtemp()
    try:
        original_image = dirpath + '/' + file_name
        grayscale_image = dirpath + '/' + 'grayscale_' + file_name
        # download image to temp_image location
        urlretrieve(url, original_image)
        # Pillow mode 'L' is 8-bit grayscale; save() returns None, so do
        # not bind its result.
        # copied from: https://stackoverflow.com/questions/12201577/how-can-i-convert-an-rgb-image-into-grayscale-in-python
        Image.open(original_image).convert('L').save(grayscale_image)
        # upload processed image to Amazon S3
        url = upload_file(grayscale_image)
    finally:
        # Always remove the temp directory, even if a step above fails
        # (previously the directory leaked on any exception).
        shutil.rmtree(dirpath)
    # return the uploaded image url back to client
    return jsonify({'grayscaledImage': url})
def make_gif():
    """Build an animated GIF from the posted image URLs and upload it.

    Expects JSON body: {"urls": [...], "gifFrameTime": <ms per frame>}.
    Returns JSON: {"gifResult": "<s3 url>"}.
    """
    req_data = request.get_json()
    urls = req_data['urls']
    frame_time = req_data['gifFrameTime']
    print(frame_time)
    dirpath = tempfile.mkdtemp()
    try:
        # Download every source frame into the temp directory. The '/'
        # separator fixes the original dirpath + filename concatenation,
        # which wrote siblings of the temp dir that rmtree never removed.
        original_images = []
        for url in urls:
            filename = url.split('/')[-1]
            original_image = dirpath + '/' + filename
            original_images.append(original_image)
            urlretrieve(url, original_image)
        gif_result = dirpath + '/gif_result.gif'
        size = (300, 300)
        frames = [Image.open(f).resize(size) for f in original_images]
        # The first frame is the base image; append only the remaining
        # frames (appending the full list duplicated frame 0).
        frames[0].save(fp=gif_result, format="GIF",
                       append_images=frames[1:], save_all=True,
                       duration=frame_time, loop=0)
        url = upload_file(gif_result)
    finally:
        # Remove the temp directory even if download/encode/upload fails.
        shutil.rmtree(dirpath)
    return jsonify({'gifResult': url})
def lambda_handler(event, context):
    """AWS Lambda entry point: stitch map tiles for the given URL and
    zoom level into a JPEG, upload it to S3, and return the download
    URL in an API-Gateway-shaped response."""
    logger.info('-=-=-=-=-=-= event -=-=-=-=-=-=')
    logger.info(event)
    logger.info('-=-=-=-=-=-= queryStringParameters -=-=-=-=-=-=')
    logger.info(event["queryStringParameters"])
    # Timestamp with the decimal point stripped makes a unique filename.
    ts = int(str(time.time()).replace('.', ''))
    query = event["queryStringParameters"]
    url = query["url"]
    zoom = query["zoom"] or 4
    outfile = '/tmp/{}.jpg'.format(ts)
    logger.info("url: " + url)
    logger.info("zoom: {}".format(zoom))
    logger.info("outfile: " + outfile)
    # Tile loading is async; drive the coroutine to completion here.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(load_tiles(url, zoom, outfile))
    logger.info("Going to upload S3: ")
    download_url = upload_file(outfile)
    logger.info("Uploaded to S3: " + download_url)
    return {
        "isBase64Encoded": False,
        "statusCode": 200,
        "headers": {},
        "body": json.dumps({'url': download_url}),
    }
def upload_video(self):
    """Prompt for a title and a video file, then upload the video for
    the current UID, reporting the outcome via message boxes."""
    uid = self.name_label.text()
    if uid == self.Name_Label_Init:
        QMessageBox.warning(self, "Warning", "UID should not be empty")
        return
    try:
        uid = int(uid)
    except ValueError:
        # Previously a non-numeric label crashed with an uncaught
        # ValueError; reject it with a dialog instead.
        QMessageBox.warning(self, "Warning", "UID must be an integer")
        return
    title, suc = QInputDialog.getText(self, "Title",
                                      "Enter title of your video:")
    if not suc:
        return
    title = title.strip()
    if not title:
        QMessageBox.warning(self, "Warning",
                            "Video Title should not be empty")
        return
    # Second tuple element is the selected filter; avoid shadowing the
    # builtin `type`.
    file, _selected_filter = QFileDialog.getOpenFileName(
        self, "Choose Video", ".", "Videos (*.mp4 *.avi)")
    if not file:
        return
    try:
        code = upload_file(uid, title, file)
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows KeyboardInterrupt/SystemExit.
        QMessageBox.critical(self, "Error", "Upload failed")
    else:
        if code == 200:
            QMessageBox.information(self, "Success", "Upload success")
            self.update_video_list()
        else:
            QMessageBox.critical(self, "Error", f"Upload failed <{code}>")
def upload_file():
    """Flask handler: route small uploads (< 30 MB by Content-Length) to
    the simple uploader and larger ones to the chunked large-file
    uploader; returns the underlying uploader's response."""
    if request.files:
        file = request.files['uploaded']
        if request.content_length < 30000000:
            resp = uf.upload_file(file)
        else:
            # Measure the stream by seeking instead of reading the whole
            # upload into memory just to take len() of it.
            file.stream.seek(0, 2)  # 2 == os.SEEK_END
            file_size = file.stream.tell()
            file.stream.seek(0)
            resp = sj.upload_large_file(file, file_size)
        # TODO: clean up response - doesn't do any error handling
        return resp
def geocode_collissions(event, context):
    """Geocode the current year's fatal-collision records, upload the
    resulting CSV, and return a Lambda-style 200 response."""
    year = datetime.date.today().year
    data = {}
    data['table'] = load_data.load_year_killed_data(str(year))
    data['data_current_year'] = load_data.geocode(data['table'])
    upload.upload_file(data, 'data_current_year',
                       'accidents_killed_{}_geocoded.csv'.format(year))
    body = {
        "message": "Go Serverless v1.0! Your function executed successfully!",
        "input": event,
    }
    return {"statusCode": 200, "body": json.dumps(body)}
def upload():
    """Solr upload page: GET renders the form; POST saves the posted
    file to a temp path, pushes it to Solr, and reports the result."""
    if request.method == 'GET':
        return render_template('solr_upload.html')
    elif request.method == 'POST':
        f = request.files['file']
        filepath = os.path.join('tmp', secure_filename(f.filename))
        f.save(filepath)
        try:
            msg = upload_file(util.solr_url, filepath)
        finally:
            # Always remove the temp copy, even when the Solr upload
            # raises (previously the file leaked on failure).
            os.remove(filepath)
        return render_template('solr_upload.html', result=msg)
    return "ERROR. Contact Admin and try again later."
def handle(self):
    """Append the current form message as a row to the shared workbook.

    Downloads the workbook named from the last three message fields,
    appends the remaining fields to Sheet1, saves and re-uploads it,
    then asks the user whether to quit the application.
    """
    message = self.get_message()
    print(message)
    # The last three message fields form the workbook name.
    filename = str(message[-3]) + str(message[-2]) + str(
        message[-1]) + '.xlsx'
    print(filename)
    file = upload.get_file(filename)
    with open(filename, 'wb') as f:
        f.write(file)
    wb = load_workbook(filename)
    # Subscript access replaces the deprecated get_sheet_by_name().
    sheet = wb['Sheet1']
    # rows=sheet.max_row
    use = message[:-3]
    sheet.append(use)
    wb.save(filename)
    upload.upload_file(filename)
    # `reply` avoids shadowing the builtin `apply`.
    reply = QMessageBox.question(self, '完成', '上传完成,是否退出?',
                                 QMessageBox.Yes | QMessageBox.No)
    if reply == QMessageBox.Yes:
        QCoreApplication.quit()
# mergepic = misc.imread('merge.png') diff = mergepic - oldpic threshold = 20 diff[abs(diff) < threshold] = 0 diff[255 - abs(diff) < threshold] = 0 # diff from -255~255, need to compress, mind the way of coding diff_coded = (diff + 255) / 2 misc.imsave(path_dictionary['diff_coded'], diff_coded) # need to handle the upload # FIXME upload.upload_file(path_dictionary['new'], "boardsnapshot", object_name=path_dictionary['new']) # handle new baord if_new_rst = detectIfNew(newpic, oldpic, basepic, canNew) if if_new_rst == 1: # need to add new board function current_board = (current_board + 1) % 5 board_path = "images/" + str(current_board) + "/" canNew = 0 ts = 0 elif if_new_rst == 2: # not mature, waiting to write enough content 1 elif if_new_rst == 0:
# NOTE(review): fragment cut at both ends — the `if` matching the first
# two lines and the call completing the last line are outside this view;
# indentation is reconstructed.
if_erasing = 0
if_new_rst = detectIfNew(back_ratio)
elif back_ratio < temp_basebaorddiff:
    # Backboard difference dropped: the writer appears to be erasing.
    if if_erasing == 0:
        # just erasing return 3 to save current image as temp
        if_erasing = 1
        print("[LOG]: just erasing", back_ratio)
        if_new_rst = 3
    else:
        if_new_rst = detectIfNew(back_ratio)
if if_new_rst == 1:
    # retrive the before erasing picture and upload as final
    upload.upload_file(path_dictionary['before_erasing'], "boardsnapshot",
                       object_name=path_dictionary['new'])
    upload.upload_file(path_dictionary['old'], "boardsnapshot",
                       object_name=path_dictionary['old']
                       )  # this is uploaded only for testing
    # need to add new board function
    # Advance to the next of 5 rotating board slots and reset state.
    current_board = (current_board + 1) % 5
    board_path = "images/" + str(current_board) + "/"
    canNew = 0
    ts = 0
else:
    # need to handle the upload
    upload.upload_file(path_dictionary['new'], "boardsnapshot",
"""Simple CLI: prompt the user to upload a file or download a file."""
import json
import time
from tkinter import Tk
from tkinter.filedialog import askopenfilename

import pandas as pd
import requests

from get import get_file
from upload import upload_file

choice = input(
    " To Upload File -> Enter U \n To Download File -> Enter D:\n Your Input-> "
)
# Exactly one character, either case, selects the action; the previous
# len(choice) == 1 check is implied by the membership test. The duplicate
# `import requests` has been removed.
if choice in ("U", "u"):
    upload_file()
elif choice in ("D", "d"):
    get_file()
else:
    print("Wrong Input")
    exit(1)
def pages_content_page(_id):
    """Admin editor for the page with id ``_id``.

    On POST, rebuilds the page document from the submitted form — name,
    file, it/en urls, titles, descriptions and the per-language labelled
    content (type-3 labels carry file uploads) — and writes it back to
    MongoDB. Always re-renders the editor with the stored document.
    Requires an authenticated user; aborts with 401 otherwise.
    """
    if g.my_id is None:
        abort(401)
    if request.method == 'POST':
        page = g.db.pages.find_one({'_id': ObjectId(_id)})
        # Each label contributes two 'label_it*' form fields, hence // 2.
        # Floor division keeps the count usable with range() on Python 3.
        len_of_label = len(
            [x for x in request.form if x.startswith('label_it')]) // 2
        # == instead of `is`: identity comparison against an int literal
        # only works by CPython small-int caching, not by contract.
        if g.my['rank'] == 10:
            page['name'] = request.form['name']
            page['file'] = request.form['name_file']
            url_it = request.form['url_it']
            url_en = request.form['url_en']
            page['url'] = {'it': url_it, 'en': url_en}
            page['input_label'] = [
                int(request.form['input_label_' + str(i)])
                for i in range(len_of_label)
            ]
            title_it = request.form['title_it']
            title_en = request.form['title_en']
            description_it = request.form['description_it']
            description_en = request.form['description_en']
            page['title'] = {'it': title_it, 'en': title_en}
            page['description'] = {'it': description_it,
                                   'en': description_en}
        # NOTE(review): content rebuilding is assumed to apply to every
        # POST (rank 10 only gates the metadata above) — confirm against
        # the original indentation.
        page['content'] = {'it': [], 'en': []}
        for i in range(len_of_label):
            label = 'label_it_' + str(i)
            print(label)
            if 'label_it_name_0' in request.form:
                label = request.form['label_it_name_' + str(i)]
                alias = request.form['alias_it_name_' + str(i)]
            if page['input_label'][i] == 3:
                # Type-3 labels carry an uploaded file instead of text.
                name_file = upload_file('it_' + str(i), 'page')
                page['content']['it'].append({
                    'label': label,
                    'alias': alias,
                    'value': name_file
                })
            else:
                page['content']['it'].append({
                    'label': label,
                    'alias': alias,
                    'value': request.form['label_it_' + str(i)]
                })
        for i in range(len_of_label):
            label = 'label_en_' + str(i)
            if 'label_en_name_0' in request.form:
                label = request.form['label_en_name_' + str(i)]
                alias = request.form['alias_en_name_' + str(i)]
            if page['input_label'][i] == 3:
                name_file = upload_file('en_' + str(i), 'page')
                page['content']['en'].append({
                    'label': label,
                    'alias': alias,
                    'value': name_file
                })
            else:
                page['content']['en'].append({
                    'label': label,
                    'alias': alias,
                    'value': request.form['label_en_' + str(i)]
                })
        g.db.pages.update({'_id': ObjectId(_id)}, page)
    page_content = g.db.pages.find_one({'_id': ObjectId(_id)})
    return render_template('admin/pages_content.html',
                           page=page_content,
                           _id=_id)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 ciel <*****@*****.**>
#
# Distributed under terms of the MIT license.

"""
Get the image URL: upload an image and copy its URL to the clipboard.
"""

from upload import upload_file
import util
import os
import sys

url = upload_file()
if url:
    # Copy the resulting URL to the macOS clipboard via pbcopy.
    os.system("echo '%s' | pbcopy" % url)
    util.alert('上传图片成功,图片 URL 已复制到剪切板!')
else:
    # Fix: was `util.alrt`, an AttributeError at runtime; the sibling
    # Markdown script uses util.alert for the same failure path.
    util.alert('上传失败!')
sys.exit(0)
def solar_noon(name):
    """Take the daily solar-noon photo, stamp it with weather and lake
    data, and upload both the raw and the overlaid image.

    Relies on module-level state: the daily_photo flag, logger, NOAA/WU
    URLs, camera settings, and the noon/sunrise/sunset datetimes.
    The ``name`` parameter is unused in this body.
    """
    if daily_photo == True:
        logger.debug('Daily Photo Running')
        # Current lake level and observation time from the NOAA XML feed.
        dom = minidom.parse(urllib2.urlopen(NOAA_URL))
        datums = dom.getElementsByTagName("datum")
        #print datums[0].getElementsByTagName("valid")[0].childNodes[0].data
        LAKE_LEVEL = datums[0].getElementsByTagName("primary")[0].childNodes[0].data + " ft"
        utc = datetime.datetime.strptime(
            datums[0].getElementsByTagName("valid")[0].childNodes[0].data,
            '%Y-%m-%dT%H:%M:%S-00:00')
        OBSERVED = utc.strftime("%Y-%m-%d")
        # Current conditions from the Weather Underground API.
        f = urllib2.urlopen(WU_CONDITIONS_URL)
        json_string = f.read()
        parsed_json = json.loads(json_string)
        location = parsed_json['location']['city']
        weather = parsed_json['current_observation']['weather']
        temp_f = parsed_json['current_observation']['temp_f']
        relative_humidity = parsed_json['current_observation']['relative_humidity']
        wind_string = parsed_json['current_observation']['wind_string']
        wind_dir = parsed_json['current_observation']['wind_dir']
        wind_mph = parsed_json['current_observation']['wind_mph']
        wind_gust_mph = parsed_json['current_observation']['wind_gust_mph']
        f.close()
        # Moon illumination from the WU astronomy endpoint.
        f = urllib2.urlopen(WU_ASTRONOMY_URL)
        json_string = f.read()
        parsed_json = json.loads(json_string)
        percentIlluminated = parsed_json['moon_phase']['percentIlluminated']
        f.close()
        logger.debug(
            'Weather Data - city: %s; weather: %s; temp_f: %s; relative_humidity: %s; wind_string: %s; wind_dir: %s; wind_mph: %s; wind_gust_mph: %s; percentIlluminated: %s',
            location, weather, temp_f, relative_humidity, wind_string,
            wind_dir, wind_mph, wind_gust_mph, percentIlluminated)
        filename = noon.strftime("%Y-%m-%d-%H-%M-%S")
        logger.debug('Taking Photo')
        #os.system("raspistill -w 1920 -h 1080 -vf -hf -o /home/pi/RPiWebCam/SolarNoon/" + filename + ".jpg")
        res = cam.take(vflip=vflip, hflip=hflip, resolution_w=resolution_w,
                       resolution_h=resolution_h, exposure_compensation=0,
                       shutter_speed=shutter_speed, exposure_mode="auto",
                       iso=100,
                       file_name="/home/pi/" + application_folder + "/SolarNoon/" + filename + ".jpg")
        # NOTE(review): the next two lines appear to be a commented-out
        # subprocess alternative whose line break was lost in formatting.
        #x = subprocess.check_output(["raspistill -w 1920 -h 1080 -vf -hf -o /home/pi/RPiWebCam/SolarNoon/" + filename + ".jpg"])
        #logger.debug('x: %s', x)
        logger.debug('Adding Overlay to Photo')
        # ImageMagick: draw a translucent panel and annotate date,
        # weather, lake level, and sun/moon data onto a "-o" copy.
        os.system("convert /home/pi/RPiWebCam/SolarNoon/" + filename + ".jpg" +
                  " -strokewidth 0 -fill 'rgba( 140, 140, 140, 0.8 )' -draw 'rectangle 30,1060 750,675'" +
                  " -pointsize 36 -fill white -annotate +40+720 '" + noon.strftime("%m/%d/%Y") +
                  "' -pointsize 36 -fill white -annotate +40+800 'Temperature:" +
                  "' -pointsize 36 -fill white -annotate +275+800 '" + str(temp_f) +
                  "' -pointsize 36 -fill white -annotate +40+840 'Wind: " +
                  "' -pointsize 36 -fill white -annotate +275+840 'From the " + wind_dir + " at " + str(wind_mph) + " MPH" +
                  "' -pointsize 36 -fill white -annotate +40+880 'Lake Level:" +
                  "' -pointsize 36 -fill white -annotate +275+880 '" + LAKE_LEVEL +
                  "' -pointsize 36 -fill white -annotate +40+920 'Sunrise:" +
                  "' -pointsize 36 -fill white -annotate +275+920 '" + sunrise.strftime("%I:%M %p") +
                  "' -pointsize 36 -fill white -annotate +40+960 'Solar Noon:" +
                  "' -pointsize 36 -fill white -annotate +275+960 '" + noon.strftime("%I:%M %p") +
                  "' -pointsize 36 -fill white -annotate +40+1000 'Sunset:" +
                  "' -pointsize 36 -fill white -annotate +275+1000 '" + sunset.strftime("%I:%M %p") +
                  "' -pointsize 36 -fill white -annotate +40+1040 'Moon:" +
                  "' -pointsize 36 -fill white -annotate +275+1040 '" + str(percentIlluminated) + "%" +
                  "' /home/pi/RPiWebCam/SolarNoon/" + filename + "-o.jpg")
        logger.debug('Uploading Photo')
        #os.system("/home/pi/RPiWebCam/daily-photo-upload.py")
        # Upload the raw photo (no notification) and the overlaid copy
        # (notifies); both delete the local file after upload.
        upload.upload_file("/home/pi/RPiWebCam/SolarNoon/", filename + ".jpg",
                           "/" + camera_location + "/" + camera_name + "/Solar Noon/",
                           filename + ".jpg",
                           file_description="Solar Noon Image",
                           new_file_revision=False,
                           delete_after_upload=True, notify=False)
        upload.upload_file("/home/pi/RPiWebCam/SolarNoon/", filename + "-o.jpg",
                           "/" + camera_location + "/" + camera_name + "/Solar Noon/Overlay",
                           filename + "-o.jpg",
                           file_description="Solar Noon Overlay Image",
                           new_file_revision=False,
                           delete_after_upload=True, notify=True)
    else:
        logger.info("Daily Photo disabled in config.txt.")
    logger.removeHandler(handler)
# NOTE(review): fragment — the enclosing SendText class header and the
# start of its send_method are outside this view (Python 2 code).
        # POST the prepared payload to the WeChat custom-message endpoint.
        self.httpClient.request(
            "POST",
            "/cgi-bin/message/custom/send?access_token=%s" % access_token,
            self.params,
            headers={})

    def response_method(self):
        # Read back and dump the HTTP response for inspection.
        self.response = self.httpClient.getresponse()
        print self.response.status
        print self.response.reason
        print self.response.read()


class SendImage(SendText):
    # Image message variant: same transport as SendText, different payload.
    def __init__(self, OPENID, Media_ID):
        # NOTE(review): the template has one %s but two format args —
        # this raises TypeError as written; the "******" touser value
        # looks like a redacted "%s". Confirm against the original.
        self.params = '{"touser":"******","msgtype":"image","image":{"media_id":"%s"}}' % (
            OPENID, Media_ID)


if __name__ == '__main__':
    # Send the uploaded image to every known user.
    openidlist = getUserMsg.get_user_id()
    for OPENID in openidlist:
        # contex = raw_input('Input message ')
        # sendtext = SendText(OPENID,contex)
        # sendtext.send_method()
        # sendtext.response_method()
        Media_ID = upload.upload_file('panda.jpg')
        sendimage = SendImage(OPENID, Media_ID)
        sendimage.send_method()
        sendimage.response_method()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 ciel <*****@*****.**>
#
# Distributed under terms of the MIT license.

"""
Get the image's Markdown link: upload an image and copy the Markdown
snippet to the clipboard.
"""

from upload import upload_file
import util
import os
import sys

uploaded_url = upload_file()
if not uploaded_url:
    util.alert('上传失败!')
    sys.exit(0)
# Wrap the URL as a Markdown image link and push it to the clipboard.
markdown_url = '![](%s)' % (uploaded_url)
os.system("echo '%s' | pbcopy" % markdown_url)
util.alert('上传图片成功,图片 Markdown 已复制到剪切板!')
sys.exit(0)