def test_uploader(self):
    """Run an uploader over the fixture file list and drain its progress
    queue until the upload finishes, then print the final result."""
    worker = Uploader(self.files_list, 12, self.q)
    worker.start()
    # Consume progress reports until the uploader signals completion.
    while worker.is_active():
        report = self.q.get()
        print(report.done, report.error, report.total)
    self.print_result(worker.result)
def setUpClass(self):
    """Load 'multiple_study.tsv' through an Uploader with message
    buffering enabled, and keep the buffer for the tests to inspect.

    NOTE(review): declared with `self` rather than `cls` -- presumably the
    test framework invokes it with the class object; confirm.
    """
    super(TestSampling_Event, self).setUpClass()
    sd = Uploader(self._config_file)
    # Buffer messages instead of emitting them, so tests can assert on them.
    sd.use_message_buffer = True
    # Column mapping for the sampling-event TSV fixture.
    json_data = json.loads('''{ "values": { "sample_oxford_id": { "column": 0, "type": "string" }, "study_id": { "column": 1, "type": "string" }, "latitude": { "column": 2, "type": "string" }, "longitude": { "column": 3, "type": "string" }, "location_name": { "column": 4, "type": "string" } } }''')
    sd.load_data_file(json_data, 'multiple_study.tsv')
    self._messages = sd.message_buffer
def action_crawler():
    """Fetch remote images named in the request and re-host them locally.

    Reads the ueditor catcher settings from config.json, runs each source
    URL through Uploader in "remote" mode, and returns a JSON payload with
    per-image results.
    """
    config_path = os.path.join(app.root_path, 'controller/ueditor/config.json')
    # `with` guarantees the config file is closed even on a parse error
    # (the original used open/close with no try/finally).
    with open(config_path, 'r') as fp:
        raw_config = json.load(fp)
    # BUG FIX: the original rebuilt `config` first and then read
    # 'catcherFieldName' from the rebuilt dict, which no longer contains
    # that key (KeyError). Read the field name from the raw config first.
    fieldName = raw_config['catcherFieldName']
    config = {
        "pathFormat": raw_config['catcherPathFormat'],
        "maxSize": raw_config['catcherMaxSize'],
        "allowFiles": raw_config['catcherAllowFiles'],
        "oriName": "remote.png"
    }
    results = []  # renamed from `list` to avoid shadowing the builtin
    # `dict.has_key` is Python-2-only; the `in` operator works everywhere.
    source = request.form[fieldName] if fieldName in request.form else request.args.get(fieldName, None)
    from uploader import Uploader  # hoisted out of the loop
    for imgUrl in source:
        item = Uploader(imgUrl, config, "remote")
        info = item.getFileInfo()
        results.append({
            "state": info["state"],
            "url": info["url"],
            "size": info["size"],
            "title": cgi.escape(info["title"]),
            "original": cgi.escape(info["original"]),
            "source": cgi.escape(imgUrl)
        })
    return jsonify({
        'state': 'SUCCESS' if len(results) else 'ERROR',
        'list': results
    })
def setUpClass(self):
    """Load 'dates.tsv' through an Uploader with message buffering enabled
    and keep the buffer for the tests to inspect.

    NOTE(review): declared with `self` rather than `cls` -- presumably the
    test framework invokes it with the class object; confirm.
    """
    super(TestDate, self).setUpClass()
    sd = Uploader(self._config_file)
    # Buffer messages instead of emitting them, so tests can assert on them.
    sd.use_message_buffer = True
    # Column mapping for the dates fixture; 'doc' is parsed as YYYY-MM-DD.
    json_data = json.loads('''{ "values": { "sample_oxford_id": { "column": 0, "type": "string" }, "study_id": { "column": 1, "type": "string" }, "doc": { "column": 2, "type": "datetime", "date_format": "%Y-%m-%d" }, "doc_accuracy": { "column": 3, "type": "string" } } }''')
    sd.load_data_file(json_data, 'dates.tsv')
    self._messages = sd.message_buffer
def __init__(self, port):
    """Bind a REP socket on this machine and wire up the shared helpers.

    :param port: TCP port to listen on.
    """
    ip = getCurrMachineIp()
    # Filled in later (presumably by an authentication step) from the
    # client's token.
    self.username, self.password = None, None
    self.socket = zhelper.newServerSocket(zmq.REP, ip, port)
    self.db = Db("TrackerDB")
    # Upload/download/listing handlers all share the same socket and DB.
    self.uploader = Uploader(self.socket, self.db)
    self.downloader = Downloader(self.socket, self.db)
    self.fileExplorer = FileExplorer(self.socket, self.db)
def __switch(self, xiinArgDict):
    """
    Traffic director: dispatch to file output, screen display, grep, or
    FTP upload depending on which attribute of xiinArgDict is set.
    """
    from reader import Reader
    reader = Reader()
    # Write output
    if xiinArgDict.filename is not None:
        print('Starting xiin...')
        print('')
        with open(xiinArgDict.filename, 'w') as xiinArgDict.outputFile:
            reader.info(xiinArgDict)
    # Displays output.
    elif xiinArgDict.display:
        print('Starting xiin...')
        print('')
        reader.info(xiinArgDict)
    elif xiinArgDict.grep is not None:
        print('Starting xiin...')
        print('')
        print('Searching files...')
        print('')
        self.grepXiinInfo(xiinArgDict.grep)
    elif xiinArgDict.upload is not None:
        # xiin.ftp = {'source': '', 'destination': '', 'uname': '', 'password': ''}
        from uploader import Uploader
        xiinArgDict.ftpSource = None
        xiinArgDict.ftpDestination = None
        xiinArgDict.ftpUname = None
        xiinArgDict.ftpPwd = None
        if len(xiinArgDict.upload) > 0:
            xiinArgDict.ftpSource = xiinArgDict.upload[0]
            xiinArgDict.ftpDestination = xiinArgDict.upload[1]
        if len(xiinArgDict.upload) > 2:
            # Legacy support
            # BUG FIX: the original compared with `is 'anon'` -- identity
            # comparison against a string literal is implementation-defined
            # and may silently differ from value equality; use `in`/`==`.
            if xiinArgDict.ftpUname in ('anon', 'anonymous'):
                pass
            else:
                xiinArgDict.ftpUname = xiinArgDict.upload[2]
                xiinArgDict.ftpPwd = xiinArgDict.upload[3]
        print('Starting xiin uploader...')
        print('')
        print('Uploading debugging information...')
        print('')
        uploader = Uploader()
        uploader.upload(xiinArgDict.ftpSource, xiinArgDict.ftpDestination,
                        xiinArgDict.ftpUname, xiinArgDict.ftpPwd)
    else:
        print('ERROR: Unknown')
        exit(7)
class DriDaemon(DaemonBase):
    """
    Manages all functions that need to take place on the router on a
    regular basis
    """

    def __init__(self, options):
        """Build the downloader/uploader pair and prime the policy manager."""
        DaemonBase.__init__(self, options)
        self.kill_switch = False  # set by terminate() to stop main_loop()
        self.loops = 0
        self.downloader = Downloader(options)
        self.uploader = Uploader(options)
        # Seed the policy manager with the currently-allowed traffic.
        self.allowed_traffic = self.downloader.get_allowed_traffic()
        self.policy_mgr = PolicyMgr("/tmp/dnsmasq.log", self.allowed_traffic, self.options)
        self.policy_mgr.prep_system()
        self.policy_mgr.initial_load()
        self.policy_mgr.rotate_log()

    def main_loop(self):
        """ Runs forever. We're a daemon """
        self.log('Starting dri...')
        while not self.kill_switch:
            start_time = time.time()
            # Poll the policy manager until the sleep window elapses,
            # then do one download/upload round.
            while time.time() < (start_time + INTER_LOOP_SLEEP):
                try:
                    has_more = self.policy_mgr.check_for_new_stuff()
                    if not has_more:
                        time.sleep(1)
                except (PolicyMgrException, CommandException):
                    self.log('Help! Policy Manager')
            # MAX_LOOPS appears to be a debug/test escape hatch: exit the
            # process after that many rounds when it is truthy.
            if MAX_LOOPS:
                self.loops += 1
                if self.loops > MAX_LOOPS:
                    sys.exit(0)
            try:
                self.downloader.get_addresses()
                allowed_traffic = self.downloader.get_allowed_traffic()
                self.policy_mgr.process_new_allowed(allowed_traffic)
                log_open_files("downloader")
            except (DownloadException, CommandException):
                self.log('Help! Downloading')
            try:
                self.uploader.upload_arp_table()
                log_open_files("uploader")
            except (UploadException, CommandException):
                self.log('Help! Uploading')
            #print "I LIVE"

    def terminate(self):
        """Signal the main loop to exit at its next iteration."""
        self.kill_switch = True
        print "dying"
def start(self):
    """Kick off the download -> tile -> upload pipeline over data_dir."""
    reader = ProductReader(self.data_dir)
    # All three stages share the same reader and working directory; they
    # are constructed first, then started in pipeline order.
    stages = (
        Downloader(reader, self.data_dir),
        ProductTiler(reader, self.data_dir),
        Uploader(reader, self.data_dir),
    )
    for stage in stages:
        stage.start()
def __switch(self, xiinArgDict):
    """
    Traffic director: dispatch to file output, screen display, grep, or
    FTP upload depending on which attribute of xiinArgDict is set.
    """
    from reader import Reader
    reader = Reader()
    # Write output
    if xiinArgDict.filename is not None:
        print('Starting xiin...')
        print('')
        with open(xiinArgDict.filename, 'w') as xiinArgDict.outputFile:
            reader.info(xiinArgDict)
    # Displays output.
    elif xiinArgDict.display:
        print('Starting xiin...')
        print('')
        reader.info(xiinArgDict)
    elif xiinArgDict.grep is not None:
        print('Starting xiin...')
        print('')
        print('Searching files...')
        print('')
        self.grepXiinInfo(xiinArgDict.grep)
    elif xiinArgDict.upload is not None:
        # xiin.ftp = {'source': '', 'destination': '', 'uname': '', 'password': ''}
        from uploader import Uploader
        xiinArgDict.ftpSource = None
        xiinArgDict.ftpDestination = None
        xiinArgDict.ftpUname = None
        xiinArgDict.ftpPwd = None
        if len(xiinArgDict.upload) > 0:
            xiinArgDict.ftpSource = xiinArgDict.upload[0]
            xiinArgDict.ftpDestination = xiinArgDict.upload[1]
        if len(xiinArgDict.upload) > 2:
            # Legacy support
            # BUG FIX: the original compared with `is 'anon'` -- identity
            # comparison against a string literal is implementation-defined
            # and may silently differ from value equality; use `in`/`==`.
            if xiinArgDict.ftpUname in ('anon', 'anonymous'):
                pass
            else:
                xiinArgDict.ftpUname = xiinArgDict.upload[2]
                xiinArgDict.ftpPwd = xiinArgDict.upload[3]
        print('Starting xiin uploader...')
        print('')
        print('Uploading debugging information...')
        print('')
        uploader = Uploader()
        uploader.upload(xiinArgDict.ftpSource, xiinArgDict.ftpDestination,
                        xiinArgDict.ftpUname, xiinArgDict.ftpPwd)
    else:
        print('ERROR: Unknown')
        exit(7)
def upload_resource_group_settings(context, deployment_name):
    """Upload the aggregated resource-group settings for one deployment.

    :param context: project context carrying config and upload settings.
    :param deployment_name: deployment whose settings key is targeted.
    """
    settings_uploader = Uploader(context, key='{}/{}'.format(
        constant.RESOURCE_SETTINGS_FOLDER, deployment_name))
    # NOTE(review): the response is captured but never used -- confirm
    # whether callers ever relied on it.
    response = settings_uploader.upload_content(
        constant.DEPLOYMENT_RESOURCE_GROUP_SETTINGS,
        json.dumps(context.config.aggregate_settings, indent=4, sort_keys=True),
        'Aggregate settings file from resource group settings files')
class Scanner:
    """ Scans an OSX desktop directory, the default storage for screenshots! """

    def __init__(self, client, trayIcon):
        self.client = client
        self.trayIcon = trayIcon
        self.screenshot_path = ''  # path of the most recently seen screenshot
        self.loader = Uploader(self.client, self.trayIcon)
        self.scan_path = os.path.expanduser('~') + '/Desktop/'
        self.files_in_dir = self.dir_list()
        self.stop_event = threading.Event()
        # OSX screenshot file-name pattern,
        # e.g. "Screen Shot 2015-01-02 at 3.45.01 PM.png".
        self.regex = 'Screen\sShot\s(\d){4}-(\d){2}-(\d){1,2}\sat\s(\d){1,2}\.(\d){1,2}\.(\d){1,2}\s(PM|AM)\.(\w){3}'
        # Two snapshots of the directory listing; their symmetric
        # difference identifies the newly added file.
        self.file_list_set_a = set(self.files_in_dir)
        self.file_list_set_b = set(self.files_in_dir)

    def dir_list(self):
        # Non-hidden entries of the scan directory.
        d_list = [f for f in os.listdir(self.scan_path) if f[0] != '.']
        return d_list

    def _check_name(self, name):
        """ Returns bool on whether file name is in OSX screenshot regex pattern """
        return re.match(self.regex, name) is not None

    def scan(self, stop_event):
        """ Create set containing new file; find difference in set, store name, update self.files_in_dir, check if name matches the OSX screenshot syntax, and finally Either store the screenshot path and continue to scan, or simply continue to scan. """
        if stop_event.isSet():
            return
        # Busy-wait until a new entry appears in the folder (or stop is set).
        while len(self.dir_list()) <= len(self.files_in_dir) and not stop_event.isSet():
            pass
        else:
            self.file_list_set_b = set(self.dir_list())
            if len(self.file_list_set_a) == len(self.file_list_set_b):
                # Catch if folder was changed
                self.scan(self.stop_event)
                pass
            else:
                new_file = self.file_list_set_a ^ self.file_list_set_b
                new_file = next(iter(new_file))
                self.files_in_dir = self.dir_list()
                if self._check_name(new_file):
                    reg_object = re.search(self.regex, new_file)
                    new_file = reg_object.group()
                    self.screenshot_path = self.scan_path + new_file
                    self.loader.upload(self.screenshot_path)
                    # NOTE(review): each event recurses rather than looping,
                    # so the call stack grows per screenshot -- confirm an
                    # iterative loop isn't needed for long sessions.
                    self.scan(self.stop_event)
                else:
                    self.scan(self.stop_event)
def run():
    """Poll SRC_DIR forever, handing every file that does not start with
    '_' to an SFTP uploader, then sleeping between scans."""
    uploader = Uploader(config.TARGET_HOST, 22, config.TARGET_USERNAME,
                        config.TARGET_PASSWORD, config.TARGET_DIR,
                        config.NUM_UPLOAD_THREADS)
    logging.debug('started')
    while 1:
        # Files prefixed with '_' are skipped (presumably in-progress
        # writes -- confirm against the producer side).
        files = [os.path.join(config.SRC_DIR, f)
                 for f in os.listdir(config.SRC_DIR)
                 if not f.startswith('_')]
        ret = uploader.upload(files)
        # NOTE(review): uses the root `logging` module above but a
        # module-level `logger` here -- confirm both are intentional.
        logger.info('found {} new files'.format(ret))
        time.sleep(config.LISTDIR_INTERVAL)
def get_user_list():
    """Return the uploader's file listing as a JSON HTTP response."""
    uploader = Uploader(app.config)
    payload = uploader.get_list()
    response = Response()
    response.headers['Content-Type'] = 'text/json'
    response.data = json.dumps(payload)
    return response
def test_setup_two(self):
    """Every file in files_2 must report completion with no errors."""
    worker = Uploader(self.files_2, self.number_of_processes_2, self.q_2)
    worker.start()
    expected = [f'Done: {file}' for file in self.files_2]
    # One progress report is expected per input file.
    for _ in self.files_2:
        progress = self.q_2.get()
        self.assertIn(progress.done, expected)
        self.assertEqual(progress.error, 'Errors: None')
def __init__(self, start=1600, connect='iut2-net3.iu.edu', metricName='org.osg.general.perfsonar-rabbitmq-simple', config=None, log=None):
    """RabbitMQ-backed uploader.

    :param start: start offset forwarded to the base Uploader.
    :param connect: host passed to the base Uploader.
    :param metricName: metric identifier passed to the base Uploader.
    :param config: configuration object; also used to open the MQ connection.
    :param log: logger forwarded to the base Uploader.
    """
    Uploader.__init__(self, start, connect, metricName, config, log)
    self.channel = ps_collector.get_rabbitmq_connection(
        config).createChannel()
    # Upper bound on a single published message's size, from config.
    self.maxMQmessageSize = self.readConfigFile('mq-max-message-size')
def test_setup_one(self):
    """Every file in files_1 must report completion with no errors."""
    worker = Uploader(self.files_1, self.number_of_processes_1, self.q_1)
    worker.start()
    expected = [f'Done: {file}' for file in self.files_1]
    # One progress report is expected per input file.
    for _ in self.files_1:
        progress = self.q_1.get()
        self.assertIn(progress.done, expected)
        self.assertEqual(progress.error, 'Errors: None')
def __init__(self):
    """Create the Speech client, the uploader helper, and the shared
    recognition defaults (per-request settings are merged in later)."""
    self.client = speech_v1.SpeechClient()
    self.uploader = Uploader()
    # Base recognition config; sample rate, channel count and language
    # are presumably filled in per request -- confirm against callers.
    self.base_config = {
        "encoding": speech_v1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
        "enable_word_time_offsets": True,
        "profanity_filter": False,
        "model": "default",
        "enable_automatic_punctuation": True
    }
def main():
    """Run a main program of the KSU F****r.

    Dispatches on argv[1]: download_all, upload_to_s3, analyze_HTMLs or
    create_index_DB; prints help otherwise.
    """
    if len(sys.argv) < 2:
        help()
        return Constants.EXIT_SUCCESS
    cmd = sys.argv[1]
    #downloader = PageDownloader()
    #
    # Use it if you want to create an estimated student DB automatically.
    #
    #downloader.determine_studentID()
    #
    # Use it if you want to create an estimated student DB using your hand.
    #
    #estimated_students_db_manager = downloader.get_db_manager()
    #estimated_students_db_manager.register_studentIDs_ranging("g0846002", "g0847498") #entrance_year=2008
    #estimated_students_db_manager.register_studentIDs_ranging("g0946010", "g0947622") #entrance_year=2009
    #estimated_students_db_manager.register_studentIDs_ranging("g1044011", "g1045344") #entrance_year=2010
    #estimated_students_db_manager.register_studentIDs_ranging("g1144010", "g1145505") #entrance_year=2011
    #estimated_students_db_manager.label_traced_students_ranging("g1144010", "g1145505", datetime.date(2015,07,14))
    #estimated_students_db_manager.register_studentIDs_ranging("g1244028", "g1245397") #entrance_year=2012
    #estimated_students_db_manager.register_studentIDs_ranging("g1344018", "g1349031") #entrance_year=2013
    #estimated_students_db_manager.register_studentIDs_ranging("g1444026", "g1445539") #entrance_year=2014
    #estimated_students_db_manager.register_studentIDs_ranging("g1540074", "g1547932") #entrance_year=2015
    #
    # Download all student data using an estimated student DB above.
    #
    if cmd == "download_all":
        downloader = PageDownloader()
        downloader.download_all()
    elif cmd == "upload_to_s3":
        # Upload the "tmp" tree; domain-specific runs are kept commented.
        u = Uploader()
        u.run("tmp")
        #u.run(Constants.CC_DOMAIN)
        #u.run(Constants.CSE_DOMAIN)
    #
    # Analyze and save downloaded HTMLs into "cse_student_DB.db".
    #
    elif cmd == "analyze_HTMLs":
        analyzer = StudentAnalyzer(Constants.STUDENT_TABLE_NAME)
        analyzer.analyze_HTMLs()
        #analyzer.analyze_images()
    elif cmd == "create_index_DB":
        analyzer = StudentAnalyzer(Constants.STUDENT_TABLE_NAME)
        analyzer.create_index_DB()
    else:
        help()
    return Constants.EXIT_SUCCESS
def upload(self):
    """Upload the file selected in the UI, reporting progress via the
    progress bar and logging to the console widget."""
    uploader = Uploader(log=self.console_out,
                        progress=progress(self.progressText, self.progressbar))
    filename = self.fileText.get()
    # Use the base filename without its extension as the upload's name.
    name = os.path.splitext(os.path.basename(filename))[0]
    uploader.upload(self.tokenText.get(), filename, name,
                    self.uploadFinished, endpoint=self._endpoint)
def initiate_uploader(self, file_path, desc=None):
    """Start a multipart upload for *file_path* and return an Uploader
    bound to it. The part size is derived from the file's total length.
    """
    handle = open_file(file_path=file_path)
    with handle:
        chunk = Uploader.calc_part_size(content_length(handle))
        init = self.api.initiate_multipart_upload(self.id, chunk, desc=desc)
        upload_id = init['x-oas-multipart-upload-id']
        detail = self.api.describe_multipart(self.id, upload_id)
        return Uploader(self, detail, file_path=file_path)
def setUpClass(self):
    """Load the locations fixture with message buffering enabled, keep the
    buffer, then load a second fixture through the SSR setup.

    NOTE(review): declared with `self` rather than `cls` -- presumably the
    test framework invokes it with the class object; confirm.
    """
    super(TestLocation, self).setUpClass()
    sd = Uploader(self._config_file)
    # Buffer messages instead of emitting them, so tests can assert on them.
    sd.use_message_buffer = True
    # Column mapping for locations.tsv, including proxy coordinates.
    json_data = json.loads('''{ "values": { "sample_oxford_id": { "column": 0, "type": "string" }, "study_id": { "column": 1, "type": "string" }, "latitude": { "column": 2, "type": "float" }, "longitude": { "column": 3, "type": "float" }, "location_name": { "column": 4, "type": "string" }, "country": { "column": 5, "type": "string" }, "proxy_latitude": { "column": 6, "type": "float" }, "proxy_longitude": { "column": 7, "type": "float" }, "proxy_location_name": { "column": 8, "type": "string" } } }''')
    sd.load_data_file(json_data, 'locations.tsv')
    self._messages = sd.message_buffer
    self.setUpSSR()
    # Second pass: load loc_no_study.tsv with the SSR column mapping;
    # its messages are deliberately not buffered.
    sd = Uploader(self._config_file)
    # sd.use_message_buffer = True
    json_data = json.loads(self._ag_json)
    sd.load_data_file(json_data, 'loc_no_study.tsv')
def update_name():
    """Rename an uploaded item; expects 'id' and 'name' form fields.

    Returns a JSON status payload ("DataError" on missing/empty input).
    """
    response = Response()
    response.headers['Content-Type'] = 'text/json'
    id = request.form.get('id', None)
    name = request.form.get('name', None)
    # Reject missing or empty input.
    if (id is None or name is None or name == ''):
        response.data = '{"status":"DataError"}'
        return response
    uploader = Uploader(app.config)
    uploader.update_name(id, name)
    # NOTE(review): the success path returns a bare string rather than the
    # prepared Response object, so the text/json header above is not used
    # here -- confirm the framework default is acceptable.
    return '{"status":"OK"}'
def upload_photo(photo, auth):
    """Upload a photo, retrying on HTTP errors, and return its photo id.

    :param photo: path of the image file to upload.
    :param auth: authentication object handed to Uploader.
    """
    while True:
        try:
            # BUG FIX: the original opened the file without ever closing it,
            # leaking one handle per retry; `with` guarantees closure.
            with open(photo, "rb") as f:
                data = f.read()
            u = Uploader(photo, data, auth)
            u.setPublic()
            req = u.getRequest()
            res = execute(req)
            return u.getPhotoIdFromResponse(res)
        except urllib2.HTTPError:
            # Transient HTTP failure: retry indefinitely (original
            # behavior preserved -- there is deliberately no backoff/limit).
            pass
class Transcriber:
    """Transcribes a video's audio track via the Cloud Speech API."""

    def __init__(self):
        """Create the Speech client, the uploader helper, and the shared
        recognition defaults (per-request settings are merged in later)."""
        self.client = speech_v1.SpeechClient()
        self.uploader = Uploader()
        # Base recognition config; sample rate, channel count and language
        # are filled in per request by extract_text().
        self.base_config = {
            "encoding": speech_v1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
            "enable_word_time_offsets": True,
            "profanity_filter": False,
            "model": "default",
            "enable_automatic_punctuation": True
        }

    def extract_text(self, language: str, video_path: str):
        """Convert the video to audio, upload it, and run long-running
        speech recognition.

        :param language: language code for recognition.
        :param video_path: path of the source video file.
        :return: the recognition result object.
        """
        audio_filepath, channel_count, sample_rate = VideoAnalyzer.convert_to_audio(
            video_path)
        # Merge the per-file audio properties into a copy of the defaults.
        complete_config = self.base_config.copy()
        complete_config['sample_rate_hertz'] = int(sample_rate)
        complete_config['audio_channel_count'] = int(channel_count)
        complete_config['language_code'] = language
        # Random suffix avoids collisions between concurrent uploads.
        upload_path = f'audio/audio_{random.randint(0,9999999)}.{audio_filepath.split(".")[-1]}'
        bucket_path = self.uploader.upload_blob(audio_filepath, upload_path)
        print('beginning long_running_recognize')
        recognition = self.client.long_running_recognize(
            complete_config, {'uri': bucket_path})
        print(u'Waiting for recognition process to finish')
        # Blocks until the long-running operation completes.
        response = recognition.result()
        print('completed recognition')
        return response
def recognize_image():
    """Handle an image-recognition upload: store the file, classify it,
    and return the matching exhibit record (HTTP 500 on failure)."""
    upl = Uploader(request.files, "file")
    upload_result = upl.upload()
    # upload() returns True on success, otherwise an error message.
    if upload_result is not True:
        return Response(upload_result, status=500)
    # Image recognition
    objectId = trainAndTest.trainOrTest(upl.uploaded_file)
    # BUG FIX: the original used `objectId is ""` -- identity comparison
    # against a string literal is implementation-defined and unreliable;
    # compare by value instead.
    if objectId == "":
        return Response("Bild nicht erkannt", status=500)
    jp = JsonParser(os.path.join(os.getcwd(), "flaskapp", "data.JSON"))
    jp.parse()
    exh = jp.get_item_by_id(objectId)
    # The uploaded file is only needed for classification; remove it.
    upl.delete_file()
    return exh
def upload_crawls(db_session):
    """Upload finished (or previously failed) crawls for enabled spiders,
    persist the new status, and e-mail a notification per upload."""
    notifier = EmailNotifier(config.SMTP_USER, config.SMTP_PASS,
                             config.SMTP_FROM, config.SMTP_HOST,
                             config.SMTP_PORT)
    uploader = Uploader()
    crawls = db_session.query(Spider).join(Crawl).\
        filter(Spider.enabled == True, Crawl.status.in_(['processing_finished', 'upload_errors']))
    for spider in crawls.all():
        if upload_required(spider):
            # Never upload an empty crawl.
            if spider.crawls[-1].products_count < 1:
                print 'Not uploading crawl with 0 products'
                continue
            print 'Uploading for', spider.name
            try:
                upload_changes(uploader, spider)
                spider.crawls[-1].status = 'upload_finished'
                spider.crawls[-1].uploaded_time = datetime.now()
            except Exception:
                # Any failure marks the crawl for a retry on the next run.
                spider.crawls[-1].status = 'upload_errors'
            db_session.add(spider.crawls[-1])
            db_session.commit()
            try:
                _send_notification(notifier, spider.crawls[-1], spider)
            except EmailNotifierException, e:
                print "Failed to send notifications: %s" % e
def _init_uploader(self):
    """Build an Uploader from this object's hosting credentials."""
    hosting_kwargs = {
        'region_name': self.hosting_region,
        'endpoint_url': self.hosting_endpoint_url,
        'access_key': self.hosting_access_key,
        'secret': self.hosting_secret,
    }
    return Uploader(**hosting_kwargs)
def setUpClass(self):
    """Load 'individual.tsv' through an Uploader with message buffering
    enabled and keep the buffer for the tests to inspect.

    NOTE(review): declared with `self` rather than `cls` -- presumably the
    test framework invokes it with the class object; confirm.
    """
    super(TestIndividual, self).setUpClass()
    sd = Uploader(self._config_file)
    # Buffer messages instead of emitting them, so tests can assert on them.
    sd.use_message_buffer = True
    # Column mapping for individual.tsv; note unique_id and unique_os_id
    # both map to column 2 (as written -- confirm that is intentional).
    json_data = json.loads('''{ "values": { "unique_id": { "column": 2, "type": "string" }, "unique_os_id": { "column": 2, "type": "string" }, "sample_oxford_id": { "column": 3, "type": "string" }, "sample_alternate_oxford_id": { "column": 4, "type": "string" }, "sample_source_id": { "column": 6, "type": "string" }, "donor_source_code": { "column": 7, "type": "string" }, "sample_source_type": { "column": 8, "type": "string" }, "species": { "column": 11, "type": "string" } } }''')
    sd.load_data_file(json_data, 'individual.tsv')
    self._messages = sd.message_buffer
class ClientHandler:
    """Serves one client request at a time over a REP socket, dispatching
    ls / download / upload to the matching helper object."""

    def __init__(self, port):
        """Bind the server socket and construct the shared helpers.

        :param port: TCP port to listen on.
        """
        ip = getCurrMachineIp()
        self.username, self.password = None, None  # set by _authenticate()
        self.socket = zhelper.newServerSocket(zmq.REP, ip, port)
        self.db = Db("TrackerDB")
        self.uploader = Uploader(self.socket, self.db)
        self.downloader = Downloader(self.socket, self.db)
        self.fileExplorer = FileExplorer(self.socket, self.db)

    def handleRequest(self):
        """ syncrounous (blocking) function, waits until receiving an action, handle it and return """
        clientRequestInfo = self.socket.recv_json()
        userToken = clientRequestInfo.get("token")
        if not self._authenticate(userToken):
            print("authentication error")
            return
        function = clientRequestInfo.get("function")
        # Note the inversion: a client "download" is served by our uploader
        # and a client "upload" by our downloader.
        if function == "ls":
            self.fileExplorer.explore(self.socket, self.username)
        elif function == "download":
            fileName = clientRequestInfo.get("fileName")
            self.uploader.upload(fileName, self.username)
        elif function == "upload":
            self.downloader.download()
        else:
            print("not correct function: ", function, " should be one of 'download, upload, ls'")
            self.socket.send_string("Either you don't send a function or it is not a valid function")

    def _authenticate(self, userToken):
        """Decode the token into username/password; on a missing or invalid
        token, reply with an error message and return False."""
        if not userToken:
            print("no user token is supplied: ", userToken)
            self.socket.send_string("you should provide a token")
            return False
        self.username, self.password = decodeToken(userToken)
        if not self.username:
            print("can't decode the token: ", userToken)
            self.socket.send_string("Invalid token")
            return False
        return True
def uploadfile(data, result):
    """Asynchronously executed upload routine.

    :param data: parameter string carried by the GET request (a file path).
    :param result: DB lookup result for that request (md5 and url), or a
        falsy value when nothing matched.
    """
    if not result:
        # No DB record: drop the corresponding staged test file instead.
        filename = data.split('\\')[-1]
        path = os.path.join(WaitingUploadPath, filename)
        try:
            os.remove(path)
            print("已删除测试文件%s" % filename)
        except Exception as ex:
            print(ex)
    else:
        connector_reday_for_upload(result)
        Uploader(result).run()
def draw_graph(self, to_GCS=True, remove=True):
    """Plot historical data for each selected ticker, with accompanying
    annotations, then either upload the image to GCS or push it to LINE.

    Args:
        to_GCS(bool): upload the generated image to GCS when True.
        remove(bool): delete the generated image afterwards when True.
    """
    for cnt, code in enumerate(self.result_codes):
        logger.info("draw graph {}".format(code))
        stock_data_df = self.get_stock_data(code)
        package_drawing = PackageDrawing(self, cnt, stock_data_df, code)
        draw_graph = DrawGraph(**package_drawing.__dict__)
        graph_image_path = draw_graph.draw()
        if self.debug:
            # Debug mode: neither save the graph nor notify LINE.
            logger.info("debug mode なので、作成したグラフの保存、ラインへの通知を行いません")
        elif to_GCS:
            image_basename = os.path.basename(graph_image_path)
            uploader = Uploader(bucket_name="yoneda-stock-strategy")
            uploader.upload(
                local_path=graph_image_path,
                gcp_path="result/image/{}".format(image_basename),
                public=True)
        else:
            # Not uploading to GCS: push the chart to LINE instead.
            push_line(str(code), image_path=graph_image_path)
        if remove:
            logger.info("作成したグラフを削除します")
            draw_graph.remove()
        else:
            logger.info("作成したグラフを削除せず、残します: {}".format(
                draw_graph.save_path))
def upload(img_map):
    """Push a local image to OSS under 'wechat/'.

    On success, records the OSS path in img_map, deletes the local file,
    and returns the updated map; returns False when the upload fails.
    """
    local_path = img_map['local_path']
    # 上传
    remote_path = 'wechat/' + local_path
    if not Uploader.upload_file(remote_path, local_path):
        return False
    img_map['oss_path'] = remote_path
    os.remove(local_path)
    return img_map
def setUp(self):
    """Build request fixtures (endpoint, payload, temp CSV files) and the
    mocked HTTP responses used by the uploader tests."""
    self.endpoint = '/api/v1/imports'
    self.headers = {'Content-Type': 'application/json',
                    'Accept': 'application/json', }
    self.nbslug = 'dummy.nationbuilder.com'
    self.nbtoken = '1234567890'
    self.url = "https://" + self.nbslug + self.endpoint + '?access_token=' + self.nbtoken
    self.data = {'import': {
        'file': None,
        # voter fails (Julian 27-nov-2014) member fails Julian 27-nov-2014
        'type': 'people',
        'is_overwritable': True,
    }}
    self.data_json = json.dumps(self.data)
    # The CSV payload travels base64-encoded inside the JSON body.
    self.csv = b'a,b,c,d,e,f\n0,1,2,3,4,5\n6,7,8,9,10,11\n'
    self.file_b64 = b64encode(self.csv)
    self.file_b64ascii = str(self.file_b64, encoding='ascii')
    self.data['import']['file'] = self.file_b64ascii
    self.data_json = json.dumps(self.data)
    self.filename = '/tmp/test_uploader.csv'
    self.err_filename = '/tmp/test_uploader_errors.csv'
    csv_str = 'a,b,c,d,e,f\n0,1,2,3,4,5\n6,7,8,9,10,11\n'
    with open(self.filename, 'w') as fh:
        fh.write(csv_str)
    self.uploader = Uploader(self.filename, self.err_filename)
    # Mocked POST response: returns the id of the new import.
    self.response_post = MagicMock()
    self.response_post.json = lambda: {"import":{"id":5}}
    # Mocked GET responses: first still working, then finished.
    self.response_get0 = MagicMock()
    self.response_get0.json = lambda: {"import":{"status":{"name":"working"}}}
    self.response_get1 = MagicMock()
    self.response_get1.json = lambda: {"import":{"status":{"name":"finished"}}}
    self.response_get2 = MagicMock()
    # Test get csv from results failure_csv
    self.csv = b'Col0,Col1,Col2,Col3,Col4,Col5\na,b,c,d,e,f\n0,1,2,3,4,5\n6,7,8,9,10,11\n'
    self.csv_b64 = b64encode(self.csv)
    self.csv_b64_ascii = str(self.csv_b64, encoding='ascii')
    result = {'result': {'failure_csv': self.csv_b64_ascii}}
    self.response_get2.text = json.dumps(result)
    self.failure_csv = 'self.failure_csv'
def main(port):
    """Peer daemon loop: serve download/upload requests on `port` while
    coordinating with the tracker."""
    #? TODO: when to use udp protocol
    trackerSocket = zhelper.newSocket(zmq.REQ, TRACKER_IP, TRACKER_PORTS_KEEPERS)
    downloadUploadSocket = zhelper.newServerSocket(zmq.REP, getCurrMachineIp(), port)
    downloader = Downloader(downloadUploadSocket, trackerSocket, port)
    uploader = Uploader(downloadUploadSocket, trackerSocket, port)
    while True:
        request = downloadUploadSocket.recv_json()
        print("request received:", request)
        username = authenticate(downloadUploadSocket, request.get("token"))
        downloadUploadSocket.send_string("ACK")
        if not username:
            continue
        # Replace the raw token with the resolved username before dispatch.
        del request["token"]
        request["username"] = username
        # Note the inversion: a client's "download" is served by our
        # uploader, and a client's "upload" by our downloader.
        if request.get("function") == "download":
            uploader.upload(request)
        elif request.get("function") == "upload":
            downloader.download(request)
def initiate_uploader(self, file_path, desc=None):
    """Start a multipart upload for *file_path* and return an Uploader
    bound to it; the part size is derived from the file's total length."""
    source = open_file(file_path=file_path)
    with source:
        part_size = Uploader.calc_part_size(content_length(source))
        initiated = self.api.initiate_multipart_upload(
            self.id, part_size, desc=desc)
        upload_id = initiated['x-oas-multipart-upload-id']
        described = self.api.describe_multipart(self.id, upload_id)
        return Uploader(self, described, file_path=file_path)
def __init__(self, options):
    """Initialise daemon state and prime the policy manager.

    :param options: configuration forwarded to DaemonBase, Downloader
        and Uploader.
    """
    DaemonBase.__init__(self, options)
    self.kill_switch = False  # set externally to stop the main loop
    self.loops = 0
    self.downloader = Downloader(options)
    self.uploader = Uploader(options)
    # Seed the policy manager with the currently-allowed traffic.
    self.allowed_traffic = self.downloader.get_allowed_traffic()
    self.policy_mgr = PolicyMgr("/tmp/dnsmasq.log", self.allowed_traffic, self.options)
    self.policy_mgr.prep_system()
    self.policy_mgr.initial_load()
    self.policy_mgr.rotate_log()
def upload_delete():
    """Delete an uploaded file (and its DB record) identified by sha1."""
    sha1 = request.form["sha1"]
    uploader = Uploader(app.config)
    # NOTE(review): 'delte_from_db' looks like a typo for 'delete_from_db',
    # but it must match the Uploader API -- confirm before renaming.
    uploader.delte_from_db(sha1)
    uploader.setFile(sha1)
    uploader.delete()
    return 'Ok'
class UploaderTests(unittest.TestCase):
    """Tests for Uploader's event emission onto an EventStream."""

    def setUp(self):
        self.events = EventStream()
        self.uploader = Uploader(self.events)

    def test_can_fire_product_add_event(self):
        """product_add() appends a timestamped PRODUCT_ADD event."""
        self.uploader.product_add(test_product)
        event = self.events.stream()[0]
        self.assertTrue(type(event['inserted_at']) is datetime)
        self.assertEqual(event['type'], PRODUCT_ADD)

    def test_can_read_product_csv(self):
        """read_products() emits one event per row of the 4-row fixture."""
        old_length = len(self.events.stream())
        self.uploader.read_products('data/test_products.csv')
        new_length = len(self.events.stream())
        self.assertEqual(new_length, old_length + 4)
        # The second new event is a PRODUCT_ADD carrying the row data.
        e = self.events.stream()[old_length + 1]
        self.assertEqual(e['type'], PRODUCT_ADD)
        self.assertEqual(e['body']['name'], 'testname')

    def test_fires_new_upload_event(self):
        """The first event of an upload batch is PRODUCT_UPLOAD."""
        old_length = len(self.events.stream())
        self.uploader.read_products('data/test_products.csv')
        self.assertEqual(self.events.stream()[old_length]['type'], PRODUCT_UPLOAD)
def __init__(self, client, trayIcon):
    """Set up the desktop scanner: uploader, scan path, stop event, and
    the screenshot filename pattern.

    :param client: upload client handed to Uploader.
    :param trayIcon: tray-icon handle handed to Uploader.
    """
    self.client = client
    self.trayIcon = trayIcon
    self.screenshot_path = ''  # path of the most recently seen screenshot
    self.loader = Uploader(self.client, self.trayIcon)
    self.scan_path = os.path.expanduser('~') + '/Desktop/'
    self.files_in_dir = self.dir_list()
    self.stop_event = threading.Event()
    # OSX screenshot file-name pattern,
    # e.g. "Screen Shot 2015-01-02 at 3.45.01 PM.png".
    self.regex = 'Screen\sShot\s(\d){4}-(\d){2}-(\d){1,2}\sat\s(\d){1,2}\.(\d){1,2}\.(\d){1,2}\s(PM|AM)\.(\w){3}'
    # Two listing snapshots; their symmetric difference identifies a
    # newly added file.
    self.file_list_set_a = set(self.files_in_dir)
    self.file_list_set_b = set(self.files_in_dir)
def test_uploader_stop(self):
    """Drain progress while uploading and stop the uploader after ~4s."""
    worker = Uploader(self.files_list, 12, self.q)
    worker.start()
    started_at = datetime.now()
    while worker.is_active():
        report = self.q.get()
        print(report.done, report.error, report.total)
        # check method to stop uploading and interrupt all uploading process.
        elapsed = (datetime.now() - started_at).seconds
        if elapsed >= 4:
            worker.stop()
    self.print_result(worker.result)
def __init__(self, number, direcotry, q=Queue(), fail_record={}): super(UploadingThread, self).__init__() # 记录自己的线程号 self.thread_number = number # 初始化线程停止控制 self.is_stop = False # 从user_info中读取记录 self.info_ini = QtCore.QSettings("userinfo.ini", QtCore.QSettings.IniFormat) app_id = str(self.info_ini.value("/UserInfo/appID").toString()) secret_id = str(self.info_ini.value("/UserInfo/secretID").toString()) secret_key = str(self.info_ini.value("/UserInfo/secretKey").toString()) bucket = str(self.info_ini.value("/UserInfo/bucket").toString()) # 初始化上传模块 self.up_agent = Uploader(app_id, secret_id, secret_key, bucket) # 初始化失败记录 self.recorder = RecordHandler("fail") # 设置上传队列 self.myq = q # 设置失败记录 self.fail_record = fail_record # 设置一个目录控制器 self.dir = QtCore.QDir(direcotry)
def run_tests(key, secret):
    """End-to-end smoke test: authenticate, upload a photo, exercise the
    photoset APIs, then delete everything that was created."""
    try:
        x = Auth(key, secret)
        x.authenticate()
    except urllib2.HTTPError as e:
        print e.read()
        raise
    # NOTE(review): hard-coded local path -- only works on the author's
    # machine; confirm before reuse.
    filename = "/Users/riyer/Desktop/Screen Shot 2013-06-28 at 7.36.02 PM.png"
    f = open(filename, "rb")
    pic = f.read()
    u = Uploader("test_pic", pic, x)
    u.addTitle("test pic")
    u.setPublic()
    req = u.getRequest()
    try:
        handle = urllib2.urlopen(req)
        res = handle.read()
    except urllib2.HTTPError as e:
        print e.read()
        raise
    photo_id = u.getPhotoIdFromResponse(res)
    p = Photosets(x)
    r = p.createGetListRequest()
    res = execute(r, "createGetListRequest")
    names = p.getPhotosetList(res)
    # NOTE(review): hard-coded primary photo id -- confirm it still exists.
    r = p.createNewSetRequest("test set", "test desc", '9404583236')
    res = execute(r, "createNewSetRequest")
    set_id = p.getPhotosetIdFromResult(res)
    r = p.createAddPhotoRequest(photo_id, set_id)
    execute(r, "createAddPhotoRequest")
    # Clean up: delete the set, then the photo.
    r = p.createPhotosetDeleteRequest(set_id)
    execute(r, "createPhotosetDeleteRequest")
    photos = Photos(x)
    r = photos.createDeletePhotoRequest(photo_id)
    execute(r, "createDeletePhotoRequest")
def main():
    """Wire up and start all asink client worker threads, then sleep
    forever (signals end the process via the handler set up below)."""
    global indexer, uploader, sender, receiver, downloader
    setup_signals()
    logging.info("Asink client started at %s" %
                 (time.strftime("%a, %d %b %Y %X GMT", time.gmtime())))
    #create all threads which will be used to process events
    indexer = Indexer()
    uploader = Uploader()
    sender = Sender()
    receiver = Receiver()
    downloader = Downloader()
    #create and set up queues which are used to pass events between threads
    uploader_queue = Queue()
    indexer.uploader_queue = uploader_queue
    uploader.queue = uploader_queue #set on watcher when initialized
    sender_queue = Queue()
    uploader.sender_queue = sender_queue
    sender.queue = sender_queue
    downloader_queue = Queue()
    receiver.downloader_queue = downloader_queue
    downloader.queue = downloader_queue
    #setup storage provider
    # Uploader gets its own clone; downloader shares the original.
    storage = setup_storage()
    uploader.storage = storage.clone()
    downloader.storage = storage
    #start all threads
    watcher.start_watching(uploader_queue)
    indexer.start()
    uploader.start()
    sender.start()
    receiver.start()
    downloader.start()
    #sleep until signaled, which will call sig_handler
    while True:
        time.sleep(86400) #= 24 hours just for fun
def post(self):
    """ueditor backend endpoint: dispatch on the `action` query argument
    (config / upload* / uploadscrawl / catchimage) and write the result
    as JSON (or JSONP when a valid `callback` is supplied)."""
    CONFIG = {}
    result = {}
    mimetype = 'application/json'
    action = self.get_argument("action", default=None)
    request = self.request
    # Strip /* ... */ comments so ueditor's config file parses as JSON.
    with open(os.path.join(STATIC_PATH,"ueditor","php","config.json")) as fp:
        CONFIG = json.loads(re.sub(r'\/\*.*\*\/', '', fp.read()))
    if action == "config":
        result = CONFIG
    elif action in ('uploadimage', 'uploadfile', 'uploadvideo'):
        # Pick the field name and upload limits for the requested kind.
        if action == 'uploadimage':
            fieldName = CONFIG.get('imageFieldName')
            config = {
                "pathFormat": CONFIG['imagePathFormat'],
                "maxSize": CONFIG['imageMaxSize'],
                "allowFiles": CONFIG['imageAllowFiles']
            }
        elif action == 'uploadvideo':
            fieldName = CONFIG.get('videoFieldName')
            config = {
                "pathFormat": CONFIG['videoPathFormat'],
                "maxSize": CONFIG['videoMaxSize'],
                "allowFiles": CONFIG['videoAllowFiles']
            }
        else:
            fieldName = CONFIG.get('fileFieldName')
            config = {
                "pathFormat": CONFIG['filePathFormat'],
                "maxSize": CONFIG['fileMaxSize'],
                "allowFiles": CONFIG['fileAllowFiles']
            }
        if fieldName in request.files:
            field = request.files[fieldName][0]
            uploader = Uploader(WrapFileObj(field), config, UPLOAD_PATH)
            result = uploader.getFileInfo()
        else:
            result['state'] = u'上传接口出错'
    elif action in ('uploadscrawl'):
        # Scrawl (doodle) upload: the image arrives base64-encoded.
        fieldName = CONFIG.get('scrawlFieldName')
        config = {
            "pathFormat": CONFIG.get('scrawlPathFormat'),
            "maxSize": CONFIG.get('scrawlMaxSize'),
            "allowFiles": CONFIG.get('scrawlAllowFiles'),
            "oriName": "scrawl.png"
        }
        if fieldName in request.form:
            field = request.form[fieldName][0]
            uploader = Uploader(WrapFileObj(field), config, UPLOAD_PATH, 'base64')
            result = uploader.getFileInfo()
        else:
            result['state'] = u'上传接口出错'
    elif action in ('catchimage'):
        config = {
            "pathFormat": CONFIG['catcherPathFormat'],
            "maxSize": CONFIG['catcherMaxSize'],
            "allowFiles": CONFIG['catcherAllowFiles'],
            "oriName": "remote.png"
        }
        fieldName = CONFIG['catcherFieldName']
        if fieldName in request.form:
            # Oddly, the remote-catch form does not submit under this name
            source = []
        elif '%s[]' % fieldName in request.form:
            # ...but under this "name[]" variant instead
            source = request.form.getlist('%s[]' % fieldName)
        _list = []
        for imgurl in source:
            uploader = Uploader(imgurl, config, UPLOAD_PATH, 'remote')
            info = uploader.getFileInfo()
            _list.append({
                'state': info['state'],
                'url': info['url'],
                'original': info['original'],
                'source': imgurl,
            })
        result['state'] = 'SUCCESS' if len(_list) > 0 else 'ERROR'
        result['list'] = _list
    else:
        result['state'] = u'请求地址出错'
    callback = self.get_argument('callback', None)
    if callback:
        if re.match(r'^[\w_]+$', callback):
            # NOTE(review): `result` is still a dict here, so %-formatting
            # interpolates its Python repr, and the json.dumps below then
            # re-encodes the whole string -- confirm the JSONP output is
            # what the client expects.
            result = '%s(%s)' % (callback, result)
            mimetype = 'application/javascript'
        else:
            result = json.dumps({'state': u'callback参数不合法'})
    self.set_header("Content-Type", "application/json")
    self.set_header("Access-Control-Allow-Origin", "*")
    self.set_header("Access-Control-Allow-Headers", "*")
    result = json.dumps(result)
    self.write(result)
            # NOTE(review): fragment of a larger action dispatcher -- the
            # enclosing function and the `elif action == 'uploadvideo':`
            # header lie outside this chunk; indentation reconstructed.
            fieldName = CONFIG.get('videoFieldName')
            config = {
                "pathFormat": CONFIG['videoPathFormat'],
                "maxSize": CONFIG['videoMaxSize'],
                "allowFiles": CONFIG['videoAllowFiles']
            }
        else:
            # Generic file-upload settings.
            fieldName = CONFIG.get('fileFieldName')
            config = {
                "pathFormat": CONFIG['filePathFormat'],
                "maxSize": CONFIG['fileMaxSize'],
                "allowFiles": CONFIG['fileAllowFiles']
            }
        if fieldName in request.files:
            field = request.files[fieldName]
            uploader = Uploader(field, config, static_home)
            result = uploader.getFileInfo()
        else:
            result['state'] = '上传接口出错'
    elif action in ('uploadscrawl'):
        # Scrawl (doodle) upload: the payload arrives base64-encoded.
        fieldName = CONFIG.get('scrawlFieldName')
        config = {
            "pathFormat": CONFIG.get('scrawlPathFormat'),
            "maxSize": CONFIG.get('scrawlMaxSize'),
            "allowFiles": CONFIG.get('scrawlAllowFiles'),
            "oriName": "scrawl.png"
        }
        if fieldName in request.form:
            field = request.form[fieldName]
            uploader = Uploader(field, config, static_home, 'base64')
def action_upload():
    """Flask view implementing the UEditor upload actions.

    Reads the UEditor config file, builds the per-action upload settings
    (image / scrawl / video / file) and hands off to ``Uploader``.

    :returns: JSON produced by ``Uploader.getFileInfo()``:
        state ("SUCCESS" on success), url, title (new file name),
        original (original file name), type and size.
    """
    base64 = "upload"  # upload mode passed to Uploader; "base64" for scrawl
    config_path = os.path.join(app.root_path, 'controller/ueditor/config.json')
    # `with` guarantees the handle is closed even if json.load raises.
    with open(config_path, 'r') as fp:
        config = json.load(fp)
    action = request.args.get('action', None)
    if action == 'uploadimage':
        fieldname = config['imageFieldName']
        config.update({
            "pathFormat": config['imagePathFormat'],
            "maxSize": config['imageMaxSize'],
            "allowFiles": config['imageAllowFiles']
        })
    elif action == 'uploadscrawl':
        # Bug fix: read the field name BEFORE `config` is replaced — the old
        # code looked up 'scrawlFieldName' in the freshly built dict, which
        # does not contain that key, and always raised KeyError.
        fieldname = config['scrawlFieldName']
        config = {
            "pathFormat": config['scrawlPathFormat'],
            "maxSize": config['scrawlMaxSize'],
            "allowFiles": config['scrawlAllowFiles'],
            "oriName": "scrawl.png"
        }
        base64 = "base64"
    elif action == 'uploadvideo':
        # Same ordering fix as the scrawl branch ('videoFieldName' was looked
        # up in the replacement dict and raised KeyError).
        fieldname = config['videoFieldName']
        config = {
            "pathFormat": config['videoPathFormat'],
            "maxSize": config['videoMaxSize'],
            "allowFiles": config['videoAllowFiles']
        }
    else:  # 'uploadfile' and any unrecognized action
        fieldname = config['fileFieldName']
        config.update({
            "pathFormat": config['filePathFormat'],
            "maxSize": config['fileMaxSize'],
            "allowFiles": config['fileAllowFiles']
        })
    from uploader import Uploader
    up = Uploader(fieldname, config, base64)
    return jsonify(up.getFileInfo())
class UploadingThread(QtCore.QThread): def __init__(self, number, direcotry, q=Queue(), fail_record={}): super(UploadingThread, self).__init__() # 记录自己的线程号 self.thread_number = number # 初始化线程停止控制 self.is_stop = False # 从user_info中读取记录 self.info_ini = QtCore.QSettings("userinfo.ini", QtCore.QSettings.IniFormat) app_id = str(self.info_ini.value("/UserInfo/appID").toString()) secret_id = str(self.info_ini.value("/UserInfo/secretID").toString()) secret_key = str(self.info_ini.value("/UserInfo/secretKey").toString()) bucket = str(self.info_ini.value("/UserInfo/bucket").toString()) # 初始化上传模块 self.up_agent = Uploader(app_id, secret_id, secret_key, bucket) # 初始化失败记录 self.recorder = RecordHandler("fail") # 设置上传队列 self.myq = q # 设置失败记录 self.fail_record = fail_record # 设置一个目录控制器 self.dir = QtCore.QDir(direcotry) def run(self): # 当传输队列里有货的的时候 while not self.myq.empty(): # 创建一个线程锁,防止多个线程同时处理一个资源 mutex = QtCore.QMutex() # 上锁 mutex.lock() print "There are %d item(s) left" % self.myq.qsize() if self.is_stop: break # 获取上传资源 item = self.myq.get() # 发出上传信息,View应该将状态改为uploading... self.emit(QtCore.SIGNAL("started(QString)"), item) # 打印上传信息 print("Thread%d is uploading " % self.thread_number + item) absolute_path = str(item) relative_path = self.dir.relativeFilePath(item) print "relative path is " + relative_path try: obj = self.up_agent.upload(absolute_path, relative_path) # self.sleep(2) # 发出完成信息,View应该将该单位消除 if obj['httpcode'] == 200: self.emit(QtCore.SIGNAL("uploaded(QString)"), item) # 传输失败,记录失败信息 else: self.fail_record[item] = obj['message'] self.emit(QtCore.SIGNAL("failed(QString)"), item) # 发射更改进度条信息 self.emit(QtCore.SIGNAL("progress_need_to_change(int)"), self.myq.qsize()) # 文件名错误处理 except TypeError: self.fail_record[item] = u"文件名错误" self.emit(QtCore.SIGNAL("failed(QString)"), item) # 解锁 mutex.unlock()
def main(args):
    """
    Main function - launches the program.

    :param args:
        The Parser arguments
    :type args:
        Parser object

    :returns:
        List of [message] or [message, exit_code]

    :example:
        >>> ["The latitude and longitude values must be valid numbers", 1]
    """
    v = VerbosityMixin()
    if args:
        if args.subs == 'process':
            # Process a local scene archive, optionally uploading the result.
            verbose = True if args.verbose else False
            force_unzip = True if args.force_unzip else False
            stored = process_image(args.path, args.bands, verbose, args.pansharpen,
                                   args.ndvi, force_unzip, args.ndvi1)
            if args.upload:
                u = Uploader(args.key, args.secret, args.region)
                u.run(args.bucket, get_file(stored), stored)
            return ["The output is stored at %s" % stored]
        elif args.subs == 'search':
            try:
                if args.start:
                    args.start = reformat_date(parse(args.start))
                if args.end:
                    args.end = reformat_date(parse(args.end))
            except (TypeError, ValueError):
                # Bug fix: message previously read "You date format ...".
                return ["Your date format is incorrect. Please try again!", 1]
            s = Search()
            try:
                lat = float(args.lat) if args.lat else None
                lon = float(args.lon) if args.lon else None
            except ValueError:
                return ["The latitude and longitude values must be valid numbers", 1]
            result = s.search(paths_rows=args.pathrow, lat=lat, lon=lon, limit=args.limit,
                              start_date=args.start, end_date=args.end, cloud_max=args.cloud)
            if result['status'] == 'SUCCESS':
                v.output('%s items were found' % result['total'], normal=True, arrow=True)
                if result['total'] > 100:
                    return ['Over 100 results. Please narrow your search', 1]
                else:
                    v.output(json.dumps(result, sort_keys=True, indent=4),
                             normal=True, color='green')
                    return ['Search completed!']
            elif result['status'] == 'error':
                return [result['message'], 1]
        elif args.subs == 'download':
            d = Downloader(download_dir=args.dest)
            try:
                bands = convert_to_integer_list(args.bands)
                # Pansharpening needs band 8; NDVI overrides to bands 4 and 5.
                if args.pansharpen:
                    bands.append(8)
                if args.ndvi:
                    bands = [4, 5]
                downloaded = d.download(args.scenes, bands)
                if args.process:
                    force_unzip = True if args.force_unzip else False
                    for scene, src in downloaded.iteritems():
                        if args.dest:
                            path = join(args.dest, scene)
                        else:
                            path = join(settings.DOWNLOAD_DIR, scene)
                        # Keep using Google if the image is before 2015
                        if src == 'google':
                            path = path + '.tar.bz'
                        stored = process_image(path, args.bands, False, args.pansharpen,
                                               args.ndvi, force_unzip)
                        if args.upload:
                            try:
                                u = Uploader(args.key, args.secret, args.region)
                            except NoAuthHandlerFound:
                                return ["Could not authenticate with AWS", 1]
                            except URLError:
                                return ["Connection timeout. Probably the region parameter "
                                        "is incorrect", 1]
                            u.run(args.bucket, get_file(stored), stored)
                        v.output("The output is stored at %s" % stored, normal=True, arrow=True)
                    return ['Image Processing Completed', 0]
                else:
                    return ['Download Completed', 0]
            except IncorrectSceneId:
                return ['The SceneID provided was incorrect', 1]
class Test(unittest.TestCase):
    """Unit tests for Uploader, which drives a NationBuilder
    /api/v1/imports workflow: POST a base64-encoded CSV, poll the import
    status, then decode the failure CSV from the result.

    All HTTP traffic is mocked via MagicMock on uploader.requests."""

    def setUp(self):
        # Endpoint / credential fixtures used to build expected URLs.
        self.endpoint = '/api/v1/imports'
        self.headers = {'Content-Type': 'application/json', 'Accept': 'application/json', }
        self.nbslug = 'dummy.nationbuilder.com'
        self.nbtoken = '1234567890'
        self.url = "https://" + self.nbslug + self.endpoint + '?access_token=' + self.nbtoken
        # Expected POST payload; 'file' is filled with the base64 CSV below.
        self.data = {'import': {
            'file': None,
            # voter fails (Julian 27-nov-2014) member fails Julian 27-nov-2014
            'type': 'people',
            'is_overwritable': True,
        }}
        self.data_json = json.dumps(self.data)
        # CSV fixture, base64-encoded the way Uploader sends it.
        self.csv = b'a,b,c,d,e,f\n0,1,2,3,4,5\n6,7,8,9,10,11\n'
        self.file_b64 = b64encode(self.csv)
        self.file_b64ascii = str(self.file_b64, encoding='ascii')
        self.data['import']['file'] = self.file_b64ascii
        self.data_json = json.dumps(self.data)
        # Write the CSV fixture to disk; Uploader reads it from there.
        self.filename = '/tmp/test_uploader.csv'
        self.err_filename = '/tmp/test_uploader_errors.csv'
        csv_str = 'a,b,c,d,e,f\n0,1,2,3,4,5\n6,7,8,9,10,11\n'
        with open(self.filename, 'w') as fh:
            fh.write(csv_str)
        self.uploader = Uploader(self.filename, self.err_filename)
        # Canned HTTP responses for the mocked requests.post / requests.get:
        # POST -> import id 5; GET -> working, then finished, then the result.
        self.response_post = MagicMock()
        self.response_post.json = lambda: {"import":{"id":5}}
        self.response_get0 = MagicMock()
        self.response_get0.json = lambda: {"import":{"status":{"name":"working"}}}
        self.response_get1 = MagicMock()
        self.response_get1.json = lambda: {"import":{"status":{"name":"finished"}}}
        self.response_get2 = MagicMock()
        # Test get csv from results failure_csv
        self.csv = b'Col0,Col1,Col2,Col3,Col4,Col5\na,b,c,d,e,f\n0,1,2,3,4,5\n6,7,8,9,10,11\n'
        self.csv_b64 = b64encode(self.csv)
        self.csv_b64_ascii = str(self.csv_b64, encoding='ascii')
        result = {'result': {'failure_csv': self.csv_b64_ascii}}
        self.response_get2.text = json.dumps(result)
        self.failure_csv = 'self.failure_csv'

    def test_Uploader(self):
        # Construction sanity check.
        self.assertIsInstance(self.uploader, Uploader)

    def test_Uploader_data(self):
        # Uploader builds the same payload dict as the fixture.
        actual = self.uploader.data
        expected = self.data
        self.assertDictEqual(actual, expected)

    def test_Uploader_data_json(self):
        actual = self.uploader.data_json
        expected = self.data_json
        self.assertEqual(actual, expected)

    def test_csvread2base64(self):
        # CSV on disk -> base64 ASCII string.
        actual = self.uploader.csvread2base64ascii(self.filename)
        expected = self.file_b64ascii
        self.assertEqual(actual, expected)

    def test_base64_2csvfile(self):
        # Truncate the error file, then round-trip base64 -> CSV on disk.
        with open(self.err_filename, 'wb') as fh:
            pass
        self.uploader.err_filename = self.err_filename
        heading = ''
        self.uploader.base64_2csvfile(self.csv_b64_ascii, heading)
        with open(self.err_filename, 'br') as fh:
            actual = fh.read()
        expected = self.csv
        self.assertEqual(actual, expected)

    def test_err_filename(self):
        actual = self.uploader.get_err_filename(self.filename, self.err_filename)
        expected = self.err_filename
        self.assertEqual(actual, expected)

    def test_json_extractor(self):
        # Local name deliberately shadows the json module for this test only.
        json = {"a":{"b":{"c":3}}}
        actual = self.uploader.json_extractor(json, ('a', 'b', 'c',))
        expected = 3
        self.assertEqual(actual, expected)

    def test_upload_status_get_finished(self):
        '''upload_status_get is a generator. Here we just pull the first value.'''
        return_value = self.response_get1
        # Mocks
        requests = uploader.requests
        requests.get = MagicMock(return_value=return_value)
        # Call
        result = next(self.uploader.upload_status_get(5))
        # Assert
        self.assertEqual(result, 'finished')
        url_status = self.uploader.url_join(self.nbslug, ('5',), self.nbtoken)
        requests.get.assert_called_with(url_status, headers=self.headers)

    def test_upload_status_get_working(self):
        '''upload_status_get is a generator. Here we pull two values:
        'working' first, then 'finished'.'''
        # Mocks
        requests = uploader.requests
        requests.get = MagicMock()
        # Return on consecutive calls: working, finished
        requests.get.side_effect = [self.response_get0, self.response_get1, ]
        # Call 1
        result = next(self.uploader.upload_status_get(5))
        # Assert
        self.assertEqual(result, 'working')
        # Call 2
        result = next(self.uploader.upload_status_get(5))
        # Assert
        self.assertEqual(result, 'finished')
        url_status = self.uploader.url_join(self.nbslug, ('5',), self.nbtoken)
        requests.get.assert_called_with(url_status, headers=self.headers)

    def test_upload(self):
        # Patch module-level config and the requests surface used by upload().
        uploader.nbslug = self.nbslug
        uploader.nbtoken = self.nbtoken
        requests = uploader.requests
        requests.post = MagicMock(return_value=self.response_post)
        # Return on 3 consecutive calls: working, finished, result
        requests.get = MagicMock()
        requests.get.side_effect = [self.response_get0, self.response_get1, self.response_get2, ]
        # First pull: the POST that creates the import.
        url = self.uploader.url_join(self.nbslug, (), self.nbtoken)
        next(self.uploader.upload(url, period=1e-6))
        requests.post.assert_called_once_with(url, headers=self.headers, data=self.data_json)
        # Second pull: the status GET for import id 5.
        url_status = self.uploader.url_join(self.nbslug, ('5',), self.nbtoken)
        next(self.uploader.upload(url, period=1e-6))
        requests.get.assert_called_with(url_status, headers=self.headers)

    def test_url_join(self):
        endpoint_parts = ('a', 'b', 'c',)
        actual = self.uploader.url_join(self.nbslug, endpoint_parts, self.nbtoken)
        expected = 'https://' + self.nbslug + self.endpoint + \
            '/a/b/c' + '?access_token=' + self.nbtoken
        self.assertEqual(actual, expected)
def upload():
    """UEditor file-upload endpoint (Flask view).

    Dispatches on the ``action`` query argument:
      * ``config``        — return the editor configuration JSON.
      * ``uploadimage`` / ``uploadfile`` / ``uploadvideo`` — file uploads.
      * ``uploadscrawl``  — base64 "scrawl" (doodle) upload.
      * ``catchimage``    — fetch remote images by URL.

    :returns: a JSON response, or JSONP when a safe ``callback`` is given.
    """
    mimetype = "application/json"
    result = {}
    action = request.args.get("action")
    # Parse the JSON config file; it ships with /* ... */ comments that must
    # be stripped before json.loads can read it.
    with open(os.path.join(app.static_folder, "ueditor", "php", "config.json")) as fp:
        try:
            CONFIG = json.loads(re.sub(r"\/\*.*\*\/", "", fp.read()))
        # Bug fix: was a bare `except:` that swallowed everything including
        # KeyboardInterrupt; json.loads signals bad JSON with ValueError.
        except ValueError:
            CONFIG = {}
    if action == "config":
        # Initial editor handshake: return the configuration to the client.
        result = CONFIG
    elif action in ("uploadimage", "uploadfile", "uploadvideo"):
        # Image / file / video upload.
        if action == "uploadimage":
            fieldName = CONFIG.get("imageFieldName")
            config = {
                "pathFormat": CONFIG["imagePathFormat"],
                "maxSize": CONFIG["imageMaxSize"],
                "allowFiles": CONFIG["imageAllowFiles"],
            }
        elif action == "uploadvideo":
            fieldName = CONFIG.get("videoFieldName")
            config = {
                "pathFormat": CONFIG["videoPathFormat"],
                "maxSize": CONFIG["videoMaxSize"],
                "allowFiles": CONFIG["videoAllowFiles"],
            }
        else:
            fieldName = CONFIG.get("fileFieldName")
            config = {
                "pathFormat": CONFIG["filePathFormat"],
                "maxSize": CONFIG["fileMaxSize"],
                "allowFiles": CONFIG["fileAllowFiles"],
            }
        if fieldName in request.files:
            field = request.files[fieldName]
            uploader = Uploader(field, config, editor.static_folder)
            result = uploader.getFileInfo()
        else:
            result["state"] = "上传接口出错"
    # Bug fix: was `action in ("uploadscrawl")` — without a trailing comma
    # that is a SUBSTRING test, so e.g. action="scrawl" also matched.
    elif action == "uploadscrawl":
        # Scrawl (doodle) upload: base64 payload in the form body.
        fieldName = CONFIG.get("scrawlFieldName")
        config = {
            "pathFormat": CONFIG.get("scrawlPathFormat"),
            "maxSize": CONFIG.get("scrawlMaxSize"),
            "allowFiles": CONFIG.get("scrawlAllowFiles"),
            "oriName": "scrawl.png",
        }
        if fieldName in request.form:
            field = request.form[fieldName]
            uploader = Uploader(field, config, editor.static_folder, "base64")
            result = uploader.getFileInfo()
        else:
            result["state"] = "上传接口出错"
    elif action == "catchimage":  # same tuple-membership fix as above
        config = {
            "pathFormat": CONFIG["catcherPathFormat"],
            "maxSize": CONFIG["catcherMaxSize"],
            "allowFiles": CONFIG["catcherAllowFiles"],
            "oriName": "remote.png",
        }
        fieldName = CONFIG["catcherFieldName"]
        # The catcher posts its URL list under "<field>[]" rather than
        # "<field>". Default to [] so a missing field yields an ERROR
        # response instead of a NameError on `source` (previous behavior).
        if "%s[]" % fieldName in request.form:
            source = request.form.getlist("%s[]" % fieldName)
        else:
            source = []
        _list = []
        for imgurl in source:
            uploader = Uploader(imgurl, config, editor.static_folder, "remote")
            info = uploader.getFileInfo()
            _list.append({"state": info["state"], "url": info["url"],
                          "original": info["original"], "source": imgurl})
        result["state"] = "SUCCESS" if len(_list) > 0 else "ERROR"
        result["list"] = _list
    else:
        result["state"] = "请求地址出错"
    result = json.dumps(result)
    # Optional JSONP wrapping; only plain identifier callbacks are allowed.
    if "callback" in request.args:
        callback = request.args.get("callback")
        if re.match(r"^[\w_]+$", callback):
            result = "%s(%s)" % (callback, result)
            mimetype = "application/javascript"
        else:
            result = json.dumps({"state": "callback参数不合法"})
    res = make_response(result)
    res.mimetype = mimetype
    res.headers["Access-Control-Allow-Origin"] = "*"
    res.headers["Access-Control-Allow-Headers"] = "X-Requested-With,X_Requested_With"
    return res
def main(args):
    """
    Main function - launches the program.

    :param args:
        The Parser arguments
    :type args:
        Parser object

    :returns:
        List

    :example:
        >>> ["The latitude and longitude values must be valid numbers", 1]
    """
    v = VerbosityMixin()
    if args:
        # Optional clip bounds, shared by the 'process' and 'download' paths.
        if 'clip' in args:
            bounds = convert_to_float_list(args.clip)
        else:
            bounds = None
        if args.subs == 'process':
            # Process a local scene archive, optionally uploading the result.
            verbose = True if args.verbose else False
            force_unzip = True if args.force_unzip else False
            stored = process_image(args.path, args.bands, verbose, args.pansharpen,
                                   args.ndvi, force_unzip, args.ndvigrey, bounds)
            if args.upload:
                u = Uploader(args.key, args.secret, args.region)
                u.run(args.bucket, get_file(stored), stored)
            return ["The output is stored at %s" % stored]
        elif args.subs == 'search':
            try:
                if args.start:
                    args.start = reformat_date(parse(args.start))
                if args.end:
                    args.end = reformat_date(parse(args.end))
                if args.latest > 0:
                    # --latest: force a search over the last 365 days;
                    # trimmed to the newest N results below.
                    args.limit = 25
                    end = datetime.now()
                    start = end - relativedelta(days=+365)
                    args.end = end.strftime("%Y-%m-%d")
                    args.start = start.strftime("%Y-%m-%d")
            except (TypeError, ValueError):
                return ["Your date format is incorrect. Please try again!", 1]
            s = Search()
            try:
                lat = float(args.lat) if args.lat else None
                lon = float(args.lon) if args.lon else None
            except ValueError:
                return ["The latitude and longitude values must be valid numbers", 1]
            # Address and explicit lat/lon are mutually exclusive.
            address = args.address
            if address and (lat and lon):
                return ["Cannot specify both address and latitude-longitude"]
            result = s.search(paths_rows=args.pathrow, lat=lat, lon=lon, address=address,
                              limit=args.limit, start_date=args.start, end_date=args.end,
                              cloud_max=args.cloud)
            if result['status'] == 'SUCCESS':
                if args.json:
                    return json.dumps(result)
                if args.latest > 0:
                    # Keep only the args.latest most recent scenes.
                    datelist = []
                    for i in range(0, result['total_returned']):
                        datelist.append((result['results'][i]['date'], result['results'][i]))
                    datelist.sort(key=lambda tup: tup[0], reverse=True)
                    datelist = datelist[:args.latest]
                    result['results'] = []
                    for i in range(0, len(datelist)):
                        result['results'].append(datelist[i][1])
                    result['total_returned'] = len(datelist)
                else:
                    v.output('%s items were found' % result['total'], normal=True, arrow=True)
                if result['total'] > 100:
                    return ['Over 100 results. Please narrow your search', 1]
                else:
                    v.output(json.dumps(result, sort_keys=True, indent=4),
                             normal=True, color='green')
                    return ['Search completed!']
            elif result['status'] == 'error':
                return [result['message'], 1]
        elif args.subs == 'download':
            d = Downloader(download_dir=args.dest)
            try:
                bands = convert_to_integer_list(args.bands)
                if args.process:
                    # Pansharpening needs band 8; NDVI overrides to 4 and 5;
                    # default to natural color (4, 3, 2) when unspecified.
                    if args.pansharpen:
                        bands.append(8)
                    if args.ndvi or args.ndvigrey:
                        bands = [4, 5]
                    if not args.bands:
                        bands = [4, 3, 2]
                downloaded = d.download(args.scenes, bands)
                if args.process:
                    if not args.bands:
                        args.bands = '432'
                    force_unzip = True if args.force_unzip else False
                    for scene, src in downloaded.iteritems():
                        if args.dest:
                            path = join(args.dest, scene)
                        else:
                            path = join(settings.DOWNLOAD_DIR, scene)
                        # Keep using Google if the image is before 2015
                        if src == 'google':
                            path = path + '.tar.bz'
                        stored = process_image(path, args.bands, False, args.pansharpen,
                                               args.ndvi, force_unzip, args.ndvigrey,
                                               bounds=bounds)
                        if args.upload:
                            try:
                                u = Uploader(args.key, args.secret, args.region)
                            except NoAuthHandlerFound:
                                return ["Could not authenticate with AWS", 1]
                            except URLError:
                                return ["Connection timeout. Probably the region parameter "
                                        "is incorrect", 1]
                            u.run(args.bucket, get_file(stored), stored)
                        # NOTE(review): this returns after processing the FIRST
                        # scene — confirm against the original (unflattened)
                        # source whether it belongs after the loop.
                        return ['The output is stored at %s' % stored, 0]
                else:
                    return ['Download Completed', 0]
            except IncorrectSceneId:
                return ['The SceneID provided was incorrect', 1]
def upload():
    """UEditor file-upload endpoint (Flask view).

    Dispatches on the ``action`` query argument:
      * ``config``        — return the editor configuration JSON.
      * ``uploadimage`` / ``uploadfile`` / ``uploadvideo`` — file uploads.
      * ``uploadscrawl``  — base64 "scrawl" (doodle) upload.
      * ``catchimage``    — fetch remote images by URL.

    :returns: a JSON response, or JSONP when a safe ``callback`` is given.
    """
    mimetype = 'application/json'
    result = {}
    action = request.args.get('action')
    # Parse the JSON config file; it ships with /* ... */ comments that must
    # be stripped before json.loads can read it.
    with open(os.path.join(app.static_folder, 'ueditor', 'php', 'config.json')) as fp:
        try:
            CONFIG = json.loads(re.sub(r'\/\*.*\*\/', '', fp.read()))
        # Bug fix: was a bare `except:` that swallowed everything including
        # KeyboardInterrupt; json.loads signals bad JSON with ValueError.
        except ValueError:
            CONFIG = {}
    if action == 'config':
        # Initial editor handshake: return the configuration to the client.
        result = CONFIG
    elif action in ('uploadimage', 'uploadfile', 'uploadvideo'):
        # Image / file / video upload.
        if action == 'uploadimage':
            fieldName = CONFIG.get('imageFieldName')
            config = {
                "pathFormat": CONFIG['imagePathFormat'],
                "maxSize": CONFIG['imageMaxSize'],
                "allowFiles": CONFIG['imageAllowFiles']
            }
        elif action == 'uploadvideo':
            fieldName = CONFIG.get('videoFieldName')
            config = {
                "pathFormat": CONFIG['videoPathFormat'],
                "maxSize": CONFIG['videoMaxSize'],
                "allowFiles": CONFIG['videoAllowFiles']
            }
        else:
            fieldName = CONFIG.get('fileFieldName')
            config = {
                "pathFormat": CONFIG['filePathFormat'],
                "maxSize": CONFIG['fileMaxSize'],
                "allowFiles": CONFIG['fileAllowFiles']
            }
        if fieldName in request.files:
            field = request.files[fieldName]
            uploader = Uploader(field, config, app.static_folder)
            result = uploader.getFileInfo()
        else:
            result['state'] = '上传接口出错'
    # Bug fix: was `action in ('uploadscrawl')` — without a trailing comma
    # that is a SUBSTRING test, so e.g. action='scrawl' also matched.
    elif action == 'uploadscrawl':
        # Scrawl (doodle) upload: base64 payload in the form body.
        fieldName = CONFIG.get('scrawlFieldName')
        config = {
            "pathFormat": CONFIG.get('scrawlPathFormat'),
            "maxSize": CONFIG.get('scrawlMaxSize'),
            "allowFiles": CONFIG.get('scrawlAllowFiles'),
            "oriName": "scrawl.png"
        }
        if fieldName in request.form:
            field = request.form[fieldName]
            uploader = Uploader(field, config, app.static_folder, 'base64')
            result = uploader.getFileInfo()
        else:
            result['state'] = '上传接口出错'
    elif action == 'catchimage':  # same tuple-membership fix as above
        config = {
            "pathFormat": CONFIG['catcherPathFormat'],
            "maxSize": CONFIG['catcherMaxSize'],
            "allowFiles": CONFIG['catcherAllowFiles'],
            "oriName": "remote.png"
        }
        fieldName = CONFIG['catcherFieldName']
        # The catcher posts its URL list under "<field>[]" rather than
        # "<field>". Default to [] so a missing field yields an ERROR
        # response instead of a NameError on `source` (previous behavior).
        if '%s[]' % fieldName in request.form:
            source = request.form.getlist('%s[]' % fieldName)
        else:
            source = []
        _list = []
        for imgurl in source:
            uploader = Uploader(imgurl, config, app.static_folder, 'remote')
            info = uploader.getFileInfo()
            _list.append({
                'state': info['state'],
                'url': info['url'],
                'original': info['original'],
                'source': imgurl,
            })
        result['state'] = 'SUCCESS' if len(_list) > 0 else 'ERROR'
        result['list'] = _list
    else:
        result['state'] = '请求地址出错'
    result = json.dumps(result)
    # Optional JSONP wrapping; only plain identifier callbacks are allowed.
    if 'callback' in request.args:
        callback = request.args.get('callback')
        if re.match(r'^[\w_]+$', callback):
            result = '%s(%s)' % (callback, result)
            mimetype = 'application/javascript'
        else:
            result = json.dumps({'state': 'callback参数不合法'})
    res = make_response(result)
    res.mimetype = mimetype
    res.headers['Access-Control-Allow-Origin'] = '*'
    res.headers['Access-Control-Allow-Headers'] = 'X-Requested-With,X_Requested_With'
    return res
def upload_data():
    """Validate an uploaded photo and persist it.

    Runs the uploaded file through a validation pipeline (MIME type,
    extension, duplicate hash, EXIF presence, capture-date freshness) and
    saves it to the database on success.

    :returns: a JSON ``Response`` whose ``status`` is one of
        ``BadType``, ``Exist``, ``BadExif``, ``VeryOld`` or ``OK`` (with
        the new record's ``id``).
    """
    file = request.files.get('myfile', None)
    uploader = Uploader(app.config)
    response = Response()
    response.headers['Content-Type'] = 'text/json'
    if not uploader.check_mime(file):
        response.data = '{"status":"BadType"}'
        return response
    # Bug fix: check_extension's result was previously assigned to a local
    # and never used, so bad extensions slipped through. Reject them the
    # same way the MIME check above does. TODO confirm intended semantics
    # against Uploader.check_extension.
    if not uploader.check_extension(file.filename):
        response.data = '{"status":"BadType"}'
        return response
    # Duplicate detection by content hash.
    if not uploader.sha1(file):
        response.data = '{"status":"Exist"}'
        return response
    uploader.moveFile(file)
    # Reject files without EXIF metadata; remove the already-moved file.
    if not uploader.get_exif():
        uploader.delete()
        response.data = '{"status":"BadExif"}'
        return response
    # Reject photos older than the configured expiry window.
    if not uploader.check_date(app.config['EXPIRE_DAYS']):
        uploader.delete()
        response.data = '{"status":"VeryOld"}'
        return response
    uploader.get_size()
    uploader.savedb()
    response.data = '{"status":"OK", "id": "' + uploader.id + '" }'
    return response