def put(self, id, id_ap=None):
    """Update an ImagensAplicacoes item: either its position or its image.

    Args:
        id: primary key of the item to update.
        id_ap: when given, the new position value (named `id_ap` instead of
            `posicao` due to a Flask routing issue, per the original author).
            When None, the JSON body must carry the new image name under
            'imagem'.

    Returns:
        (payload, status) tuple for Flask-RESTful.
    """
    if id_ap:
        try:
            item = ImagensAplicacoesModel.return_by_id(id)
            item.posicao = id_ap
            item.commit()
            # BUGFIX: the original returned None on success, which
            # Flask-RESTful cannot serialize; report success explicitly.
            return {'mensagem': 'item alterado'}, 200
        except Exception:
            return {'mensagem': 'Ocorreu um erro interno'}, 500
    else:
        try:
            corpo = request.get_json(force=True)
            item = ImagensAplicacoesModel.return_by_id(id)
            imagem_old = item.imagem
            # BUGFIX: the original referenced an undefined `filename` below,
            # so this branch always raised NameError (after already deleting
            # the old object from S3) and answered 500. The new file name
            # comes from the request body.
            filename = corpo['imagem']
            item.imagem = filename
            item.commit()
            delete_from_s3('del-vetro', 'imagens/' + imagem_old)
            # Assumes the new image file was already saved locally by the
            # upload endpoint -- TODO confirm against the client flow.
            upload_to_s3(os.getcwd() + UPLOAD_FOLDER + '/' + filename,
                         'del-vetro', 'imagens/' + filename)
            os.remove(os.path.join(os.getcwd() + UPLOAD_FOLDER, imagem_old))
            # NOTE(review): sibling resources answer with the key 'mensagem';
            # kept as 'message' here to avoid breaking existing clients.
            return {
                'message': 'item alterado',
            }, 201
        except Exception:
            return {'mensagem': 'Ocorreu um erro interno'}, 500
def handle_video(filename):
    """Remux an uploaded video into an mp4 container, grab a thumbnail,
    and upload both to S3.

    Args:
        filename: name of the source file inside settings.UPLOAD_DIR.

    Returns:
        (video_url, thumbnail_url) tuple of S3 URLs.
    """
    outputvideo = tempfile.NamedTemporaryFile(delete=False)
    thumbnail = tempfile.NamedTemporaryFile(delete=False)
    try:
        outputvideo.close()
        path = os.path.join(settings.UPLOAD_DIR, filename)
        # Copy the streams into an mp4 container without re-encoding.
        print(sh.ffmpeg('-y', '-i', path, '-c', 'copy', '-f', 'mp4',
                        outputvideo.name))
        # Generate a thumbnail.
        # BUGFIX: `sh` passes arguments straight to ffmpeg (no shell), so
        # the filter expression must not carry literal double quotes; a raw
        # string avoids the invalid '\,' escape sequence as well.
        # NOTE(review): two -vf options are supplied; ffmpeg honors only the
        # last one (fps=1/600) -- confirm this is intentional.
        print(sh.ffmpeg('-ss', '3', '-i', path,
                        '-vf', r'select=gt(scene\,0.4)',
                        '-frames:v', '5', '-vsync', 'vfr',
                        '-vf', 'fps=fps=1/600',
                        '-y', '-f', 'mjpeg', thumbnail.name))
        if 'h264' in filename:
            mp4_filename = filename.split('h264')[0] + 'mp4'
        else:
            mp4_filename = filename
        video_url = upload_to_s3(outputvideo.name, mp4_filename)
        thumbnail_url = upload_to_s3(thumbnail.name, mp4_filename + '.jpg')
        return (video_url, thumbnail_url)
    finally:
        # The original's `except: raise` was a no-op; the temp files are
        # always cleaned up here either way.
        os.remove(outputvideo.name)
        os.remove(thumbnail.name)
def sign():
    """Sign a document to the blockchain network.

    GET renders the form; POST stores the uploaded document locally,
    mirrors it to S3 keyed by its hash, and records a transaction.
    """
    if request.method == 'GET':
        return render_template("sign.html")
    if request.method == 'POST':
        # ROBUSTNESS: request.files['document'] raised BadRequestKeyError
        # when the field was missing; .get() lets us answer gracefully.
        file = request.files.get('document')
        if not file:
            return render_template(
                "sign.html", message="Cannot create a document in blockchain")
        filename = secure_filename(file.filename)
        create_new_folder(app.config['UPLOAD_FOLDER'])
        path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(path)
        # The document is keyed in S3 by its client-computed hash.
        s3.upload_to_s3(path, request.form.get('doc_hash'))
        data = {
            'author': request.form.get('author'),
            'title': request.form.get('title'),
            'description': request.form.get('description'),
            'doc_hash': request.form.get('doc_hash'),
            'signature': request.form.get('signature'),
            's3_url': 'https://blockchain.s3.sapham.net/{}'.format(
                request.form.get('doc_hash'))
        }
        new_tx = utils.create_new_transaction(data)
        if new_tx:
            return render_template("sign.html", message=new_tx['message'])
        return render_template(
            "sign.html", message="Cannot create a document in blockchain")
    # BUGFIX: any other method previously fell off the end returning None.
    return render_template("sign.html")
def put(self, id, posicao=None):
    """Update a ProdutosModel item: either its position, or its
    title/text/image."""
    # This conditional only saves new positions, nothing else to touch here.
    if posicao:
        try:
            item = ProdutosModel.return_by_id(id)
            item.posicao = posicao
            item.commit()
        except:
            return {'mensagem': 'Ocorreu um erro interno'}, 500
        # NOTE(review): nothing is returned on success of the position
        # branch -- Flask-RESTful cannot serialize None; confirm callers.
    else:
        try:
            item = ProdutosModel.return_by_id(id)
            item.titulo = request.form['titulo']
            item.texto = request.form['texto']
            files = request.files.getlist('files')
            if len(files) != 0:
                if len(files) != 1:
                    # Exactly one image is accepted per update.
                    resp = jsonify({'mensagem' : 'É necessário enviar apenas uma imagem'})
                    resp.status_code = 400
                    return resp
                errors = {}
                caminho = ""
                for file in files:
                    if file and allowed_file(file.filename):
                        # Name the file after the current timestamp; retry
                        # until the name does not collide on disk.
                        filename = secure_filename(str(datetime.now()).replace(".","_")) + ".jpg"
                        caminho = os.path.join(os.getcwd() + UPLOAD_FOLDER, filename)
                        while os.path.isfile(caminho):
                            filename = secure_filename(str(datetime.now()).replace(".","_")) + ".jpg"
                            caminho = os.path.join(os.getcwd() + UPLOAD_FOLDER, filename)
                        file.save(caminho)
                    else:
                        errors[file.filename] = 'Tipo do arquivo não permitido'
                if errors:
                    errors['mensagem'] = 'Algo de errado não está certo'
                    resp = jsonify(errors)
                    resp.status_code = 500
                    return resp
                # Swap the stored image: commit the new name, delete the old
                # object from S3, upload the new file, remove the old local
                # copy.
                imagem_old = item.imagem.split("/")[-1]
                item.imagem = os.path.join(UPLOAD_FOLDER, filename)
                item.commit()
                delete_from_s3('del-vetro', 'imagens/' + imagem_old)
                upload_to_s3(os.getcwd() + UPLOAD_FOLDER + '/' + filename, 'del-vetro', 'imagens/' + filename)
                os.remove(os.path.join(os.getcwd() + UPLOAD_FOLDER, imagem_old))
            # NOTE(review): when no file is sent, titulo/texto changes are
            # assigned but apparently never committed -- TODO confirm the
            # intended nesting with the original author.
            return {
                'mensagem': 'item alterado',
            }, 201
        except:
            print("deu ruim")  # leftover debug output
            return {'mensagem': 'Ocorreu um erro interno'}, 500
def get_animation(bot, update):
    """Telegram handler: download an incoming animation, persist its
    metadata, upload it to S3 and close the conversation.

    Args:
        bot: telegram Bot instance used to fetch the file.
        update: incoming telegram Update carrying the animation.

    Returns:
        ConversationHandler.END to terminate the conversation.
    """
    telegram_file_id = update.message.animation.file_id
    file = bot.get_file(file_id=telegram_file_id)
    # Store under a random name to avoid collisions.
    name = f'{uuid.uuid4()}.mp4'
    file.download(f'temp_media/{name}')
    telegram_id = update.message.chat.id
    user_id = find_user_id_by_telegram_id(telegram_id=telegram_id)
    save_img(user_id=user_id, img_name=name, media_type='animation',
             telegram_file_id=telegram_file_id)
    # BUGFIX: upload to S3 *before* telling the user the upload completed
    # (the original acknowledged first and uploaded afterwards). The
    # leftover debug print of the whole message was also dropped.
    upload_to_s3(img_name=name)
    update.message.reply_text("upload completed")
    return ConversationHandler.END
def save_image_result(image_path, result):
    """Upload the image at ``image_path`` to S3 and persist its result
    payload as a JSON file under ``results/``."""
    uploaded_url = upload_to_s3(image_path)
    # The JSON file is named after the last image-name match in the path.
    image_name = re_image_name.findall(image_path).pop()
    payload = {
        'image_url': uploaded_url,
        'date': datetime.now().isoformat(),
        'result': result,
    }
    save_image_result_to_json('results/{}.json'.format(image_name), payload)
def post(self):
    """Create a new ProdutosModel item from form fields `titulo`/`texto`
    and exactly one image file, then mirror the image to S3."""
    titulo = request.form['titulo']
    texto = request.form['texto']
    # New items go to the end of the ordering; 9999 acts as "last position".
    posicao=9999
    if 'files' not in request.files or not titulo or not texto:
        resp = jsonify({'mensagem' : 'É necessário enviar uma imagem, um título e um texto.'})
        resp.status_code = 400
        return resp
    files = request.files.getlist('files')
    if len(files) != 1:
        # Exactly one image is accepted.
        resp = jsonify({'mensagem' : 'É necessário enviar apenas uma imagem'})
        resp.status_code = 400
        return resp
    errors = {}
    caminho = ""
    for file in files:
        if file and allowed_file(file.filename):
            # Name the file after the current timestamp; retry until the
            # name does not collide with an existing file on disk.
            filename = secure_filename(str(datetime.now()).replace(".","_")) + ".jpg"
            caminho = os.path.join(os.getcwd() + UPLOAD_FOLDER, filename)
            while os.path.isfile(caminho):
                filename = secure_filename(str(datetime.now()).replace(".","_")) + ".jpg"
                caminho = os.path.join(os.getcwd() + UPLOAD_FOLDER, filename)
            file.save(caminho)
        else:
            errors[file.filename] = 'Tipo do arquivo não permitido'
    if errors:
        errors['mensagem'] = 'Algo de errado não está certo'
        resp = jsonify(errors)
        resp.status_code = 500
        return resp
    # Save the DB row first, then mirror the image to the S3 bucket.
    item = ProdutosModel(titulo = titulo, texto = texto, imagem = os.path.join(UPLOAD_FOLDER, filename), posicao = posicao)
    try:
        item.save()
        upload_to_s3(os.getcwd() + UPLOAD_FOLDER + '/' + filename, 'del-vetro', "imagens/" + filename)
        return {
            'mensagem': 'item criado',
        }, 201
    except:
        return {'mensagem': 'Ocorreu um erro interno'}, 500
def test_aws_s3_file_and_dir_uploading(self, mock_boto3):
    """Verify upload_to_s3 forwards the right paths both for a single
    file and for every file inside a directory."""
    stub_client = MagicMock()
    stub_client.list_buckets.return_value = self.list_buckets_response
    stub_resource = MagicMock()
    mock_boto3.client.return_value = stub_client
    mock_boto3.resource.return_value = stub_resource

    bucket_name = self.list_buckets_response['Buckets'][0]['Name']
    local_source_path = '/tmp/abc.txt'
    remote_target_path = 'dir/sub_dir'
    base_name = os.path.split(local_source_path)[-1]
    expected_target_path = os.path.join(remote_target_path, base_name)

    # Single-file upload forwards (source, bucket, target).
    upload_to_s3(bucket_name, local_source_path, remote_target_path)
    stub_resource.meta.client.upload_file.assert_called_with(
        local_source_path, bucket_name, expected_target_path)

    # Directory upload must push every file found in the directory.
    first_name = 'abc.txt'
    second_name = 'xyz.txt'
    first_path = '{}/{}'.format(self.temp_dir, first_name)
    second_path = '{}/{}'.format(self.temp_dir, second_name)
    os.mknod(first_path)
    os.mknod(second_path)
    upload_to_s3(bucket_name, self.temp_dir, remote_target_path, is_dir=True)
    expected_calls = [
        call(second_path, bucket_name,
             '{}/{}'.format(remote_target_path, second_name)),
        call(first_path, bucket_name,
             '{}/{}'.format(remote_target_path, first_name)),
    ]
    stub_resource.meta.client.upload_file.assert_has_calls(
        expected_calls, any_order=True)
def run():
    """Fetch Instagram media near a fixed point, push the pickled batch to
    S3, and reschedule itself every 300 seconds."""
    try:
        lat, lon = 24.6333, 46.7167
        locations = api.location_search(lat=lat, lng=lon,
                                        distance=5000, count=5000)
        target_path = 'instagram/%sgrams' % (str(datetime.now()))
        grams = get_lots_of_grams(locations)
        payload = pickle.dumps(grams, -1)
        upload_to_s3(target_path, payload)
    except Exception:
        # Best-effort: a failed fetch/upload is skipped; the timer below
        # still schedules the next run. BUGFIX: the original bare `except:`
        # also swallowed SystemExit and KeyboardInterrupt.
        pass
    threading.Timer(300, run).start()
def run():
    """Repeatedly fetch trending Foursquare checkins and upload them to S3
    as JSON (effectively an infinite polling loop)."""
    remaining = 999999999999999999
    while remaining > 0:
        try:
            checkins = get_many_checkins(ll, total_time)
            target_path = 'foursquare/%sfoursquare_trending.json' % (str(datetime.now()))
            upload_to_s3(target_path, json.dumps(checkins))
            # BUGFIX: plain decrement instead of `starting += -1`; the
            # unused `timestr` and dead commented-out code were removed.
            # The decrement stays inside the try (as in the original), so
            # failed iterations do not consume the budget.
            remaining -= 1
        except Exception:
            # Best-effort polling: skip a failed iteration. BUGFIX: the
            # original bare `except:` also swallowed KeyboardInterrupt,
            # making the loop unstoppable.
            pass
def run():
    """Repeatedly fetch tweets around a fixed lat/long and upload each
    batch to S3 as JSON (effectively an infinite polling loop)."""
    remaining = 999999999999999999999
    while remaining > 0:
        try:
            latlong = [24.6333, 46.7167]  # previously [22.280893, 114.173035]
            tweets = get_lots_of_tweets(latlong)
            target_path = 'twitter/%stweets.json' % (str(datetime.now()))
            upload_to_s3(target_path, json.dumps(tweets))
            # BUGFIX: plain decrement instead of `starting += -1`; the dead
            # commented-out code and unused binding were removed.
            remaining -= 1
        except Exception:
            # Best-effort: skip a failed batch. BUGFIX: the original bare
            # `except:` also swallowed KeyboardInterrupt, making the loop
            # unstoppable.
            pass
def run():
    """Repeatedly fetch tweets around a fixed lat/long and upload each
    batch to S3 as JSON (effectively an infinite polling loop)."""
    remaining = 999999999999999999999
    while remaining > 0:
        try:
            latlong = [24.6333, 46.7167]  # previously [22.280893, 114.173035]
            tweets = get_lots_of_tweets(latlong)
            target_path = 'twitter/%stweets.json' % (str(datetime.now()))
            upload_to_s3(target_path, json.dumps(tweets))
            # BUGFIX: plain decrement instead of `starting += -1`; the dead
            # commented-out code and unused binding were removed.
            remaining -= 1
        except Exception:
            # Best-effort: skip a failed batch. BUGFIX: the original bare
            # `except:` also swallowed KeyboardInterrupt, making the loop
            # unstoppable.
            pass
def run():
    """Fetch one batch of tweets near a fixed lat/long and archive it on
    S3 as JSON."""
    batch = get_lots_of_tweets([22.280893, 114.173035])
    destination = 'twitter/%stweets.json' % (str(datetime.now()))
    upload_to_s3(destination, json.dumps(batch))
def __init__(self, access_key, secret_key, bucket): self.upload = lambda k,v: s3.upload_to_s3(access_key, secret_key, bucket, k, v, 'application/python-pickle', 'zlib')
def async_upload(**kwargs):
    """Background job: ingest one uploaded file (image or video), push it
    to S3, create a PyBossa task for it and notify the socket.io room.

    Expected kwargs: project_id, project_name, camera_id,
    deploymentLocationID, filename, path, room, duplicates.

    Returns:
        dict describing the completed job (also emitted as 'jobcompleted').
    """
    sio = SocketIO(settings.SOCKETIO_SERVER, settings.SOCKETIO_PORT)
    project_id = kwargs['project_id']
    project_name = kwargs['project_name']
    camera_id = kwargs['camera_id']
    deploymentLocationID = kwargs['deploymentLocationID']
    filename = kwargs['filename']
    path = kwargs['path']
    room = kwargs['room']
    duplicates = kwargs['duplicates']
    # NOTE(review): the file is opened in text mode although it is later
    # fed to PIL -- confirm 'rb' is not required on this platform.
    with open(path) as file:
        mime = magic.from_file(path, mime=True)
        # Anything that is not an image is treated as video.
        isvideo = True
        if 'image' in mime:
            isvideo = False
        if isvideo:
            video_url, thumbnail_url = handle_video(filename)
            tmp = dict(project_id=project_id, filename=filename,
                       url=thumbnail_url, video_url=video_url, isvideo=True,
                       camera_id=camera_id, ahash=None,
                       content_type="video/mp4",
                       deploymentLocationID=deploymentLocationID)
            task = create_task(pbclient, **tmp)
            final = dict(status='ok', exif=None, task=task.__dict__['data'],
                         room=room)
            sio.emit('jobcompleted', final)
            return final
        else:
            try:
                # Get from Exif DateTimeOriginal
                exif_dict = piexif.load(path)
                exif_dict.pop('thumbnail')
                data_d = {}
                for ifd in exif_dict:
                    data_d[ifd] = {
                        piexif.TAGS[ifd][tag]["name"]: exif_dict[ifd][tag]
                        for tag in exif_dict[ifd]}
                # Resize file to settings size (in place, on disk).
                thumbnail = Image.open(file)
                thumbnail.thumbnail(settings.THUMBNAIL)
                thumbnail.save(path)
                # EXIF is stripped from the stored copy after extraction.
                exif = 'removed'
                piexif.remove(path)
                Create_time = data_d['Exif']['DateTimeOriginal']
            except InvalidImageDataError:
                exif = 'This image types does not support EXIF'
                Create_time = None
            except KeyError:
                # Image has EXIF but no DateTimeOriginal tag.
                exif = 'This image types does not support EXIF'
                Create_time = None
            # Hash-based duplicate detection against existing tasks.
            image_exists, ahash, task = check_exists(path)
            if duplicates == 'No':
                # Caller opted out of duplicate suppression.
                image_exists = False
            if image_exists is False:
                data_url = upload_to_s3(path, filename)
                tmp = dict(project_id=project_id, filename=filename,
                           url=data_url, video_url=None, isvideo=False,
                           camera_id=camera_id, ahash=ahash,
                           content_type=mime, Create_time=Create_time,
                           deploymentLocationID=deploymentLocationID)
                task = create_task(pbclient, **tmp)
                final = dict(status='ok', exif=exif,
                             task=task.__dict__['data'], room=room)
                sio.emit('jobcompleted', final)
                return final
            else:
                # Duplicate found: report the existing task instead.
                final = dict(status='ok', exif=exif, task=task, room=room)
                sio.emit('jobcompleted', final)
                return final
def run():
    """Grab the current trending checkins and archive them on S3 as JSON."""
    trending = get_many_checkins(ll, total_time)
    destination = 'foursquare/%sfoursquare_trending.json' % (str(datetime.now()))
    upload_to_s3(destination, json.dumps(trending))