def setUp(self):
    """Seed the test database with one user and one file owned by that user."""
    super().setUp()
    # Fixture constants reused by the individual tests.
    self.user_id = 1
    self.email = "[email protected]"
    self.password = "******"
    self.file_id = 1
    self.filename = "test-file.csv"
    self.keywords = 1
    # Persist the user first so the file row can reference it.
    self.new_user = User(id=self.user_id, email=self.email, password=self.password)
    self.db_session.add(self.new_user)
    self.db_session.commit()
    self.new_file = File(
        user_id=self.user_id,
        id=self.file_id,
        filename=self.filename,
        keywords=self.keywords,
    )
    self.db_session.add(self.new_file)
    self.db_session.commit()
def post(self):
    """Handle a blobstore upload: dedupe by (path, name), store metadata, redirect.

    Replaces any existing File entity (and its blob) with the same folder
    and filename before persisting the new one.
    """
    upload = self.get_uploads()[0]
    # Resolve the destination folder relative to the session's root directory.
    root = self.session.get('root')
    path = self.request.POST.get('path')
    if path != '':
        fpath = root + '/' + path
    else:
        fpath = root
    fname = upload.filename
    # Fetch the blob metadata once (the original constructed BlobInfo twice,
    # costing an extra datastore lookup).
    blob_info = blobstore.BlobInfo(upload.key())
    fsize = round(blob_info.size / 1000, 2)
    if fsize < 1:
        fsize = 1  # display floor: never report a file smaller than 1 KB
    fdate = blob_info.creation
    # Replace any existing entry with the same folder + name, and its blob.
    qry = File.query(File.path == fpath, File.name == fname)
    result = qry.fetch()
    if len(result) > 0:
        result[0].key.delete()
        blobstore.delete(result[0].blob_key)
    file = File(name=fname,
                path=fpath,
                blob_key=upload.key(),
                size=str(fsize),
                cdate=str(fdate.strftime("%m/%d/%Y %H:%M:%S")))
    file.put()
    self.redirect('/?path=' + path)
def process_csv():
    """List the authenticated user's files (GET) or register a new file and
    queue one scraping task per keyword (POST).

    Returns a Flask response tuple; 401 when the JWT is missing or invalid,
    400 when a POST payload lacks the required fields.
    """
    token_result = validate_jwt(request.headers.get('Authorization'))
    if token_result == 401:
        return "Unauthorized", 401
    if request.method == 'GET':
        # Correlated subquery flags a file "done" once it has at least as many
        # data rows as keywords. Parameterized query — no SQL injection.
        result = engine.execute(
            """
            SELECT id, filename, keywords, created,
                   (SELECT COUNT(*) >= f.keywords FROM data WHERE file_id=f.id) AS status
            FROM file f
            WHERE user_id=%s
            ORDER BY created DESC;
            """,
            [token_result["sub"]])
        converted_result = [list(row) for row in result]
        if converted_result:
            return jsonify(converted_result), 200
        return "Not Found", 404
    elif request.method == 'POST':
        if not find_user(token_result['sub']):
            return "Unauthorized", 401
        request_body = request.json
        # Guard against malformed payloads instead of raising KeyError (which
        # would surface as a 500).
        if not request_body or "filename" not in request_body or "keywords" not in request_body:
            return "Bad Request", 400
        new_file = File(user_id=token_result["sub"],
                        filename=request_body["filename"],
                        keywords=len(request_body["keywords"]))
        db_session.add(new_file)
        # Commit first so new_file.id is populated for the async tasks.
        db_session.commit()
        for keyword in request_body["keywords"]:
            scrape_data_from_google.apply_async(args=[new_file.id, keyword])
        return "Upload Completed", 200
def setUp(self):
    """Seed the test database and stash fixture values for user, file, and data."""
    super().setUp()
    # User fixture values.
    self.user_id = 1
    self.email = "[email protected]"
    self.password = "******"
    # File fixture values.
    self.file_id = 1
    self.filename = "test-file.csv"
    self.keywords = 1
    # Data-row fixture values.
    self.data_id = 1
    self.keyword = "test-keyword"
    self.total_adword = 1
    self.total_link = 1
    self.total_search_result = "about 1,000"
    self.html_code = "test-html-code"
    # Persist parent rows: the user first, then the file that references it.
    self.new_user = User(id=self.user_id, email=self.email, password=self.password)
    self.db_session.add(self.new_user)
    self.db_session.commit()
    self.new_file = File(
        user_id=self.user_id,
        id=self.file_id,
        filename=self.filename,
        keywords=self.keywords,
    )
    self.db_session.add(self.new_file)
    self.db_session.commit()
def get_file_info(self, request):
    """Resolve the request URL to a File descriptor under ``files_root``.

    Raises ForbidenError on traversal attempts ("../") or bare directory
    URLs, NotFoundError when the resolved path is not a regular file.
    """
    url = urllib.parse.unquote(request.get_url().decode(),
                               encoding='utf-8', errors='replace')
    # Reject any attempt to escape the document root.
    if '../' in url:
        raise ForbidenError
    ends_with_slash = url[-1:] == '/'
    # A trailing slash means "serve the directory index page".
    if ends_with_slash:
        target = self.files_root + url + "index.html"
    else:
        target = self.files_root + url
    # Map the extension (text after the last dot) to a content type.
    extension = target.split('.')[-1]
    content = self.content_types.get(extension, '')
    if not os.path.isfile(target):
        # Directory-style URL with no dot anywhere: forbidden listing attempt.
        if ends_with_slash and url.count(".") < 1:
            raise ForbidenError
        raise NotFoundError
    return File(filename=target, file_path=url,
                content_type=content, content_length=os.path.getsize(target))
def test__model__file__serialize(self):
    """serialize must return a snapshot dict, detached from later mutation."""
    expected_result = {
        "uuid": "file identifier",
        "device": "the device",
        "filename": "some_file.txt",
        "content": "hello world",
        "parent_dir_uuid": "some_other_identifier",
        "is_directory": False,
        "is_changeable": False,
    }
    # Build the model from the very same field values we expect back.
    file = File(**expected_result)
    serialized = file.serialize
    self.assertEqual(expected_result, serialized)
    # Mutating the returned dict must not leak back into the model.
    serialized["content"] = "hi!"
    self.assertEqual(expected_result, file.serialize)
def upload_file(current_user, curent_preference, filename, db):
    """Register *filename* in the DB (if new) and link it to the preference.

    No-op when a File row for the same cloud path already exists.
    """
    # Compute the cloud path once — the original recomputed the same
    # Constants.cloud_script_folder_path(...) + filename expression for both
    # the existence query and the insert.
    cloud_path = str(Constants.cloud_script_folder_path(current_user, curent_preference) + filename)
    if File.query.filter_by(file_name=cloud_path).first() is None:
        file = File(file_name=cloud_path)
        db.session.add(file)
        db.session.commit()  # commit so file.id is assigned for the link row
        preference_file = Preference_file(file_id=file.id,
                                          preference_id=curent_preference.preference_id)
        db.session.add(preference_file)
        db.session.commit()
def get_file_info(self, request):
    """Translate the request URL into a File (absolute path + content type)."""
    raw_path = self.check_last_slash(request.url)
    self.check_dots(raw_path)  # presumably rejects ".." segments — verify against helper
    decoded_path = self.try_decode(raw_path)
    absolute_path = os.path.join(self._files_root, decoded_path)
    self._log.debug("full_file_path: " + absolute_path)
    return File(absolute_path, self.get_content_type(decoded_path))
def post(self):
    """Render an article URL to PDF, generate a first-page PNG preview,
    upload both to S3, and persist a File row for the current user.

    Returns ({"message": "OK"}, 200) on success, 500 on any failure.
    """
    try:
        current_user = flask_praetorian.current_user()
        data = request.get_json()
        article_title = data["title"]
        article_url = data["fileUrl"]
        article_id = str(uuid4())
        article_filename = secure_filename(f"{article_id}.pdf")
        article_path = f"/tmp/{article_filename}"
        pdf = weasyprint.HTML(article_url).write_pdf()
        # Close the handle promptly — the original leaked an open file object.
        with open(article_path, "wb") as pdf_file:
            pdf_file.write(pdf)
        # First page -> PNG preview.
        article_doc = fitz.open(article_path)
        preview_page = article_doc.load_page(0)
        preview_pixmap = preview_page.get_pixmap()
        preview_filename = secure_filename(f"{article_id}.png")
        preview_path = f"/tmp/{preview_filename}"
        # writePNG creates the file itself; the original additionally opened
        # the same path in text mode and never closed or used that handle.
        preview_pixmap.writePNG(preview_path)
        storage_client.upload_file(
            Filename=article_path,
            Bucket=bucket_name,
            Key=article_filename,
        )
        storage_client.upload_file(
            Filename=preview_path,
            Bucket=bucket_name,
            Key=preview_filename,
        )
        article = File(
            title=article_title,
            user_id=current_user.id,
            file_url=f"https://ec500-news-analyzer.s3.us-east-2.amazonaws.com/{article_filename}",
            preview_url=f"https://ec500-news-analyzer.s3.us-east-2.amazonaws.com/{preview_filename}",
        )
        db.session.add(article)
        db.session.commit()
        return {"message": "OK"}, 200
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate. (The trailing unreachable 400 return was removed.)
        return {"message": "Server Error"}, 500
def create_file():
    """Upload the request's file content to cloud storage and persist a File row.

    Returns the serialized File with 200, or the validation errors with 400
    (the original returned the error payload with an implicit 200 status).
    """
    file_dictionary = request.json
    try:
        cloud_response = upload(
            file_dictionary['content'],
            folder=f'/share-app-{os.getenv("FLASK_ENV")}')
        file = File(url=cloud_response['secure_url'],
                    cloud_id=cloud_response['asset_id'],
                    user_id=g.current_user.id)
    except ValidationError as e:
        # Bug fix: surface validation failures as 400, not a default 200.
        return {'errors': e.messages, 'messages': 'Something went wrong'}, 400
    file.save()
    return file_schema.jsonify(file), 200
def getTorrentsByCategory(self, category: str) -> List[Torrent]:
    """Return completed torrents in *category*, newest additions first."""
    completed = []
    for entry in self._client.torrents_info(category=category,
                                            sort='added_on', reverse=True):
        # Skip anything that is still downloading.
        if not entry.state_enum.is_complete:
            continue
        file_list = [File(item['name']) for item in entry.files]
        completed.append(Torrent(entry['hash'], entry['name'],
                                 entry['category'], entry['save_path'],
                                 file_list))
    return completed
def Upload_to_cloud(current_user, curent_preference, db, name_path):
    """Copy the local preference folder to cloud storage and register any
    cloud files that are not yet tracked in the database."""
    buffer_dir = "Buffer\\Preference_user_" + str(current_user.id)
    # Rename the staging "preference" folder after the preference itself.
    if os.path.exists(buffer_dir + "\\preference"):
        os.rename(buffer_dir + "\\preference",
                  buffer_dir + f"\\{curent_preference.name}")
    shutil.copytree(name_path,
                    Constants.cloud_folder_path(current_user, curent_preference),
                    dirs_exist_ok=True)
    for path in Files.get_files_from_cloud(current_user, curent_preference):
        if File.query.filter_by(file_name=path).first() is not None:
            print(f"Update File {path}")
            continue
        # Unknown file: insert it, then link it to the preference.
        file = File(file_name=path)
        db.session.add(file)
        db.session.commit()
        preference_file = Preference_file(file_id=file.id,
                                          preference_id=curent_preference.preference_id)
        db.session.add(preference_file)
        print(f"NEW File {path}")
        db.session.commit()
def create_data(session: Session, mypath: str = "C:/Users/Andrey/Pictures/PNG"):
    """Seed departments, objects, and image files from a directory listing.

    Filenames are expected to look like ``<object>_<zoom>.<ext>``; every file
    whose name contains an object's title is attached to that object.

    Args:
        session: open DB session; committed and closed before returning.
        mypath: directory scanned for image files (parameterized — the
            original hard-coded this path).
    """
    department1_id = uuid.uuid4().hex
    department2_id = uuid.uuid4().hex
    department1 = Department(id=department1_id, title='Биология и Общая генетика')
    department2 = Department(id=department2_id, title='Биотехнология')
    session.add(department1)
    session.add(department2)
    onlyfiles = listdir(mypath)
    # Set gives O(1) membership tests; the original used a list, making the
    # duplicate check O(n) per file.
    added_objects = set()
    for obj in onlyfiles:
        splited_obj = obj.split('_')
        if splited_obj[0] in added_objects:
            continue
        obj_id = uuid.uuid4().hex
        new_object = Object(id=obj_id, title=splited_obj[0],
                            department_id=department1_id)
        session.add(new_object)
        # Attach every file whose name contains this object's title.
        # NOTE(review): substring matching can over-match overlapping titles
        # (e.g. "cell" also matches "cellwall") — kept as in the original.
        for fl in onlyfiles:
            splited_fl = fl.split('_')
            zoom_fl = splited_fl[1].split('.')
            if fl.find(splited_obj[0]) != -1:
                fl_id = uuid.uuid4().hex
                new_file = File(id=fl_id, type='image', url=fl,
                                zoom=zoom_fl[0], object_id=obj_id)
                session.add(new_file)
        added_objects.add(splited_obj[0])
    session.commit()
    session.close()
def get_file_info_from_db(self, file_id):
    """Build a stub File record for *file_id* (size is a random multiple of 5
    in [5, 5000]; path '/root', type 'mp3')."""
    stub_size = 5 * random.randint(1, 1000)
    return File(file_id, '/root', stub_size, 'mp3', None)
def create_file(self, name: str) -> None:
    """Create a File child named *name* and register it under self.nodes."""
    self.nodes[name] = File(name, self)
def post(self):
    """Blobstore upload handler.

    CSV uploads ("application/vnd.ms-excel") are parsed row by row — but the
    ArtPiece update branch is commented out, so the parsed fields are unused
    and the blob is simply deleted. Any other upload is treated as a photo:
    an existing File with the same (upper-cased) name is replaced in place,
    otherwise a new File entity is created with serving/thumbnail URLs.
    """
    for blob_info in self.get_uploads():
        if not users.get_current_user():
            # if they are not logged in then delete all the upload data and
            # redirect them to login
            for blob in self.get_uploads():
                blob.delete()
            self.redirect(users.create_login_url("/"))
            return
        # if uploading a csv file, update the datastore to match records in file
        if blob_info.content_type == "application/vnd.ms-excel":
            blobdata = blobstore.BlobReader(blob_info)
            reader = csv.DictReader(blobdata, dialect='excel')
            self.response.headers['Content-Type'] = 'text/plain'
            for row in reader:
                # NOTE(review): these fields are parsed but never written back —
                # the update branch below is commented out. "Width(in)" is read
                # twice (the second assignment shadows the first).
                artist = row["Artist"]
                categories = row["Categories"]
                name = row["Name of Piece"]
                price = row["Price($)"]
                itemNumber = row["Item Number"]
                width = row["Width(in)"]
                height = row["Height(in)"]
                depth = row["Depth(in)"]
                weight = row["Weight"]
                width = row["Width(in)"]
                description = row["Product Description"]
                colors = row["Colors"]
                mediums = row["Mediums"]
                masterNum = row["Master Art Piece (Item Number)"]
                pictureName = row["Picture Name"]
                # check if artpiece with this item number already exists
                qry = ArtPiece.query(ArtPiece.itemNumber == itemNumber)
                existingArtPiece = qry.get()
                # if existingArtPiece:
                # if an artpiece with that itemNumber is already stored then
                # update the record with the new information
            # delete and skip to the next file, we don't save excel files nor
            # do we perform photo related code
            blob_info.delete()
            continue
        # otherwise we assume it is a photo (since it only allows jpg, png,
        # gif, and csv)
        else:
            # check to see if a photo with that name already exists
            qry = File.query(File.file_name == blob_info.filename.upper())
            existingPhoto = qry.get()
            if existingPhoto:
                # if a file with that name is already stored then replace the
                # blob with the new uploaded file
                blobstore.delete(existingPhoto.blob)
                existingPhoto.blob = blob_info.key()
                existingPhoto.uploaded_by = users.get_current_user()
                existingPhoto.thumbnail = images.get_serving_url(
                    blob_info.key(), size=200)
                existingPhoto.url = images.get_serving_url(blob_info.key())
                existingPhoto.put()
            else:
                # add a new file entry if no file with that name already exists
                file = File(blob=blob_info.key(),
                            file_name=blob_info.filename.upper(),
                            uploaded_by=users.get_current_user(),
                            url=images.get_serving_url(blob_info.key()),
                            thumbnail=images.get_serving_url(
                                blob_info.key(), size=200))
                file.put()
        self.redirect("/admin/photos/%s/success" % blob_info.key())