def _get_client():
    """Build a zeep SOAP client for the ZarinPal WebGate WSDL.

    Both the connect timeout and the per-operation timeout are capped at
    5 seconds so a slow payment gateway cannot stall the caller.
    """
    return Client(
        'https://www.zarinpal.com/pg/services/WebGate/wsdl',
        transport=Transport(timeout=5, operation_timeout=5),
    )
def _setup_client(self, session, plugins):
    """Create a zeep client bound to the WSDL named by the WSDL_FILE env var.

    Args:
        session: requests.Session carrying auth/TLS settings for the transport.
        plugins: iterable of zeep plugins to attach to the client.

    Returns:
        A configured zeep.Client with a 10 second transport timeout.
    """
    return Client(
        wsdl=os.getenv("WSDL_FILE"),
        transport=Transport(session=session, timeout=10),
        plugins=plugins,
    )
load_dotenv() # Load Environmental Variable wsdl = os.getenv("WSDL_FILE") username = os.getenv("UCM_USERNAME") password = os.getenv("UCM_PASSWORD") ucm_pub_url = f'https://{os.getenv("UCM_PUB_ADDRESS")}:8443/axl/' # Create Session, do not verify certificate, enable basic auth session = Session() session.verify = False session.auth = HTTPBasicAuth(username, password) # create transport with our session and 10 second timeout transport = Transport(session=session, timeout=10) history = HistoryPlugin() # create the Zeep client client = Client(wsdl, transport=transport, plugins=[history]) # create the service proxy pointing to our UCM service = client.create_service( binding_name="{http://www.cisco.com/AXLAPIService/}AXLAPIBinding", address=ucm_pub_url, ) # Create Line using factories axl_factory = client.type_factory("ns0") xsd_factory = client.type_factory("xsd")
def __init__(self, url, user, password):
    """Connect a zeep client to ``url``'s WSDL using HTTP basic auth.

    Args:
        url: base service URL; "/?wsdl" is appended to locate the WSDL.
        user: basic-auth username.
        password: basic-auth password.
    """
    http_session = Session()
    http_session.auth = HTTPBasicAuth(user, password)
    transport = Transport(session=http_session, timeout=10)
    self.client = Client("{0}/?wsdl".format(url), transport=transport)
import json
import pdb
from django.http import HttpResponse, HttpResponseBadRequest
from django.http.response import HttpResponseServerError
from django.shortcuts import get_object_or_404
from zeep import Client, Transport
from zeep.cache import SqliteCache
from django.conf import settings
from zeep.helpers import serialize_object
from dateutil import parser
from .util import parsearFecha, MyEncoder
from .decoradores import access_token_requerido
from .models import Reserva, Cliente, Vendedor

# Module-level SOAP clients: `soap` uses the configured WSDL with a
# SQLite-backed schema cache; `client` points directly at the vehicle
# reservation WCF service.
transport = Transport(cache=SqliteCache())
soap = Client(settings.URL_WSDL, transport=transport)
client = Client(
    "http://romeroruben-001-site1.itempurl.com/WCFReservaVehiculos.svc?singlewsdl"
)


@access_token_requerido
def getClientes(request):
    # Return every Cliente serialized via its dic() helper as a JSON array.
    # Access is gated by the access-token decorator.
    clientes = Cliente.objects.all()
    results = [c.dic() for c in clientes]
    return HttpResponse(json.dumps(results), content_type='application/json')


@access_token_requerido
def getVendedores(request):
    # NOTE: function body continues beyond this chunk.
def initialize(config_file_path, client_tpid):
    """Load the JSON config file and initialize all module-level services.

    On success the following module globals are populated: ``log_service``,
    ``service_provider_configuration``, ``vendor_id``, ``user_name``,
    ``password``, ``slack``, ``database_service`` and ``iqmetrix_client``.

    Args:
        config_file_path: path to a JSON configuration file.
        client_tpid: client identifier forwarded to the log service.

    Raises:
        Exception: when the config file cannot be read/parsed, or when any
            required configuration section or key is missing.
    """
    try:
        # `with` guarantees the handle is closed even when json.load raises
        # (the original leaked the open file on parse errors).
        with open(config_file_path, 'r') as stream:
            config_data = json.load(stream)
    except Exception as err:
        # Chain the cause so the original parse/IO error is not lost.
        raise Exception('Unable to process specified config file: {0}.'.format(
            config_file_path)) from err

    # Initialize Log Service
    log_configuration = config_data.get('log_configuration')
    if log_configuration and all(
            k in log_configuration for k in ('log_path', 'log_level')):
        global log_service
        log_service = LogService(log_path=log_configuration['log_path'],
                                 log_level=log_configuration['log_level'],
                                 client_tpid=client_tpid).logger
    else:
        raise Exception('Log configuration is missing or incomplete.')

    # Retrieve Tessco Credentials
    global service_provider_configuration
    service_provider_configuration = config_data.get(
        'service_provider_configuration')
    if not service_provider_configuration or not all(
            k in service_provider_configuration
            for k in ('wsdl_path', 'vendor_id', 'user_name', 'password',
                      'timeout', 'slack_webhook')):
        raise Exception(
            'Service Provider configuration is missing or incomplete')

    global vendor_id
    vendor_id = service_provider_configuration['vendor_id']
    global user_name
    user_name = str(service_provider_configuration['user_name'])
    global password
    password = str(service_provider_configuration['password'])
    if not vendor_id or not user_name or not password:
        raise Exception('IQMetrix credentials is missing or incomplete.')

    # Retrieve webhook from config.
    global slack
    slack = slack_utility.SlackUtility(
        log_service, str(service_provider_configuration['slack_webhook']))

    # Initialize Database Service
    database_configuration = config_data.get('database_configuration')
    if not database_configuration or not all(
            k in database_configuration
            for k in ('host', 'username', 'password', 'db_name', 'schema',
                      'port')):
        raise Exception('Database configuration is missing or incomplete')
    global database_service
    database_service = DatabaseService(database_configuration['host'],
                                       database_configuration['username'],
                                       database_configuration['password'],
                                       database_configuration['db_name'],
                                       database_configuration['schema'],
                                       database_configuration['port'])

    # Initialize iQmetrix client - pull in WSDL file
    global iqmetrix_client
    iqmetrix_client = Client(
        service_provider_configuration['wsdl_path'],
        transport=Transport(timeout=service_provider_configuration['timeout']))
def get_client(wsdl: AnyHttpUrl) -> Client:
    """Return a zeep client for ``wsdl`` over an unverified-TLS session.

    NOTE(review): certificate verification is disabled, matching the original
    behavior — confirm this is intentional for the target host.
    """
    http_session = Session()
    http_session.verify = False
    return zeep.Client(wsdl=wsdl, transport=Transport(session=http_session))
# UI state shared across the eel-exposed callbacks.
index_movie = None
index_screening = None
indexs_seats = []
index_reservation = None
pom_index = None
user = None

url = 'http://localhost:8080'

# Route all SOAP traffic through the local proxy on port 4040.
session = Session()
session.proxies = {
    "http": 'http://localhost:4040',
    "https": 'https://localhost:4040'
}

# One zeep service proxy per backend endpoint, all sharing the proxied session.
movie_Service = Client(url + '/movie/service?wsdl',
                       transport=Transport(session=session)).service
image_Service = Client(url + '/image/service?wsdl',
                       transport=Transport(session=session)).service
screening_Service = Client(url + '/screening/service?wsdl',
                           transport=Transport(session=session)).service
reservation_Service = Client(url + '/reservation/service?wsdl',
                             transport=Transport(session=session)).service
user_Service = Client(url + '/user/service?wsdl',
                      transport=Transport(session=session)).service

loop = asyncio.get_event_loop()


@eel.expose
def create_view_list_movies():
    # Fetch all movies from the SOAP movie service.
    # NOTE: function body continues beyond this chunk.
    movies = movie_Service.getAllMovie()
# initialize Blue Prism Parameters wsdl = appConfig.BLUE_PRISM_WSDL queueName = appConfig.BLUE_PRISM_QUEUE # create an authorized reddit instance reddit = praw.Reddit(client_id = appConfig.REDDIT_CLIENT_ID, client_secret = appConfig.REDDIT_CLIENT_SECRET, username = appConfig.REDDIT_USERNAME, password = appConfig.REDDIT_PW, user_agent = appConfig.REDDIT_USER_AGENT) # to find the top most submission in the subreddit subreddit = reddit.subreddit("wallstreetbets") session = Session() session.auth = HTTPBasicAuth(appConfig.BLUE_PRISM_USERNAME, appConfig.BLUE_PRISM_PW) client = Client(wsdl=wsdl, transport=Transport(session=session)) for submission in subreddit.hot(limit = 10): # displays the submission title print(submission.title) # displays the net upvotes of the submission print(submission.score) # displays the submission's ID print(submission.id) # displays the url of the submission print(submission.url) client.service.InsertintoQueue(queueName, submission.id, submission.title, submission.score, submission.url, '')
def init_zeep_lib(self, url):
    """Create the SOAP client for the Tambov API with HTTP basic auth.

    Bug fix: zeep's ``Transport`` has no ``http_auth`` keyword — HTTP
    authentication is carried by the underlying ``requests.Session`` — so the
    original call raised ``TypeError`` at construction time.  The credentials
    now go on a session attached to the transport, which is the documented
    zeep approach.

    Args:
        url: WSDL URL of the service.
    """
    from zeep import Client, Transport
    # requests is a hard dependency of zeep, so these imports are always
    # available wherever zeep itself imports.
    from requests import Session
    from requests.auth import HTTPBasicAuth

    session = Session()
    # tambov_api_login / tambov_api_password are module-level credentials.
    session.auth = HTTPBasicAuth(tambov_api_login, tambov_api_password)
    self.client = Client(url, transport=Transport(session=session))
def process_units(queue: multiprocessing.Queue, process_no, api_key,
                  mastr_number, output):
    # Worker-process entry point: consumes batches of unit MaStR numbers from
    # `queue`, fetches each unit via the Marktstammdatenregister SOAP API and
    # appends the flattened rows to the CSV file at `output` (a pathlib.Path).
    # A `None` batch is the termination sentinel.
    logging.getLogger('zeep').setLevel(logging.CRITICAL)
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    output_exists = output.exists()
    force_termination = False

    def terminate(*args):
        # SIGINT handler: the first Ctrl-C requests a graceful stop at the
        # next loop check; a second Ctrl-C aborts immediately.
        nonlocal force_termination
        if force_termination:
            logger.warning(f'Process {process_no}: Force termination')
            raise KeyboardInterrupt('Force exit')
        else:
            logger.info(f'Process {process_no}: Set to gracefully terminate')
            force_termination = True

    signal.signal(signal.SIGINT, terminate)

    # Each worker builds its own client; the WSDL/XSDs are cached in memory
    # and individual SOAP operations time out after 60 seconds.
    wsdl = 'https://www.marktstammdatenregister.de/MaStRAPI/wsdl/mastr.wsdl'
    transport = Transport(cache=InMemoryCache(), operation_timeout=60)
    settings = Settings(strict=False, xml_huge_tree=True)
    client = Client(wsdl=wsdl, transport=transport, settings=settings)
    client_bind = client.bind('Marktstammdatenregister', 'Anlage')

    with output.open('a') as f:
        writer = csv.DictWriter(f, field_names)
        # Only write the header when the file is freshly created.
        if not output_exists:
            writer.writeheader()
        while True:
            unit_mastr_numbers = queue.get(block=True)
            logger.info(f'Process {process_no}: Processing next batch')
            if unit_mastr_numbers is None:
                logger.info(
                    f'Process {process_no}: Received termination sentinel -> no more data to process.'
                )
                return
            # Consecutive-error budget per batch; reset on any success.
            errors_count = 0
            for unit_number in unit_mastr_numbers:
                if force_termination:
                    logger.info(
                        f'Process {process_no}: Gracefully terminating')
                    return
                if errors_count > ERRORS_LIMIT:
                    logger.warning(
                        f'Process {process_no}: Reached errors limit, discarding this batch'
                    )
                    break
                try:
                    c = fetch_unit(client_bind, api_key, mastr_number,
                                   unit_number)
                    respond = serialize_object(c)
                    # Some fields come back as {'Wert': ...} wrappers; unwrap
                    # them, falling back to a marker for unexpected shapes.
                    writer.writerow({
                        k: (v.get('Wert', '<<unknown structure>>') if hasattr(
                            v, 'get') else v)
                        for k, v in respond.items()
                    })
                    # We got successful reply, the previous errors might not be related ==> continue
                    errors_count = 0
                except Fault as e:
                    logger.warning(
                        f'Process {process_no}: Got error, but continuing: {e.message}'
                    )
                    errors_count += 1
def dl_session(session_id, directory="Webcasts/", prefix=""):
    """Downloads the given webcast to the selected directory.

    Fetches the delivery info for `session_id`, then either downloads the
    single mp4 podcast (purged encodes) or downloads every available stream
    (audio/video, screen recordings, PowerPoint slides), splices them with
    ffmpeg concat demuxer files and writes the result under `directory`.
    Returns None early on any unrecoverable problem.
    """
    delivery_info = json_api("/Panopto/Pages/Viewer/DeliveryInfo.aspx", {
        "deliveryId": session_id,
        "responseType": "json"
    }, True, "data")
    if not delivery_info or "Delivery" not in delivery_info:
        # Unable to retrieve session info (insufficient permissions)
        print("[Warning] Could not retrieve info for webcast ID: {}".format(
            session_id))
        return None
    session_name = delivery_info["Delivery"]["SessionName"]
    print("{}Downloading webcast: {}".format(prefix, session_name))
    # Create template filename
    temp_fp = TEMP_DIR + clean(session_name) + "_{}.mp4"
    output_fp = directory + clean(session_name) + ".{}"
    # If only the mp4 podcast is available, download it
    if delivery_info["Delivery"]["IsPurgedEncode"]:
        print(" -> Downloading video podcast...", end="\r")
        sm = Client(
            "{}/Panopto/PublicAPI/4.6/SessionManagement.svc?singleWsdl".format(
                PANOPTO_BASE),
            transport=Transport(session=s))
        sess_info = sm.service.GetSessionsById(sessionIds=session_id)
        embed_stream = sess_info[0]['IosVideoUrl']
        dl_stream(embed_stream, output_fp.format("mp4"))
        # NOTE(review): trailing padding of this literal was garbled by
        # extraction (it originally overwrote the end="\r" progress line).
        print(" -> Video podcast downloaded! ")
    # Otherwise, download all the available streams and splice them together
    else:
        streams = delivery_info["Delivery"]["Streams"]
        # Split the streams into three categories - audio, video and screen recordings
        av_streams = list(
            filter(lambda x: x["Tag"] == "AUDIO" or x["Tag"] == "DV", streams))
        screen_streams = list(
            filter(lambda x: x["Tag"] == "SCREEN" or x["Tag"] == "OBJECT",
                   streams))
        # Extract Powerpoint slides for webcasts that are PPT slides + audio recording
        ppt_slides = list(
            filter(lambda x: x["EventTargetType"] == "PowerPoint",
                   delivery_info["Delivery"]["Timestamps"]))
        # Handle some potential edge cases and exit this function without downloading if they occur
        # I don't think that there can be >1 audio or video stream, but just flag it out anyways
        if len(av_streams) > 1:
            print("[Error] Found more than 1 audio or video stream")
            return None
        # 0 streams - what the hell is going on here?
        if len(streams) == 0:
            print("[Error] No streams found")
            return None
        # Streams with unidentified tags - needs further testing
        if len(streams) - len(av_streams) - len(screen_streams) != 0:
            print("[Error] Unidentified streams")
            return None
        # Create temp directory to do our work in
        if not os.path.exists(TEMP_DIR):
            os.makedirs(TEMP_DIR)
        # Keep track of the streams we've downloaded
        # Stored as a list of {STREAM_TYPE, FILEPATH} dicts
        downloaded_streams = []
        # SCREEN/OBJECT streams: Download all and splice them into a single file
        if len(screen_streams) > 0:
            # 1. Download all video files to TEMP_DIR and record the segments
            segments = []
            for idx, screen in enumerate(screen_streams):
                print(" -> Downloading screen recording {} of {}...".format(
                    idx + 1, len(screen_streams)),
                      end="\r")
                screen_fp = "video-{}.mp4".format(idx)
                dl_stream(screen["StreamUrl"], TEMP_DIR + screen_fp)
                for segment in screen["RelativeSegments"]:
                    segment["File"] = screen_fp
                    segment["StreamDuration"] = screen["RelativeEnd"] - screen[
                        "RelativeStart"]
                    segments.append(segment)
            # 2. Process segments
            for idx, segment in enumerate(segments):
                if idx == len(segments) - 1:
                    next_start = delivery_info["Delivery"]["Duration"]
                else:
                    next_start = segments[idx + 1]["RelativeStart"]
                # If there is a gap between the end of this segment and the start of the next (or end of the video), attempt to supplement with additional video from the source
                # If there is insufficient video, supplement with as much as possible
                if round(segment["RelativeStart"] +
                         (segment["End"] - segment["Start"]) -
                         next_start) < 0:
                    segment["End"] = min(
                        segment["StreamDuration"],
                        segment["Start"] + next_start - segment["RelativeStart"])
                # If this causes the end of one segment to be equal to the start of the next, combine them to avoid unnecessary splicing
                if idx < len(segments) - 1 and segment["End"] == segments[
                        idx + 1]["Start"] and segment["File"] == segments[idx +
                                                                          1]["File"]:
                    segment["End"] = segments[idx + 1]["End"]
                    segments.pop(idx + 1)
            # 3. Create concat demuxer file
            black_count = 0
            total_time = 0
            black_fp = TEMP_DIR + "black-{}.mp4"
            demux_fp = TEMP_DIR + "screen.txt"
            with open(demux_fp, "a") as demux:
                for segment in segments:
                    if round(segment["RelativeStart"] - total_time, 3) > 0:
                        # If there is a gap between the total running time and the start of the next segment, create a black screen to fill the difference
                        create_black_screen(
                            TEMP_DIR + segment["File"],
                            segment["RelativeStart"] - total_time,
                            black_fp.format(black_count))
                        demux.write("file black-{}.mp4\n".format(black_count))
                        total_time = segment["RelativeStart"]
                        black_count += 1
                    # Add in details for the next file segment
                    demux.write("file {}\n".format(segment["File"]))
                    demux.write("inpoint {:.3f}\n".format(segment["Start"]))
                    demux.write("outpoint {:.3f}\n".format(segment["End"]))
                    total_time += segment["End"] - segment["Start"]
                # Create one last black screen, if necessary
                if round(delivery_info["Delivery"]["Duration"] - total_time,
                         3) > 0:
                    create_black_screen(
                        TEMP_DIR + segment["File"],
                        delivery_info["Delivery"]["Duration"] - total_time,
                        black_fp.format(black_count))
                    demux.write("file black-{}.mp4\n".format(black_count))
            downloaded_streams.append({"Type": "Screen", "Filepath": demux_fp})
            print(format(" -> Screen recording(s) downloaded", TERM_WIDTH))
        # PPT slides: Create video file and mux with audio
        if len(ppt_slides) > 0:
            demux_fp = TEMP_DIR + "slides.txt"
            with open(demux_fp, "a") as demux:
                for idx, slide in enumerate(ppt_slides):
                    img_fp = TEMP_DIR + "slide-{}.jpg".format(idx)
                    slide_fp = "slide-{}.mp4".format(idx)
                    print(" -> Downloading slide {} of {}...".format(
                        idx + 1, len(ppt_slides)),
                          end="\r")
                    # Download slide and write it to an image file
                    img = s.post(
                        PANOPTO_BASE + "/Panopto/Pages/Viewer/Image.aspx", {
                            "id": slide["ObjectIdentifier"],
                            "number": slide["ObjectSequenceNumber"]
                        })
                    if img.headers["Content-Type"] == "image/jpeg":
                        with open(img_fp, "wb") as img_file:
                            img_file.write(img.content)
                    else:
                        print("[Error] Unknown filetype for slide #{}: {}".
                              format(slide["ObjectSequenceNumber"],
                                     img.headers["Content-Type"]))
                        exit()
                    # Set start and end times
                    start = 0 if idx == 0 else round(slide["Time"], 3)
                    end = round(delivery_info["Delivery"]["Duration"],
                                3) if idx == len(ppt_slides) - 1 else round(
                                    ppt_slides[idx + 1]["Time"], 3)
                    # Convert slide image to video
                    create_slide_video(img_fp, end - start, TEMP_DIR + slide_fp)
                    # Add details to the concat demuxer
                    with open(TEMP_DIR + "concat.txt", "a") as concat:
                        concat.write("file {}\n".format(slide_fp))
            downloaded_streams.append({"Type": "Slides", "Filepath": demux_fp})
            print(format(" -> Powerpoint slide(s) downloaded!", TERM_WIDTH))
        # AUDIO or DV streams
        for av in av_streams:
            stream_type = "video" if av["Tag"] == "DV" else av["Tag"].lower()
            print(" -> Downloading {} stream...".format(stream_type),
                  end="\r")
            av_fp = temp_fp.format(stream_type)
            dl_stream(av["StreamUrl"], av_fp)
            downloaded_streams.append({
                "Type": stream_type.capitalize(),
                "Filepath": av_fp
            })
            # NOTE(review): trailing padding of this literal was garbled by
            # extraction (it originally overwrote the end="\r" progress line).
            print(" -> {} stream downloaded! ".format(
                stream_type.capitalize()))
        # Choose the output container and combine whatever streams we got.
        stream_types = [stream["Type"] for stream in downloaded_streams]
        if "Screen" in stream_types and "Video" in stream_types:
            combine_streams(downloaded_streams, output_fp.format("mkv"))
        elif "Screen" in stream_types or "Slides" in stream_types:
            combine_streams(downloaded_streams, output_fp.format("mp4"))
        else:
            for stream in downloaded_streams:
                shutil.copyfile(stream["Filepath"], output_fp.format("mp4"))
    # Cleanup all temporary files
    if os.path.exists(TEMP_DIR):
        shutil.rmtree(TEMP_DIR)
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from tqdm import tqdm
from zeep import Client, Transport

from filtro.models import TipoMovimento

logger = logging.getLogger(__name__)

# Shared SOAP client for the TJRJ MNI web service.
# NOTE(review): verify=False disables TLS certificate validation — confirm
# this is actually required for this endpoint.
s = requests.Session()
s.verify = False
transport = Transport(session=s)
client = Client(
    "https://webserverseguro.tjrj.jus.br/MNI/Servico.svc?singleWsdl",
    transport=transport,
)


def obter_documentos(processo):
    # Query the MNI service for a case record, requesting attached documents
    # but no movement history (movimentos=0).
    # NOTE: function body continues beyond this chunk.
    conteudo = client.service.consultarProcesso(
        idConsultante=settings.ID_MNI,
        senhaConsultante=settings.SENHA_MNI,
        numeroProcesso=processo,
        movimentos=0,
        _value_1={"incluirDocumentos": 1},
    )
def get_SOAP_client(url):
    """Build a zeep client for ``url`` with an in-memory WSDL/XSD cache."""
    cached_transport = Transport(cache=InMemoryCache())
    client = Client(url, transport=cached_transport)
    return client