def authenticate(self, environ):
    """ This function takes a WSGI environment and authenticates
    the request returning authenticated user or error.
    """
    method = REQUEST_METHOD(environ)
    fullpath = url_quote(SCRIPT_NAME(environ)) + url_quote(
        PATH_INFO(environ))
    authorization = AUTHORIZATION(environ)
    if not authorization:
        return self.build_authentication()
    (authmeth, auth) = authorization.split(" ", 1)
    if 'digest' != authmeth.lower():
        return self.build_authentication()
    amap = dict(_auth_to_kv_pairs(auth))
    try:
        username = amap['username']
        authpath = amap['uri']
        nonce = amap['nonce']
        realm = amap['realm']
        response = amap['response']
        assert authpath.split("?", 1)[0] in fullpath
        assert realm == self.realm
        qop = amap.get('qop', '')
        cnonce = amap.get('cnonce', '')
        nc = amap.get('nc', '00000000')
        if qop:
            assert 'auth' == qop
            assert nonce and nc
    except (KeyError, AssertionError):
        return self.build_authentication()
    ha1 = self.authfunc(environ, realm, username)
    return self.compute(ha1, username, response, method,
                        authpath, nonce, nc, cnonce, qop)
def authenticate(self, environ):
    """ This function takes a WSGI environment and authenticates
    the request returning authenticated user or error.
    """
    method = REQUEST_METHOD(environ)
    fullpath = url_quote(SCRIPT_NAME(environ)) + url_quote(PATH_INFO(environ))
    authorization = AUTHORIZATION(environ)
    if not authorization:
        return self.build_authentication()
    (authmeth, auth) = authorization.split(" ", 1)
    if "digest" != authmeth.lower():
        return self.build_authentication()
    amap = dict(_auth_to_kv_pairs(auth))
    try:
        username = amap["username"]
        authpath = amap["uri"]
        nonce = amap["nonce"]
        realm = amap["realm"]
        response = amap["response"]
        assert authpath.split("?", 1)[0] in fullpath
        assert realm == self.realm
        qop = amap.get("qop", "")
        cnonce = amap.get("cnonce", "")
        nc = amap.get("nc", "00000000")
        if qop:
            assert "auth" == qop
            assert nonce and nc
    except (KeyError, AssertionError):
        return self.build_authentication()
    ha1 = self.authfunc(environ, realm, username)
    return self.compute(ha1, username, response, method,
                        authpath, nonce, nc, cnonce, qop)
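# A hypothetical sketch of the `_auth_to_kv_pairs` helper that the two
# `authenticate` variants above rely on; the real implementation is not
# shown in this section, so the regex and names here are assumptions.
# It splits the Digest credentials string into (key, value) pairs,
# dropping the optional surrounding quotes.
import re

_AUTH_KV_RE = re.compile(r'(\w+)=("[^"]*"|[^,]+)')

def _auth_to_kv_pairs(auth):
    """Yield (key, value) pairs from a Digest Authorization value."""
    for key, value in _AUTH_KV_RE.findall(auth):
        yield key, value.strip('"')

# e.g. dict(_auth_to_kv_pairs('username="bob", nonce="abc", nc=00000001'))
# -> {'username': 'bob', 'nonce': 'abc', 'nc': '00000001'}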
def _get_project_url(self):
    s = self._settings
    r = self._run_obj
    app_url = s.base_url.replace("//api.", "//app.")
    url = "{}/{}/{}".format(app_url, url_quote(r.entity), url_quote(r.project))
    return url
def generate_index(self):
    display_name = escape_html(self.dir_name if self.dir_name else "/")
    html = StringIO()
    html.write(self.header % locals())

    # If this is a subdirectory, create a link back to the parent.
    if self.dir_name:
        parent_dirname = ("/" + self.dir_name).rsplit("/", 1)[0]
        html.write(self.parent_backlink % locals())

    for subdir_name, subdir in sorted(iteritems(self.subdirs)):
        subdir_link = escape_html(url_quote(
            self.dir_name + "/" + subdir_name if self.dir_name
            else subdir_name))
        subdir_name = escape_html(subdir_name)
        html.write(self.subdir_link % locals())

    for filename, key in sorted(iteritems(self.contents)):
        ext = splitext(filename)[-1]
        icon_name = self.icons.get(ext, "binary.png")
        suffix_type = self.suffix_types.get(ext, " ")
        file_link = escape_html(url_quote(key.name))
        filename = escape_html(filename)
        last_modified = escape_html(key.last_modified)
        size = str(key.size)
        description = ""
        html.write(self.file_link % locals())

    html.write(self.footer % locals())
    return html.getvalue()
def _send_update(self, url, attribute_map, attribute_calculations, record):
    d = record.date.replace(tzinfo=self.station_time_zone).astimezone(
        pytz.UTC).replace(tzinfo=None)
    url = '%s&ID=%s&PASSWORD=%s&dateutc=%s' % (
        url,
        self.station_id,
        url_quote(self.password, safe=''),
        url_quote(d.strftime('%Y-%m-%d %H:%M:%S'), safe=''),
    )
    for parameter, field in attribute_map or []:
        if field and record[field] is not None:
            url += '&%s=%s' % (
                parameter,
                url_quote(record[field], safe=''),
            )
    for parameter, function in attribute_calculations or []:
        if function:
            value = function(self, record)
            if value is not None:
                url += '&%s=%s' % (
                    parameter,
                    url_quote(value, safe=''),
                )
    response = self._session.get(url)
    assert 200 <= response.status_code < 300, \
        'Status code %s unexpected' % response.status_code
    assert response.text == 'success', \
        'Response "%s" unexpected' % response.text
def _send_update(self, url, attribute_map, attribute_calculations, record):
    d = record.date.replace(tzinfo=self.station_time_zone).astimezone(
        pytz.UTC).replace(tzinfo=None)
    url = '%s&ID=%s&PASSWORD=%s&dateutc=%s' % (
        url,
        self.station_id,
        url_quote(self.password, safe=''),
        url_quote(d.strftime('%Y-%m-%d %H:%M:%S'), safe=''),
    )
    for parameter, field in attribute_map or []:
        if field and record[field] is not None:
            url += '&%s=%s' % (parameter, url_quote(record[field], safe=''))
    for parameter, function in attribute_calculations or []:
        if function:
            value = function(self, record)
            if value is not None:
                url += '&%s=%s' % (parameter, url_quote(value, safe=''))
    response = self._session.get(url)
    if not (200 <= response.status_code < 300):
        raise AssertionError(
            'Status code %s unexpected' % response.status_code)
    if response.text != 'success':
        raise AssertionError('Response "%s" unexpected' % response.text)
def test_recursive_put(self):
    @contextlib.contextmanager
    def tempdir(prefix='tmp'):
        tmpdir = tempfile.mkdtemp(prefix=prefix)
        try:
            yield tmpdir
        finally:
            shutil.rmtree(tmpdir)

    def create_file(_dir, name, content):
        path = os.path.join(_dir, name)
        actual_dir = os.path.dirname(path)
        if not os.path.exists(actual_dir):
            os.makedirs(actual_dir)
        with open(path, 'w') as f:
            f.write(content)

    _captures = []

    def capture(request, context):
        _captures.append({
            'headers': getattr(request._request, 'headers'),
            'body': request._request.body,
        })
        context.status_code = 200

    with tempdir() as _dir:
        base = (r'^http://pylxd.test/1.0/containers/'
                r'an-container/files\?path=')
        rules = [{
            'text': capture,
            'method': 'POST',
            'url': base + url_quote('target', safe='') + '$'
        }, {
            'text': capture,
            'method': 'POST',
            'url': base + url_quote('target/dir', safe='') + '$'
        }, {
            'text': capture,
            'method': 'POST',
            'url': base + url_quote('target/file1', safe='') + '$'
        }, {
            'text': capture,
            'method': 'POST',
            'url': base + url_quote('target/dir/file2', safe='') + '$'
        }]
        self.add_rules(rules)

        create_file(_dir, 'file1', "This is file1")
        create_file(_dir, 'dir/file2', "This is file2")

        self.container.files.recursive_put(_dir, './target/')

        self.assertEqual(_captures[0]['headers']['X-LXD-type'], 'directory')
        self.assertEqual(_captures[1]['body'], b"This is file1")
        self.assertEqual(_captures[2]['headers']['X-LXD-type'], 'directory')
        self.assertEqual(_captures[3]['body'], b"This is file2")
def test_get_uri_for_bounty_submission(self):
    """ tests that the get_uri_for_bounty_submission method works
    as expected.
    """
    self.client = BugcrowdClient('api-token')
    submission = get_example_submission()
    submission['bounty_code'] = '<bounty_code>'
    submission['reference_number'] = '<reference_number>'
    expected_uri = 'https://tracker.bugcrowd.com/%s/submissions/%s' % (
        url_quote(submission['bounty_code']),
        url_quote(submission['reference_number']))
    self.assertEqual(get_uri_for_bounty_submission(submission),
                     expected_uri)
def get_result_url(self):
    reqs = {}
    # required
    reqs['access_key'] = url_quote(self.aws_access_key_id)
    reqs['secret_key'] = url_quote(self.aws_secret_access_key)
    reqs['bucket'] = self.bucket
    reqs['path'] = self.path
    # optional
    params = {}
    for name in ['format']:
        if hasattr(self, name):
            params[name] = getattr(self, name)
    reqs['params'] = urlencode(
        [(key, params[key]) for key in params if params[key]])
    return ("s3://{access_key}:{secret_key}@/{bucket}/{path}?{params}"
            .format(**reqs))
def device_qrcode(request, public_id):
    device = models.Device.get_by_id(public_id)
    if not util.has_device_perm(request, device):
        raise PermissionDenied("No permission for device")
    #device_class = devices.get_class(self.object.type).qr_data(device=device)
    url_base = "{0}://{1}".format(request.scheme, settings.POST_DOMAIN)
    data = [
        ('post', url_base + reverse('post')),
        ('config', url_base + '/config'),
        ('device_id', device.device_id),
        ('public_id', device.public_id),
        ('secret_id', device.secret_id),
        ('device_type', device.type),
        ('selfsigned_post',
         'https://' + settings.POST_DOMAIN_SS + reverse('post')),
        ('selfsigned_cert_der_sha256', settings.KOOTA_SSL_CERT_DER_SHA256),
        ('selfsigned_cert_pem_sha256', settings.KOOTA_SSL_CERT_PEM_SHA256),
    ]
    uri = 'koota:?' + '&'.join('%s=%s' % (k, url_quote(v)) for k, v in data)
    img = qrcode.make(uri, border=4, box_size=2,
                      error_correction=qrcode.constants.ERROR_CORRECT_L)
    cimage = io.BytesIO()
    img.save(cimage)
    cimage.seek(0)
    return HttpResponse(cimage.getvalue(), content_type='image/png')
def __extract_metadata(self, doc, payload):
    filename = os.path.basename(doc.path)
    headers = {
        'Accept': 'application/json',
        'Content-Disposition': 'attachment; filename=%s' % url_quote(filename)
    }
    if doc.meta['Content-Type']:
        headers['Content-Type'] = doc.meta['Content-Type']
    tika_url = self.config.get(helper.TIKA_META)
    connection = self.config[helper.INJECTOR].get_http_connection(tika_url)
    payload.seek(0)
    connection.request('PUT', '/meta', payload.read(), headers)
    payload.seek(0)
    response = connection.getresponse()
    try:
        if response.status >= 400:
            logging.error('tika error %d (%s): %s', response.status,
                          response.reason, doc.path)
            return {}
        response_data = response.read()
    finally:
        response.close()
    try:
        result = json.loads(response_data.decode('utf-8'))
    except (ValueError, UnicodeDecodeError):
        logging.error('invalid response from tika for %s', doc.path)
        result = {}
    return result
def test_unicode(self):
    start = time.time()
    # Encoded utf8 query strings MUST be parsed correctly.
    # Here, the URL is encoded in utf8 and then %HEX
    # See https://docs.cherrypy.org/en/latest/_modules/cherrypy/test/test_encoding.html for more
    self.getPage(url_quote(u"/üŋïĉóđē".encode("utf-8")))
    # Without this here, span may not be ready for inspection,
    # and timings can be incorrect.
    time.sleep(0.01)
    end = time.time()

    # ensure request worked
    self.assertStatus("200 OK")
    self.assertHeader("Content-Type", "text/html;charset=utf-8")
    self.assertBody(
        b"\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93")

    # ensure trace worked
    assert not self.tracer.current_span(), self.tracer.current_span().pprint()
    spans = self.tracer.writer.pop()
    assert len(spans) == 1
    s = spans[0]
    assert s.service == "test.cherrypy.service"
    assert s.resource == u"GET /üŋïĉóđē"
    assert s.start >= start
    assert s.duration <= end - start
    assert s.error == 0
    assert_span_http_status_code(s, 200)
    assert s.meta.get(http.METHOD) == "GET"
    assert s.meta.get(http.URL) == u"http://127.0.0.1:54583/üŋïĉóđē"
def consume(self, doc, payload):
    """ Upload document to Apache Tika and add result to document as text.

    :param doc: Document object.
    :param payload: File pointer belonging to document.
    :type doc: ``gransk.core.document.Document``
    :type payload: ``file``
    """
    if not self._accept(doc):
        return

    filename = os.path.basename(doc.path)
    content_type = doc.meta['Content-Type']
    payload.seek(0)
    data = payload.read()
    doc.set_size(payload.tell())

    if content_type == 'application/pdf' and self.detect_scanned_pdf:
        tmp_path = os.path.join(
            self.tmp_root, '%s-%s.pdf' % (self.wid, doc.docid[0:8]))
        if not os.path.exists(self.tmp_root):
            os.makedirs(self.tmp_root)
        with open(tmp_path, 'wb') as fp:
            fp.write(data)
        if self._is_pdf_scanned(tmp_path):
            data_tiff = self._convert_pdf_to_tiff(tmp_path)
            if data_tiff:
                data = data_tiff
                filename = '%s.tiff' % filename
                content_type = 'image/tiff'
        os.remove(tmp_path)

    headers = {
        'Content-Disposition': 'attachment; filename=%s' % url_quote(filename),
        'Content-type': content_type,
    }
    if self.ocr_languages:
        headers['X-Tika-OCRLanguage'] = self.ocr_languages

    connection = self.config[helper.INJECTOR].get_http_connection()
    connection.request('PUT', '/tika', data, headers)
    response = connection.getresponse()
    try:
        if response.status >= 400:
            logging.error('tika error %d (%s): %s', response.status,
                          response.reason, doc.path)
        else:
            doc.text = response.read().strip().decode('utf-8')
    finally:
        response.close()
def format_url_ids(cls, path, id, secondary_id):
    if id:
        assert path[-1] == '/', path
        # TODO: ensure slashes don't get %-decoded in requests
        # Strip slashes before encoding; replacing on the encoded bytes
        # with a str argument would raise a TypeError on Python 3.
        utf8_id = text_type(id).replace('/', '').encode('utf-8')
        path += url_quote(utf8_id, safe='')
    return path.format(secondary_id)
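# A minimal usage sketch for format_url_ids above, with hypothetical
# argument values: the id is slash-stripped and percent-encoded with
# safe='', then the secondary id fills the placeholder left in the path.
# format_url_ids(cls, 'users/{}/items/', 'a b/c', 7)
# -> 'users/7/items/a%20bc'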
def getBaseUrl(self):
    '''Return a file: URL that probably points to the basedir.

    This is used as a halfway sane default when the base URL is not
    provided; not perfect, but should work in most cases.'''
    components = util.splitpath(os.path.abspath(self.basepath))
    url = '/'.join([url_quote(component, '') for component in components])
    return 'file:///' + url + '/'
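# A usage sketch for getBaseUrl above, assuming util.splitpath breaks an
# absolute path into its components: each component is quoted with
# safe='' so spaces and reserved characters in directory names cannot
# leak into the URL. With self.basepath == '/home/user/my site', the
# result would be 'file:///home/user/my%20site/'.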
def test_get_api_uri_for_bounty_submissions(self):
    """ tests that the get_api_uri_for_bounty_submissions method works
    as expected.
    """
    expected_uri = self.client.base_uri + (
        'bounties/%s/submissions' % url_quote(self._bounty['uuid']))
    uri = self.client.get_api_uri_for_bounty_submissions(self._bounty)
    self.assertEqual(uri, expected_uri)
def insert_quick_link(message, slack_base_url=None):
    # TODO: does the slack api document this format anywhere...
    if (message.kind == 'slack' and message.sub_kind == 'message' and
            slack_base_url and message.body.get("ts") and
            message.body.get('channel')):
        m_link = slack_base_url
        if not m_link.endswith("/"):
            m_link += "/"
        m_link += "archives/%s/" % url_quote(message.body['channel'])
        m_link += "p" + url_quote(message.body['ts'].replace(".", ""))
        m_thread_ts = message.body.get('thread_ts')
        if m_thread_ts:
            m_link += "?"
            m_link += url_encode({
                'thread_ts': m_thread_ts,
            })
        message.body['quick_link'] = m_link
def test_get_api_uri_for_submission(self):
    """ tests that the get_api_uri_for_submission method works as expected.
    """
    submission = get_example_submission()
    expected_uri = self.client.base_uri + (
        'submissions/%s' % url_quote(submission['uuid']))
    self.assertEqual(self.client.get_api_uri_for_submission(submission),
                     expected_uri)
def test_query_string_decoding(self):
    URI_TMPL = '/reqparams?q={q}'

    europoundUtf8_2_bytes = europoundUnicode.encode('utf-8')
    europoundUtf8_2nd_byte = europoundUtf8_2_bytes[1:2]

    # Encoded utf8 query strings MUST be parsed correctly.
    # Here, q is the POUND SIGN U+00A3 encoded in utf8 and then %HEX
    self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2_bytes)))
    # The return value will be encoded as utf8.
    self.assertBody(ntob('q: ') + europoundUtf8_2_bytes)

    # Query strings that are incorrectly encoded MUST raise 404.
    # Here, q is the second byte of POUND SIGN U+A3 encoded in utf8
    # and then %HEX
    # TODO: check whether this shouldn't raise 400 Bad Request instead
    self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2nd_byte)))
    self.assertStatus(404)
    self.assertErrorPage(
        404,
        'The given query string could not be processed. Query '
        "strings for this resource must be encoded with 'utf8'.")
def test_query_string_decoding(self):
    URI_TMPL = '/reqparams?q={q}'

    europoundUtf8_2_bytes = europoundUnicode.encode('utf-8')
    europoundUtf8_2nd_byte = europoundUtf8_2_bytes[1:2]

    # Encoded utf8 query strings MUST be parsed correctly.
    # Here, q is the POUND SIGN U+00A3 encoded in utf8 and then %HEX
    self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2_bytes)))
    # The return value will be encoded as utf8.
    self.assertBody(b'q: ' + europoundUtf8_2_bytes)

    # Query strings that are incorrectly encoded MUST raise 404.
    # Here, q is the second byte of POUND SIGN U+A3 encoded in utf8
    # and then %HEX
    # TODO: check whether this shouldn't raise 400 Bad Request instead
    self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2nd_byte)))
    self.assertStatus(404)
    self.assertErrorPage(
        404,
        'The given query string could not be processed. Query '
        "strings for this resource must be encoded with 'utf8'.")
def _send_tensorboard(self, tb_root, tb_logdirs, send_manager):
    if self._entity is None:
        viewer, server_info = send_manager._api.viewer_server_info()
        self._entity = viewer.get("entity")
    proto_run = wandb_internal_pb2.RunRecord()
    proto_run.run_id = self._run_id or wandb.util.generate_id()
    proto_run.project = self._project or wandb.util.auto_project_name(None)
    proto_run.entity = self._entity

    url = "{}/{}/{}/runs/{}".format(
        self._app_url,
        url_quote(proto_run.entity),
        url_quote(proto_run.project),
        url_quote(proto_run.run_id),
    )
    print("Syncing: %s ..." % url)
    sys.stdout.flush()

    record = send_manager._interface._make_record(run=proto_run)
    send_manager.send(record)
    settings = wandb.Settings(
        root_dir=TMPDIR.name,
        run_id=proto_run.run_id,
        _start_datetime=datetime.datetime.now(),
        _start_time=time.time(),
    )
    watcher = tb_watcher.TBWatcher(
        settings, proto_run, send_manager._interface, True
    )
    for tb in tb_logdirs:
        watcher.add(tb, True, tb_root)
        sys.stdout.flush()
    watcher.finish()

    # send all of our records like a boss
    while not send_manager._interface.record_q.empty():
        data = send_manager._interface.record_q.get(block=True)
        send_manager.send(data)
    sys.stdout.flush()
    send_manager.finish()
def get_result_url(self):
    reqs = {}
    for name in ['server', 'username', 'password', 'datasource']:
        if getattr(self, name) is None:
            raise TypeError('missing option "{0}" for {1}'.format(name, self))
        reqs[name] = url_quote(getattr(self, name))
    params = {
        'ssl': self.ssl,
        'ssl_verify': self.ssl_verify,
        'server_version': self.server_version,
        'site': self.site,
        'project': self.project,
        'mode': self.mode,
    }
    reqs['params'] = urlencode(
        [(key, params[key]) for key in params if params[key] is not None])
    return ("tableau://{username}:{password}@{server}/{datasource}?{params}"
            .format(**reqs))
def fixurl(url, unquote=None):
    old = url
    url = url_unquote(url)
    if unquote is None:
        unquote = url is old

    url = decode(url)
    try:
        url = smart_text(url, encoding='unicode-escape')
    except UnicodeDecodeError:
        pass

    url = html_unescape(url)
    url = re.sub(r'(?<!:)/{2,}', '/', url).strip().lstrip('.')

    if not unquote:
        url = url_quote(url)

    return url
def test_unicode(self):
    # Encoded utf8 query strings MUST be parsed correctly.
    # Here, the URL is encoded in utf8 and then %HEX
    # See https://docs.cherrypy.org/en/latest/_modules/cherrypy/test/test_encoding.html for more
    self.getPage(url_quote(u"/üŋïĉóđē".encode("utf-8")))
    time.sleep(0.1)
    self.assertStatus("200 OK")
    self.assertHeader("Content-Type", "text/html;charset=utf-8")
    self.assertBody(
        b"\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93")

    # ensure trace worked
    assert not self.tracer.current_span()
    spans = self.pop_spans()
    assert len(spans) == 1
    s = spans[0]
    assert s.service == "test.cherrypy.service"
    assert s.resource == u"GET /üŋïĉóđē"
    assert s.error == 0
    assert_span_http_status_code(s, 200)
    assert s.get_tag(http.METHOD) == "GET"
    assert s.get_tag(http.URL) == u"http://127.0.0.1:54583/üŋïĉóđē"
def device_qrcode(request, public_id):
    device = models.Device.get_by_id(public_id)
    if not util.has_device_perm(request, device):
        raise PermissionDenied("No permission for device")
    #device_class = devices.get_class(self.object.type).qr_data(device=device)
    url_base = "{0}://{1}".format(request.scheme, settings.POST_DOMAIN)
    data = [('post', url_base + reverse('post')),
            ('config', url_base + '/config'),
            ('device_id', device.device_id),
            ('public_id', device.public_id),
            ('secret_id', device.secret_id),
            ('device_type', device.type),
            ('selfsigned_post',
             'https://' + settings.POST_DOMAIN_SS + reverse('post')),
            ('selfsigned_cert_der_sha256', settings.KOOTA_SSL_CERT_DER_SHA256),
            ('selfsigned_cert_pem_sha256', settings.KOOTA_SSL_CERT_PEM_SHA256),
            ]
    uri = 'koota:?' + '&'.join('%s=%s' % (k, url_quote(v)) for k, v in data)
    img = qrcode.make(uri, border=4, box_size=2,
                      error_correction=qrcode.constants.ERROR_CORRECT_L)
    cimage = io.BytesIO()
    img.save(cimage)
    cimage.seek(0)
    return HttpResponse(cimage.getvalue(), content_type='image/png')
def quote(text, safechars='/'):
    """Percent-encode given text."""
    return url_quote(text, safechars)
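# A minimal usage sketch for the wrapper above (assuming url_quote is
# urllib's quote): with the default safechars='/', slashes survive while
# other reserved characters are percent-encoded; pass safechars='' to
# encode slashes as well.
# quote('path/to/a file')      -> 'path/to/a%20file'
# quote('path/to/a file', '')  -> 'path%2Fto%2Fa%20file'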
def run(self):
    for sync_item in self._sync_list:
        if os.path.isdir(sync_item):
            files = os.listdir(sync_item)
            filtered_files = list(
                filter(lambda f: f.endswith(WANDB_SUFFIX), files))
            if check_and_warn_old(files) or len(filtered_files) != 1:
                print("Skipping directory: {}".format(sync_item))
                continue
            sync_item = os.path.join(sync_item, filtered_files[0])
        dirname = os.path.dirname(sync_item)
        files_dir = os.path.join(dirname, "files")
        sd = dict(
            files_dir=files_dir,
            _start_time=0,
            git_remote=None,
            resume=None,
            program=None,
            ignore_globs=(),
            run_id=None,
            entity=None,
            project=None,
            run_group=None,
            job_type=None,
            run_tags=None,
            run_name=None,
            run_notes=None,
            save_code=None,
        )
        settings = settings_static.SettingsStatic(sd)
        record_q = queue.Queue()
        result_q = queue.Queue()
        publish_interface = interface.BackendSender(record_q=record_q)
        sm = sender.SendManager(
            settings=settings,
            record_q=record_q,
            result_q=result_q,
            interface=publish_interface,
        )
        ds = datastore.DataStore()
        ds.open_for_scan(sync_item)

        # save exit for final send
        exit_pb = None
        shown = False
        while True:
            data = ds.scan_data()
            if data is None:
                break
            pb = wandb_internal_pb2.Record()
            pb.ParseFromString(data)
            record_type = pb.WhichOneof("record_type")
            if self._view:
                if self._verbose:
                    print("Record:", pb)
                else:
                    print("Record:", record_type)
                continue
            if record_type == "run":
                if self._run_id:
                    pb.run.run_id = self._run_id
                if self._project:
                    pb.run.project = self._project
                if self._entity:
                    pb.run.entity = self._entity
                pb.control.req_resp = True
            elif record_type == "exit":
                exit_pb = pb
                continue
            elif record_type == "final":
                assert exit_pb, "final seen without exit"
                pb = exit_pb
                exit_pb = None
            sm.send(pb)
            # send any records that were added in previous send
            while not record_q.empty():
                data = record_q.get(block=True)
                sm.send(data)
            if pb.control.req_resp:
                result = result_q.get(block=True)
                result_type = result.WhichOneof("result_type")
                if not shown and result_type == "run_result":
                    r = result.run_result.run
                    # TODO(jhr): hardcode until we have settings in sync
                    url = "{}/{}/{}/runs/{}".format(
                        self._app_url,
                        url_quote(r.entity),
                        url_quote(r.project),
                        url_quote(r.run_id),
                    )
                    print("Syncing: %s ..." % url, end="")
                    sys.stdout.flush()
                    shown = True
        sm.finish()
        if self._mark_synced:
            synced_file = "{}{}".format(sync_item, SYNCED_SUFFIX)
            with open(synced_file, "w"):
                pass
        print("done.")
def run(self):
    for sync_item in self._sync_list:
        tb_event_files, tb_logdirs, tb_root = self._find_tfevent_files(
            sync_item)
        if os.path.isdir(sync_item):
            files = os.listdir(sync_item)
            filtered_files = list(
                filter(lambda f: f.endswith(WANDB_SUFFIX), files))
            if tb_root is None and (
                    check_and_warn_old(files) or len(filtered_files) != 1):
                print("Skipping directory: {}".format(sync_item))
                continue
            if len(filtered_files) > 0:
                sync_item = os.path.join(sync_item, filtered_files[0])
        sync_tb = self._setup_tensorboard(
            tb_root, tb_logdirs, tb_event_files, sync_item)
        # If we're syncing tensorboard, let's use a tmp dir for images etc.
        root_dir = TMPDIR.name if sync_tb else os.path.dirname(sync_item)
        sm = sender.SendManager.setup(root_dir)
        if sync_tb:
            self._send_tensorboard(tb_root, tb_logdirs, sm)
            continue
        ds = datastore.DataStore()
        try:
            ds.open_for_scan(sync_item)
        except AssertionError as e:
            print(".wandb file is empty ({}), skipping: {}".format(
                e, sync_item))
            continue

        # save exit for final send
        exit_pb = None
        finished = False
        shown = False
        while True:
            data = self._robust_scan(ds)
            if data is None:
                break
            pb, exit_pb, cont = self._parse_pb(data, exit_pb)
            if exit_pb is not None:
                finished = True
            if cont:
                continue
            sm.send(pb)
            # send any records that were added in previous send
            while not sm._record_q.empty():
                data = sm._record_q.get(block=True)
                sm.send(data)
            if pb.control.req_resp:
                result = sm._result_q.get(block=True)
                result_type = result.WhichOneof("result_type")
                if not shown and result_type == "run_result":
                    r = result.run_result.run
                    # TODO(jhr): hardcode until we have settings in sync
                    url = "{}/{}/{}/runs/{}".format(
                        self._app_url,
                        url_quote(r.entity),
                        url_quote(r.project),
                        url_quote(r.run_id),
                    )
                    print("Syncing: %s ..." % url, end="")
                    sys.stdout.flush()
                    shown = True
        sm.finish()
        # Only mark synced if the run actually finished
        if self._mark_synced and not self._view and finished:
            synced_file = "{}{}".format(sync_item, SYNCED_SUFFIX)
            with open(synced_file, "w"):
                pass
        print("done.")
def _send_tensorboard(self, tb_root, tb_logdirs, send_manager):
    if self._entity is None:
        viewer, server_info = send_manager._api.viewer_server_info()
        self._entity = viewer.get("entity")
    proto_run = wandb_internal_pb2.RunRecord()
    proto_run.run_id = self._run_id or wandb.util.generate_id()
    proto_run.project = self._project or wandb.util.auto_project_name(None)
    proto_run.entity = self._entity

    url = "{}/{}/{}/runs/{}".format(
        self._app_url,
        url_quote(proto_run.entity),
        url_quote(proto_run.project),
        url_quote(proto_run.run_id),
    )
    print("Syncing: %s ..." % url)
    sys.stdout.flush()

    # using a handler here automatically handles the step
    # logic, adds summaries to the run, and handles different
    # file types (like images)... but we need to remake the send_manager
    record_q = queue.Queue()
    sender_record_q = queue.Queue()
    new_interface = interface.BackendSender(record_q)
    send_manager = sender.SendManager(
        send_manager._settings, sender_record_q, queue.Queue(), new_interface)
    record = send_manager._interface._make_record(run=proto_run)
    settings = wandb.Settings(
        root_dir=TMPDIR.name,
        run_id=proto_run.run_id,
        _start_datetime=datetime.datetime.now(),
        _start_time=time.time(),
    )

    handle_manager = handler.HandleManager(
        settings, record_q, None, False, sender_record_q, None, new_interface)

    mkdir_exists_ok(settings.files_dir)
    send_manager.send_run(record, file_dir=settings.files_dir)
    watcher = tb_watcher.TBWatcher(settings, proto_run, new_interface, True)

    for tb in tb_logdirs:
        watcher.add(tb, True, tb_root)
        sys.stdout.flush()
    watcher.finish()

    # send all of our records like a boss
    progress_step = 0
    spinner_states = ["-", "\\", "|", "/"]
    line = " Uploading data to wandb\r"
    while len(handle_manager) > 0:
        data = next(handle_manager)
        handle_manager.handle(data)
        while len(send_manager) > 0:
            data = next(send_manager)
            send_manager.send(data)
        print_line = spinner_states[progress_step % 4] + line
        wandb.termlog(print_line, newline=False, prefix=True)
        progress_step += 1

    # finish sending any data
    while len(send_manager) > 0:
        data = next(send_manager)
        send_manager.send(data)

    sys.stdout.flush()
    handle_manager.finish()
    send_manager.finish()
def encode_for_url(url_component, safe=SAFE_CHARS):
    return url_quote(url_component, safe)
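# Usage sketch for encode_for_url above; SAFE_CHARS is defined elsewhere
# in the module, so the default behavior shown here assumes it includes
# only '/'. Characters listed in `safe` pass through unencoded.
# encode_for_url('a&b=c')     -> 'a%26b%3Dc'
# encode_for_url('a&b', '&')  -> 'a&b'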
def test_recursive_put(self):
    @contextlib.contextmanager
    def tempdir(prefix='tmp'):
        tmpdir = tempfile.mkdtemp(prefix=prefix)
        try:
            yield tmpdir
        finally:
            shutil.rmtree(tmpdir)

    def create_file(_dir, name, content):
        path = os.path.join(_dir, name)
        actual_dir = os.path.dirname(path)
        if not os.path.exists(actual_dir):
            os.makedirs(actual_dir)
        with open(path, 'w') as f:
            f.write(content)

    _captures = []

    def capture(request, context):
        _captures.append({
            'headers': getattr(request._request, 'headers'),
            'body': request._request.body,
        })
        context.status_code = 200

    with tempdir() as _dir:
        # Both halves of the pattern need the r prefix; without it, \?
        # is an invalid string escape rather than a regex-escaped '?'.
        base = (r'^http://pylxd.test/1.0/containers/'
                r'an-container/files\?path=')
        rules = [
            {
                'text': capture,
                'method': 'POST',
                'url': base + url_quote('target', safe='') + '$'
            },
            {
                'text': capture,
                'method': 'POST',
                'url': base + url_quote('target/dir', safe='') + '$'
            },
            {
                'text': capture,
                'method': 'POST',
                'url': base + url_quote('target/file1', safe='') + '$'
            },
            {
                'text': capture,
                'method': 'POST',
                'url': base + url_quote('target/dir/file2', safe='') + '$'
            }
        ]
        self.add_rules(rules)

        create_file(_dir, 'file1', "This is file1")
        create_file(_dir, 'dir/file2', "This is file2")

        self.container.files.recursive_put(_dir, './target/')

        self.assertEqual(_captures[0]['headers']['X-LXD-type'], 'directory')
        self.assertEqual(_captures[1]['body'], b"This is file1")
        self.assertEqual(_captures[2]['headers']['X-LXD-type'], 'directory')
        self.assertEqual(_captures[3]['body'], b"This is file2")
def url_fragment(self):
    """ The ``id`` of this node, suitable for use in a url fragment. """
    return url_quote(self.id)
def main_bigg(args=None, urlopen=urlopen):
    """Entry point for BiGG import program.

    If the ``args`` are provided, these should be a list of strings that
    will be used instead of ``sys.argv[1:]``. This is mostly useful for
    testing.
    """
    parser = argparse.ArgumentParser(
        description='Import from BiGG database')
    parser.add_argument('--dest', metavar='path', default='.',
                        help='Destination directory (default is ".")')
    parser.add_argument('--no-exchange', action='store_true',
                        help=('Disable importing exchange reactions as'
                              ' exchange compound file.'))
    parser.add_argument('--split-subsystem', action='store_true',
                        help='Enable splitting reaction files by subsystem')
    parser.add_argument('--merge-compounds', action='store_true',
                        help=('Merge identical compounds occurring in various'
                              ' compartments.'))
    parser.add_argument('--force', action='store_true',
                        help='Enable overwriting model files')
    parser.add_argument('id', help='BiGG model to import ("list" to see all)')

    args = parser.parse_args(args)

    # Set up logging for the command line interface
    if 'PSAMM_DEBUG' in os.environ:
        level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None)
        if level is not None:
            logging.basicConfig(level=level)
    else:
        logging.basicConfig(
            level=logging.INFO, format='%(levelname)s: %(message)s')

    # Print list of available models
    if args.id == 'list':
        print('Available models:')
        f = urlopen('http://bigg.ucsd.edu/api/v2/models')
        doc = json.loads(f.read().decode('utf-8'))
        results = doc['results']
        id_width = min(max(len(result['bigg_id']) for result in results), 16)
        for result in sorted(results, key=lambda x: x.get('organism')):
            print('{} {}'.format(
                result.get('bigg_id').ljust(id_width),
                result.get('organism')))
        return 0

    importer_entry = None
    try:
        importer_entry = next(
            pkg_resources.iter_entry_points('psamm.importer', 'JSON'))
    except StopIteration:
        logger.error('Failed to locate the COBRA JSON model importer!')
        sys.exit(-1)

    importer_class = importer_entry.load()
    importer = importer_class()

    try:
        f = urlopen(
            'http://bigg.ucsd.edu/api/v2/models/{}/download'.format(
                url_quote(args.id)))
        model = importer.import_model(codecs.getreader('utf-8')(f))
    except ModelLoadError as e:
        logger.error('Failed to load model!', exc_info=True)
        importer.help()
        parser.error(text_type(e))
    except ParseError as e:
        logger.error('Failed to parse model!', exc_info=True)
        logger.error(text_type(e))
        sys.exit(-1)

    if args.merge_compounds:
        compounds_before = len(model.compounds)
        sbml.merge_equivalent_compounds(model)
        if len(model.compounds) < compounds_before:
            logger.info(
                'Merged {} compound entries into {} entries by'
                ' removing duplicates in various compartments'.format(
                    compounds_before, len(model.compounds)))

    print('Model: {}'.format(model.name))
    print('- Biomass reaction: {}'.format(model.biomass_reaction))
    print('- Compartments: {}'.format(len(model.compartments)))
    print('- Compounds: {}'.format(len(model.compounds)))
    print('- Reactions: {}'.format(len(model.reactions)))
    print('- Genes: {}'.format(count_genes(model)))

    # Check if dest directory is empty. If we get an error assume that the
    # directory does not exist.
    dest_is_empty = False
    try:
        dest_is_empty = len(os.listdir(args.dest)) == 0
    except OSError:
        dest_is_empty = True

    if not dest_is_empty:
        if not args.force:
            logger.error('Destination directory is not empty. Use --force'
                         ' option to proceed anyway, overwriting any existing'
                         ' files in {}'.format(args.dest))
            return 1
        else:
            logger.warning('Destination directory is not empty, overwriting'
                           ' existing files in {}'.format(args.dest))

    # Create destination directory if not exists
    dest = args.dest
    mkdir_p(dest)

    convert_exchange = not args.no_exchange
    write_yaml_model(model, dest, convert_exchange=convert_exchange,
                     split_subsystem=args.split_subsystem)
def encode_for_url(url_component, safe=SAFE_CHARS):
    # pylint: disable=import-error
    from six.moves.urllib.parse import quote as url_quote
    return url_quote(url_component, safe)
def getUrlFromFilename(self, filename):
    """Construct URL from filename."""
    components = util.splitpath(
        util.getRelativePath(self.basepath, filename))
    url = '/'.join([url_quote(component, '') for component in components])
    return self.baseurl + url
def getUrlFromFilename(self, filename):
    """Construct URL from filename."""
    components = util.splitpath(util.getRelativePath(self.basepath, filename))
    url = '/'.join([url_quote(component, '') for component in components])
    return self.baseurl + url
def main_bigg(args=None, urlopen=urlopen):
    """Entry point for BiGG import program.

    If the ``args`` are provided, these should be a list of strings that
    will be used instead of ``sys.argv[1:]``. This is mostly useful for
    testing.
    """
    parser = argparse.ArgumentParser(description='Import from BiGG database')
    parser.add_argument('--dest', metavar='path', default='.',
                        help='Destination directory (default is ".")')
    parser.add_argument('--no-exchange', action='store_true',
                        help=('Disable importing exchange reactions as'
                              ' exchange compound file.'))
    parser.add_argument('--split-subsystem', action='store_true',
                        help='Enable splitting reaction files by subsystem')
    parser.add_argument('--merge-compounds', action='store_true',
                        help=('Merge identical compounds occurring in various'
                              ' compartments.'))
    parser.add_argument('--force', action='store_true',
                        help='Enable overwriting model files')
    parser.add_argument('id', help='BiGG model to import ("list" to see all)')

    args = parser.parse_args(args)

    # Set up logging for the command line interface
    if 'PSAMM_DEBUG' in os.environ:
        level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None)
        if level is not None:
            logging.basicConfig(level=level)
    else:
        logging.basicConfig(level=logging.INFO,
                            format='%(levelname)s: %(message)s')

    # Print list of available models
    if args.id == 'list':
        print('Available models:')
        f = urlopen('http://bigg.ucsd.edu/api/v2/models')
        doc = json.loads(f.read().decode('utf-8'))
        results = doc['results']
        id_width = min(max(len(result['bigg_id']) for result in results), 16)
        for result in sorted(results, key=lambda x: x.get('organism')):
            print('{} {}'.format(
                result.get('bigg_id').ljust(id_width),
                result.get('organism')))
        return 0

    importer_entry = None
    try:
        importer_entry = next(
            pkg_resources.iter_entry_points('psamm.importer', 'JSON'))
    except StopIteration:
        logger.error('Failed to locate the COBRA JSON model importer!')
        sys.exit(-1)

    importer_class = importer_entry.load()
    importer = importer_class()

    try:
        f = urlopen('http://bigg.ucsd.edu/api/v2/models/{}/download'.format(
            url_quote(args.id)))
        model = importer.import_model(codecs.getreader('utf-8')(f))
    except ModelLoadError as e:
        logger.error('Failed to load model!', exc_info=True)
        importer.help()
        parser.error(text_type(e))
    except ParseError as e:
        logger.error('Failed to parse model!', exc_info=True)
        logger.error(text_type(e))
        sys.exit(-1)

    if args.merge_compounds:
        compounds_before = len(model.compounds)
        sbml.merge_equivalent_compounds(model)
        if len(model.compounds) < compounds_before:
            logger.info('Merged {} compound entries into {} entries by'
                        ' removing duplicates in various compartments'.format(
                            compounds_before, len(model.compounds)))

    print('Model: {}'.format(model.name))
    print('- Biomass reaction: {}'.format(model.biomass_reaction))
    print('- Compartments: {}'.format(len(model.compartments)))
    print('- Compounds: {}'.format(len(model.compounds)))
    print('- Reactions: {}'.format(len(model.reactions)))
    print('- Genes: {}'.format(count_genes(model)))

    # Check if dest directory is empty. If we get an error assume that the
    # directory does not exist.
    dest_is_empty = False
    try:
        dest_is_empty = len(os.listdir(args.dest)) == 0
    except OSError:
        dest_is_empty = True

    if not dest_is_empty:
        if not args.force:
            logger.error('Destination directory is not empty. Use --force'
                         ' option to proceed anyway, overwriting any existing'
                         ' files in {}'.format(args.dest))
            return 1
        else:
            logger.warning('Destination directory is not empty, overwriting'
                           ' existing files in {}'.format(args.dest))

    # Create destination directory if not exists
    dest = args.dest
    mkdir_p(dest)

    convert_exchange = not args.no_exchange
    write_yaml_model(model, dest, convert_exchange=convert_exchange,
                     split_subsystem=args.split_subsystem)
def get_api_uri(self, path):
    """ Returns the full api uri for the given path. """
    return self.base_uri + url_quote(path)
def get_uri_for_bounty_submission(submission):
    """ returns the uri for a given bounty submission. """
    return 'https://tracker.bugcrowd.com/%s/submissions/%s' % (
        url_quote(submission['bounty_code']),
        url_quote(submission['reference_number'])
    )
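# A usage sketch for the function above, with hypothetical field values:
# both path segments are percent-encoded, so a space in the bounty code
# becomes %20.
# get_uri_for_bounty_submission(
#     {'bounty_code': 'acme corp', 'reference_number': '42'})
# -> 'https://tracker.bugcrowd.com/acme%20corp/submissions/42'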