def test(self, counts, memorys, shared, task_info, replicas=1):
    """Launch one GPU pod per (count, memory) pair and report its allocation.

    For each entry, a pod requesting ``counts[i]`` GPUs (shared or
    exclusive, per ``shared``) and ``memorys[i]`` GiB of GPU memory is
    submitted, inspected, and then deleted.

    Args:
        counts: GPU counts to request, one pod per entry.
        memorys: GPU memory sizes in GiB, parallel to ``counts``.
        shared: use the shared GPU resource name when True, else exclusive.
        task_info: pod template dict; may carry a "namespace" key.
        replicas: unused; kept for interface compatibility.
    """
    test_gpu_name = shared_gpu_name if shared else exclusive_gpu_name
    # Iterate the parallel lists directly instead of range(len(...)).
    for count, memory in zip(counts, memorys):
        resource = {
            test_gpu_name: str(count),
            # GiB -> Mi units expected by the resource spec.
            gpu_memory_name: '{}Mi'.format(memory * 1024)
        }
        name = "{mode}-c-{count}-m-{memory}".format(
            mode="shared" if shared else "exclusive",
            count=count, memory=memory)
        ns = task_info.get("namespace")
        namespace = ns if ns is not None else "default"
        utils.set_name(task_info["data"], name)
        utils.set_resources(task_info["data"], resource)
        print("pod " + name)
        print(resource)
        _, result = self.submit_pod(task_info, blocking=True)
        if not result:
            # Failed to start the pod: clean up and move on.
            self.delete(name, namespace, blocking=True)
            continue
        pod_status = get_pod_info(name, namespace).status
        phase = pod_status.phase
        # container_id has the form "<runtime>://<id>"; keep only the id.
        container_id = pod_status.container_statuses[0].container_id.split(
            "//")[1]
        r = utils.get_container_GPU(container_id, name)
        print("Pod phase: " + phase)
        print("Pod GPU: " + r)
        self.list_node_allocatable_resources()
        self.list_node_allocated_resources()
        self.delete(name, namespace, blocking=True)
def imagevenue(url, name, dest, delim, digits, number):
    """Download all images from an imagevenue gallery.

    Args:
        url: gallery page URL.
        name: base output filename.
        dest: destination directory.
        delim: delimiter between name and number.
        digits: zero-padding width for the number.
        number: starting sequence number.
    """
    print("Downloading images from [imagevenue]...\n")
    links = get_page_links(url, lambda x: "imagevenue.com" in x)
    regex_base_url = re.compile(r'.*imagevenue.com', re.IGNORECASE)
    regex_ext = re.compile(r'\.[a-zA-Z]*$', re.IGNORECASE)
    for link in links:
        try:
            # source image (i.e. "Open image in a new tab")
            img = get_elements(link, "img#thepic")
            base_url_match = regex_base_url.search(link)
            # BUG FIX: the original tested `img is not []`, which is always
            # True (identity comparison against a fresh list); test
            # truthiness instead so empty results are skipped cleanly.
            if base_url_match and img:
                # image name and filetype
                img_url = img[0]['src']
                ext = regex_ext.search(img_url).group(0)
                # image URL and output filename
                new_name = set_name(name, ext, delim, number, digits)
                image_url = "{0}/{1}".format(base_url_match.group(0), img_url)
                # download
                download_file(image_url, new_name, dest, number)
                number += 1
        except Exception:
            # Best-effort: skip any link that fails to parse or download,
            # without swallowing KeyboardInterrupt/SystemExit.
            pass
def imgur(url, name, dest, delim, digits, number):
    """Download every image of an imgur album via its blog-layout view."""
    print("Downloading images from [imgur]...\n")
    # The blog layout exposes one anchor per album image.
    if not url.endswith("/layout/blog"):
        url += "/layout/blog"
    anchors = get_elements(url, "div.item.view.album-view-image-link")
    hrefs = [item.a.get('href') for item in anchors]
    ext_pattern = re.compile(r'\.com/\w*(\.[a-zA-Z]*)$', re.IGNORECASE)
    for href in hrefs:
        try:
            # hrefs are protocol-relative ("//..."): prepend a scheme.
            image_url = "http://" + href[2:]
            ext = ext_pattern.search(image_url).group(1)
            target = set_name(name, ext, delim, number, digits)
            download_file(image_url, target, dest, number)
            number += 1
        except:
            pass
def visit_ClassDef(self, node):
    """Record a class definition.

    Tags the class's line with type 'class' and its name, registers the
    class, then walks its bases, body, and decorator list; each child
    walker's results (line data, variable scopes, functions/variables)
    are folded back into this walker's data under the class's name.
    """
    # print '{0}: ClassDef Name: {1}'.format(node.lineno, node.name)
    utils.set_type(self.data, node.lineno, 'class')
    utils.set_name(self.data, node.lineno, node.name)
    utils.add_class(self.data, node.name)
    # Add class functions
    # Add class variables (only self.* variables)
    # self.generic_visit(node)
    for stmts in [node.bases, node.body, node.decorator_list]:
        for stmt in stmts:
            walker = walk_ast(
                stmt, self.current_scope)  # possibly switch scope to Class
            utils.add_string_to_data(stmt.lineno, walker.data, walker.line)
            # utils.add_function_line(self.data, node.name, stmt.lineno)
            utils.combine_all_data(self.data, walker.data)
            utils.combine_variable_scopes(self.variable_scope,
                                          walker.variable_scope)
            utils.add_function_to_class(self.data, walker.data, node.name)
            utils.add_variables_to_class(self.data, walker.variable_scope,
                                         node.name)
def visit_FunctionDef(self, node):
    """Record a function definition.

    Tags the def's line with type 'func' and its name, registers the
    function and a fresh variable scope for it, walks the argument list
    with a child walker, then walks the body and decorator list; every
    statement line is attributed to this function and each child
    walker's data/scopes are merged back into this walker.
    """
    # print '{0}: FunctionDef - def {1}():'.format(node.lineno, node.name)
    utils.set_type(self.data, node.lineno, 'func')
    utils.set_name(self.data, node.lineno, node.name)
    utils.add_function_def(self.data, node.name, node.lineno)
    self.variable_scope[node.name] = []
    # Walk the argument list inside this function's scope.
    arg_walker = WalkAST(node.name)
    arg_walker.lineno = node.lineno
    arg_walker.visit(node.args)
    utils.combine_data(node.lineno, self.data, arg_walker.data)
    utils.combine_variable_scopes(self.variable_scope,
                                  arg_walker.variable_scope)
    for stmts in [node.body, node.decorator_list]:
        for stmt in stmts:
            walker = walk_ast(stmt, node.name)
            utils.add_string_to_data(stmt.lineno, walker.data, walker.line)
            utils.add_function_line(self.data, node.name, stmt.lineno)
            # utils.combine_data(stmt.lineno, self.data, walker.data)
            utils.combine_all_data(self.data, walker.data)
            utils.combine_variable_scopes(self.variable_scope,
                                          walker.variable_scope)
def mangastream(url, name, dest, delim, digits, number):
    """Download every page of a mangastream chapter.

    The last entry of the chapter's dropdown menu links to the final page
    (".../<chapter>/<num_pages>"), which yields both the base URL and the
    total page count.

    Args:
        url: chapter page URL.
        name, delim, number: unused here; kept for the common downloader
            interface (pages are numbered 1..num_pages directly).
        dest: destination directory.
        digits: zero-padding width for page numbers.
    """
    print("Downloading images from [mangastream]...\n")
    links = [tag.get('href') for tag in get_html(url).findAll(
        "ul", {"class": "dropdown-menu"})[-1].select('li > a')]
    match = re.search(r"(.*\/)(\d*)$", links[-1])
    base_url, num_pages = match.group(1), int(match.group(2))
    for i in range(1, num_pages + 1):
        try:
            image_url = get_html(
                base_url + str(i)).select("#manga-page")[0].get("src")
            new_name = set_name("", ".jpg", "", i, digits)
            download_file(image_url, new_name, dest, i)
        except Exception:
            # Narrowed from a bare except (which also caught
            # KeyboardInterrupt); report and skip the failing page.
            print("exception")
def imagebam(url, name, dest, delim, digits, number):
    """Download all images from an imagebam gallery (single or multi page).

    Args:
        url: gallery page URL.
        name: base output filename.
        dest: destination directory.
        delim: delimiter between name and number.
        digits: zero-padding width for the number.
        number: starting sequence number.
    """
    print("Downloading images from [imagebam]...\n")
    # gallery page numbers (ascending)
    page_count = [int(el.contents[0])
                  for el in get_elements(url, "a.pagination_link")]
    if page_count:
        # multi-page gallery
        links = get_imagebam_htmlcode_links(url, page_count[-1])
    else:
        # single-page gallery
        links = get_page_links(url, lambda x: "imagebam.com" in x)
    # remove any duplicate links
    links = list(unique_everseen(links))
    regex = re.compile(r'\.[a-zA-Z]*$', re.IGNORECASE)
    for link in links:
        try:
            # source image (i.e. "Open image in a new tab")
            src = [el['src'] for el in get_elements(link, 'img')
                   if 'id' in el.attrs]
            if src:  # idiom fix: was `len(src) > 0`
                image_url = src[0]
                # filetype; default to .jpg when the URL has no extension
                match = regex.search(image_url)
                ext = match.group(0) if match is not None else ".jpg"
                # output filename
                new_name = set_name(name, ext, delim, number, digits)
                # download
                download_file(image_url, new_name, dest, number)
                number += 1
        except Exception:
            # Best-effort: skip links that fail to parse or download.
            pass
def imgbox(url, name, dest, delim, digits, number):
    """Download every image from an imgbox gallery page."""
    print("Downloading images from [imgbox]...\n")
    gallery = get_elements(url, '#gallery-view-content a')
    pages = ['https://imgbox.com/' + el['href'] for el in gallery]
    ext_pattern = re.compile(r'(\.[a-zA-Z]*)$', re.IGNORECASE)
    for page in pages:
        try:
            sources = [el['src'] for el in get_elements(page, '#img')]
            image_url = sources[0]
            ext = ext_pattern.search(image_url).group(1)
            target = set_name(name, ext, delim, number, digits)
            download_file(image_url, target, dest, number)
            number += 1
        except:
            pass
def imgur(url, name, dest, delim, digits, number):
    """Download every image embedded in an imgur post page."""
    print("Downloading images from [imgur]...\n")
    elements = get_elements(url, '.post-image-placeholder, .post-image img')
    image_urls = ['https:' + el['src'] for el in elements]
    ext_pattern = re.compile(r'\.com/\w*(\.[a-zA-Z]*)$', re.IGNORECASE)
    for image_url in image_urls:
        try:
            # filetype from the URL tail
            ext = ext_pattern.search(image_url).group(1)
            # output filename
            target = set_name(name, ext, delim, number, digits)
            # download
            download_file(image_url, target, dest, number)
            number += 1
        except:
            pass
def imgbox(url, name, dest, delim, digits, number):
    """Download every image from an imgbox gallery (thumbnail variant)."""
    print("Downloading images from [imgbox]...\n")
    thumbs = [el['src'] for el in
              get_elements(url, '#gallery-view-content img')]
    pattern = re.compile(r'\.com/(\w*)(\.[a-zA-Z]*)$', re.IGNORECASE)
    for thumb in thumbs:
        try:
            # image name and filetype parsed from the thumbnail URL
            match = pattern.search(thumb)
            image = match.group(1)
            ext = match.group(2)
            # full-size image URL and output filename
            image_url = "http://i.imgbox.com/" + image
            target = set_name(name, ext, delim, number, digits)
            # download
            download_file(image_url, target, dest, number)
            number += 1
        except:
            pass
def someimage(url, name, dest, delim, digits, number):
    """Download every image from a someimage.com gallery."""
    print("Downloading images from [someimage]...\n")
    thumbs = get_image_links(url, lambda x: "t1.someimage.com" in x)
    pattern = re.compile(r'\.com/(\w*(\.[a-zA-Z]*))$', re.IGNORECASE)
    for thumb in thumbs:
        try:
            # image name and filetype from the thumbnail URL
            match = pattern.search(thumb)
            image, ext = match.group(1), match.group(2)
            # full-size URL and output filename
            target = set_name(name, ext, delim, number, digits)
            image_url = "http://i1.someimage.com/" + image
            # download
            download_file(image_url, target, dest, number)
            number += 1
        except:
            pass
def hotflick(url, name, dest, delim, digits, number):
    """Download every image from a hotflick.net gallery, following pagination.

    Args:
        url: gallery page URL.
        name: base output filename.
        dest: destination directory.
        delim: delimiter between name and number.
        digits: zero-padding width for the number.
        number: starting sequence number.
    """
    print("Downloading images from [hotflick]...\n")
    # get all page links if the gallery has more than one page
    div = get_html(url).find('div', {"class": "box-paging"})
    gallery_page_links = [str(tag['href'])
                          for tag in div.findAll('a', href=True)]
    # get image links
    if gallery_page_links:  # idiom fix: was `!= []`
        links = []
        for page in gallery_page_links:
            links.extend([link for link in get_page_links(
                "http://hotflick.net/" + page) if "/v/?q=" in link])
    else:
        links = [link for link in get_page_links(url) if "/v/?q=" in link]
    regex = re.compile(r'\.net/\w/v/\?q\=(\d+)\.(.*)(\.\w*)$', re.IGNORECASE)
    for link in links:
        try:
            # image name and filetype
            match = regex.search(link)
            ext = match.group(3)
            # image URL and output filename
            new_name = set_name(name, ext, delim, number, digits)
            image_url = "http://www.hotflick.net/u/n/{0}/{1}{2}".format(
                match.group(1), match.group(2), ext)
            # download
            download_file(image_url, new_name, dest, number)
            number += 1
        except Exception:
            # Narrowed from a bare except; report and skip failing links.
            print("exception")
def upix(url, name, dest, delim, digits, number):
    """Download every image from an upix album."""
    print("Downloading images from [upix]...\n")
    anchors = get_html(url).findAll('a', {"class": "thumb"})
    hrefs = [str(tag['href']) for tag in anchors]
    # Drop a trailing "#none" fragment before joining relative links.
    base_url = url[:-5] if url.endswith("/#none") else url
    ext_pattern = re.compile(r'(\.[a-zA-Z]*)$', re.IGNORECASE)
    for href in hrefs:
        try:
            # image URL and output filename
            image_url = base_url + href
            ext = ext_pattern.search(image_url).group(1)
            target = set_name(name, ext, delim, number, digits)
            # download
            download_file(image_url, target, dest, number)
            number += 1
        except:
            pass
def imgur(url, name, dest, delim, digits, number):
    """Download imgur album images advertised via og:image meta tags."""
    print("Downloading images from [imgur]...\n")
    if not url.endswith("/layout/blog"):
        url += "/layout/blog"
    meta_tags = get_html(url).findAll('meta', {'property': 'og:image'})
    # Skip the first og:image entry, matching the original's links[1:].
    image_urls = [tag['content'] for tag in meta_tags[1:]]
    ext_pattern = re.compile(r'\.com/\w*(\.[a-zA-Z]*)$', re.IGNORECASE)
    for image_url in image_urls:
        try:
            # filetype from the URL tail
            ext = ext_pattern.search(image_url).group(1)
            # output filename
            target = set_name(name, ext, delim, number, digits)
            # download
            download_file(image_url, target, dest, number)
            number += 1
        except:
            pass
def test_set_name(self):
    """Exercise set_name across extension, delimiter, and digit padding."""
    cases = [
        (("abc", "", "", 1, 1), "abc1"),
        (("abc", ".txt", "", 1, 1), "abc1.txt"),
        (("abc", ".txt", "_", 1, 1), "abc_1.txt"),
        (("abc", ".txt", "_", 38, 1), "abc_38.txt"),
        (("abc", ".txt", "_", 38, 6), "abc_000038.txt"),
    ]
    for args, expected in cases:
        self.assertEqual(set_name(*args), expected)
# print(get_node_labels("tusimple")) # remove_node_label("tusimple", "foo") # print(get_node_labels("tusimple")) # remove_node_label("tusimple", "foo") # print(get_node_labels("tusimple")) # append_or_update_node_label("tusimple", "foo", "bar2") # tclient.submit_pod(utils.convert_tuyaco_dict_to_task_info(), True) # namespace = "default" # image = "tensorflow/tensorflow:latest-gpu" # replicas = 1 print("initializing test environment") shared_name = "shared-gpu" resource = {shared_gpu_name: '3'} utils.set_name(task_info["data"], shared_name) utils.set_resources(task_info["data"], resource) print(task_info) tclient.submit_pod(task_info, blocking=True) exclusive_name = "exclusive-gpu" resource = {exclusive_gpu_name: '3'} utils.set_name(task_info["data"], exclusive_name) utils.set_resources(task_info["data"], resource) tclient.submit_pod(task_info, blocking=True) try: print("*" * 50 + "shared count" + "*" * 50) print("*" * 100) counts = [5, 6] tclient.test(counts, [0] * len(counts), True, task_info) print("*" * 50 + "exclusive count" + "*" * 50)
def label_function(self, function_address):
    """Comment and rename the function at `function_address`."""
    # Attach the comment as repeatable (third argument = 1).
    SetFunctionCmt(function_address, self.comment, 1)
    # set_name returns an updated counter; keep it for the next rename.
    updated_count = utils.set_name(function_address, self.name, self.count)
    self.count = updated_count