Example #1
def openFBlinksNewTab(browser,linkList):
	main_window = browser.current_window_handle
	fr=open("/home/ubuntu/data/scripts/data.txt","a")
	# Open the link in a new tab by sending key strokes on the element
	count=1
	for first_link in linkList:
		try:
			
			print " "+str(count),
			# Use: Keys.CONTROL + Keys.SHIFT + Keys.RETURN to open tab on top of the stack 
			#first_link.send_keys(Keys.CONTROL + Keys.RETURN)
			#body = browser.find_element_by_tag_name("body")
			#body.send_keys(Keys.CONTROL + 't')
			# Switch tab to the new tab, which we will assume is the next one on the right
			#browser.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.TAB)

			# Put focus on current window which will, in fact, put focus on the current visible tab
			#browser.switch_to_window(main_window)
			count=count+1
			#print "Link"+str(count)+":"+str(first_link)
			#print count,
			browser.set_page_load_timeout(130)
			browser.get(first_link)
			# do whatever you have to do on this page, we will just go to sleep for now
			#time.sleep(10)
			#break
			# Close current tab
			time.sleep(5)

			for _ in range(5):
				browser.find_element_by_tag_name("body").send_keys(Keys.PAGE_DOWN)
				browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
				time.sleep(2)
			browser.find_element_by_tag_name("body").send_keys(Keys.PAGE_DOWN)
			#wd.save_screenshot(os.path.join("/home/ubuntu/data/scripts",(str(count)+".png")))
			#print "User scroll now"
			#time.sleep(160)
			
			results=browser.find_element_by_id("initial_browse_result")
			brief=results.find_element_by_css_selector('._4-u2._3csh._4-u8')
			briefHeading=brief.find_element_by_class_name("_32lj")
			briefD=brief.find_element_by_class_name("_32lk")
			

			heading=escapechars(briefHeading.text)
			Description=escapechars(briefD.text)
			
			
			posthash=str(heading)
			posthash=getmd5(posthash)
			time.sleep(5)
			#pdb.set_trace()
			#print "art1 data",
			articles=results.find_elements_by_css_selector("._4-u2._2avg._2_wj._4-u8")
			try:
				art1=articles[0].find_elements_by_class_name("_22p7")[0]
				articles=articles[0]
			except:
				art1=articles[1].find_elements_by_class_name("_22p7")[0]
				articles=articles[1]
			imgTag=art1.find_element_by_tag_name("img")
			imgSrc=imgTag.get_attribute("src")
			imgFile = urllib.URLopener()
			if "jpg" in imgSrc or "jpeg" in imgSrc:
				imageType=".jpg"
			else:
				imageType=".png"
			imgFile.retrieve(imgSrc, "1"+posthash+imageType)
			cropImage("1"+posthash+imageType)
			ftpUpl.uploadFileToFtp("1"+posthash+".png")
			art1=art1.find_element_by_css_selector(".mbs._6m6").find_element_by_tag_name("a")
			link1=art1.get_attribute("href")
			art1=escapechars(art1.text)
			#print "art2 data",
			art2=articles.find_elements_by_class_name("_22p7")[1]
			imgTag=art2.find_element_by_tag_name("img")
			imgSrc=imgTag.get_attribute("src")
			if "jpg" in imgSrc or "jpeg" in imgSrc:
				imageType=".jpg"
			else:
				imageType=".png"
			imgFile = urllib.URLopener()
			imgFile.retrieve(imgSrc, "2"+posthash+imageType)
			cropImage("2"+posthash+imageType)
			ftpUpl.uploadFileToFtp("2"+posthash+".png")
			art2=art2.find_element_by_css_selector(".mbs._6m6").find_element_by_tag_name("a")
			link2=art2.get_attribute("href")
			art2=escapechars(art2.text)
			l1=link1[30:]
			
			l1=urllib.unquote(l1).decode('utf8')
			l1=l1.split("&h=")[0]
			l2=link2[30:]
			l2=urllib.unquote(l2).decode('utf8')
			l2=l2.split("&h=")[0]			
		except Exception,e:
			#print '\033[1;31mGetting Stories:\033[1;m',
			f=open("Tracebacklogs.log","a")
			f.write(traceback.format_exc())
			f.close()
			stories=browser.find_elements_by_class_name("fbStoryAttachmentImage")
			story1=getstory(stories,0,posthash)
			story2=getstory(stories,1,posthash)
			if story2!=None and story1!=None:
				(art1,l1)=story1
				(art2,l2)=story2
				if l1.count("http")>1:
					l1=l1[30:]
					l1=l1.split("&h=")[0]
				l1=urllib.unquote(l1).decode('utf8')
				if l2.count("http")>1:
					l2=l2[30:]
					l2=l2.split("&h=")[0]
				l2=urllib.unquote(l2).decode('utf8')
			else:
				print '\033[1;31mUnable to get stories:\033[1;m',
				return
		
	
		lf=open("links.text","a")
		lf.write(first_link)
		lf.write("\n")
		lf.close()
		
		'''
		postSuccess=awsPost.main(getmd5(heading),heading.encode("utf-8"), Description.encode("utf-8"), art1.encode("utf-8"),l1.encode("utf-8"),art2.encode("utf-8"),l2.encode("utf-8"))			
		
		if postSuccess:
		'''
		
		postSuccess=rds.main(posthash,heading.encode("utf-8"), Description.encode("utf-8"), art1.encode("utf-8"),l1.encode("utf-8"),art2.encode("utf-8"),l2.encode("utf-8"))			
Example #2
root_directory=root_directory.split('=',1)
root_directory=root_directory[1]

print url
print tag
print bug_start
print bug_end
print max_timeout_secs
print root_directory

directory=os.getcwd()+root_directory
#os.chdir(directory)

if not os.path.exists(directory):
    os.makedirs(directory)

bug_range=int(bug_end)-int(bug_start)

for i in range(bug_range+1):
    url_for_search=url+"/browse/"+str(tag)+"-"+str(bug_start)
    savefile= urllib.URLopener()
    save_directory=directory+"/"+str(bug_start)+".txt"
    savefile.retrieve(url_for_search, save_directory)
    print "Downloading bug # " +str(bug_start)
    random_timeout=random.randrange(1,int(max_timeout_secs))
    print "timeout=" +str(random_timeout)
    time.sleep(random_timeout)
    bug_start=int(bug_start)+1
    

Example #3
    return images


def onehot_labels(labels):
    return np.eye(100)[labels]


def unpickle(file):
    import cPickle
    with open(file, 'rb') as fo:
        return cPickle.load(fo)


tt = urllib.URLopener()
tt.retrieve("http://kashaaf.com/cifar-100-python/train", "train")
tt.retrieve("http://kashaaf.com/cifar-100-python/test", "test")

X = get_proper_images(unpickle('train')['data'])
Y = onehot_labels(unpickle('train')['fine_labels'])
X_test = get_proper_images(unpickle('test')['data'])
Y_test = onehot_labels(unpickle('test')['fine_labels'])
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=15.)
network = input_data(shape=[None, 32, 32, 3],
                     data_preprocessing=img_prep,
Example #4
def slave_download(target):
    if os.path.isfile(slave_jar):
        os.remove(slave_jar)

    loader = urllib.URLopener()
    loader.retrieve(os.environ['JENKINS_URL'] + '/jnlpJars/slave.jar', '/var/lib/jenkins/slave.jar')
Example #5
def __unzip(source, target):
    with contextlib.closing(zipfile.ZipFile(source, "r")) as z:
        z.extractall(target)


# ==== Entry Point ====

# check for default destination path
if (_out_path_ == "/tmp") or (_out_path_ == "/tmp/"):
    # destination path set required
    xbmcgui.Dialog().ok(_addonname_, _lang_(30012))
    xbmcaddon.Addon().openSettings()
    sys.exit(0)

# downloader
dlfile = urllib.URLopener()

# show main dialog before start update process
dialog = xbmcgui.Dialog().yesno(_addonname_, _lang_(30001), "", _lang_(30002))

# check for no list update
if dialog:
    # update request

    # create progress bar
    pbar = xbmcgui.DialogProgress()
    pbar.create(_addonname_, _lang_(30008))
    pbar.update(10)

    try:
        # try to get config file
Example #6
def uploadFileByUrl(url):
    if not url:
        return
    file = urllib.URLopener()
    file.retrieve(url, IMG_LOCATION)
Example #7
import urllib
import os

l = ['Thumb', 'Index', 'Middle', 'Ring', 'Little']

count = 1
for i in range(16):
    for j in l:
        url = 'http://www.unilorin.edu.ng/step-b/biometrics/Female/Prints/' + 'LFinger/' + str(
            j) + '/f' + str(i) + '.bmp'
        image = urllib.URLopener()
        name = str(count) + '.bmp'
        count += 1
        image.retrieve(url, name)
    for k in l:
        url = 'http://www.unilorin.edu.ng/step-b/biometrics/Female/Prints/' + 'RFinger/' + str(
            k) + '/f' + str(i) + '.bmp'
        name = str(count) + '.bmp'
        count += 1
        image = urllib.URLopener()
        image.retrieve(url, name)
Example #8
}
sd = OrderedDict([(map[k] if k in map else k, v) for k, v in sd.items()])
torch.save(sd, path.join("models", "vgg19-d01eb7cb.pth"))

# Download the VGG-16 model and fix the layer names
print("Downloading the VGG-16 model")
sd = load_url("https://web.eecs.umich.edu/~justincj/models/vgg16-00b39a1b.pth")
map = {
    'classifier.1.weight': u'classifier.0.weight',
    'classifier.1.bias': u'classifier.0.bias',
    'classifier.4.weight': u'classifier.3.weight',
    'classifier.4.bias': u'classifier.3.bias'
}
sd = OrderedDict([(map[k] if k in map else k, v) for k, v in sd.items()])
torch.save(sd, path.join("models", "vgg16-00b39a1b.pth"))

# Download the NIN model
print("Downloading the NIN model")
if version_info[0] < 3:
    import urllib
    urllib.URLopener().retrieve(
        "https://raw.githubusercontent.com/ProGamerGov/pytorch-nin/master/nin_imagenet.pth",
        path.join("models", "nin_imagenet.pth"))
else:
    import urllib.request
    urllib.request.urlretrieve(
        "https://raw.githubusercontent.com/ProGamerGov/pytorch-nin/master/nin_imagenet.pth",
        path.join("models", "nin_imagenet.pth"))

print("All models have been successfully downloaded")
Example #9
def retrieve_database(database_url, filepath):
    # if jobtime:
    #     print print_msg['download_start']
    # t0 = time.time()
    database = urllib.URLopener()
    database.retrieve(database_url, filepath)
Example #10
class MonitorSpider(scrapy.Spider):
    csvPath = 'https://s3.amazonaws.com/shichaoji/diaryID.csv'

    name = 'monitor'
    allowed_domains = ['www.soyoung.com']
    link = 'http://www.soyoung.com/dpg'
    start_urls = []

    opener = urllib.URLopener()
    fh = opener.open(csvPath)
    for line in fh.readlines():
        start_urls.append(link + line.strip())
    start_urls = start_urls[1:]

    #df = pd.read_csv(csvPath)
    #for l in df['group_id'].tolist():
    #start_urls.append(link+str(l))

    print 'starting: ', len(start_urls), 'first: ', start_urls[0]

    def parse(self, response):

        if response.url == 'http://www.soyoung.com/dp':
            return

        basic = {}

        basic['diary_link'] = response.url
        basic['diary_name'] = response.css(
            'div.diary-info > h2::text').extract()[0]

        user = response.css('div.diary-info > div.avatar-box > p > a')
        basic['user_name'] = user.css('::text').extract()[0].strip()
        basic['user_link'] = user.css('::attr(href)').extract()[0].strip()

        dinfo = response.css('div.diary-info > div.info-box > div.row')

        try:
            proj, dtime, hos, doc, prod, price, pre_img = dinfo
        except Exception as e:
            print 'X1', e

        try:
            basic['project_name'] = proj.css(
                'div.value.tag::text').extract()[0].strip()
        except Exception as e:
            print 'pj', e

        try:
            basic['diary_date'] = dtime.css(
                'div.value.date::text').extract()[0].strip()
        except Exception as e:
            print 'pt', e

        try:
            hx = hos.css('a::text').extract()
            hy = hos.css('a::attr(href)').extract()
            if len(hx) > 0 and len(hy) > 0:
                basic['hospital'] = hx[0].strip()
                basic['hospital_link'] = hy[0].strip()

        except Exception as e:
            print 'hp', e

        try:
            dx = doc.css('a::text').extract()
            dy = doc.css('a::attr(href)').extract()

            if len(dx) > 0 and len(dy) > 0:
                basic['doctor_name'] = dx[0].strip()
                basic['doctor_link'] = dy[0].strip()
        except Exception as e:
            print 'dc', e

        try:
            basic['product_name'] = prod.css('a::text').extract()[0].strip()
            basic['product_link'] = prod.css(
                'a::attr(href)').extract()[0].strip()
        except Exception as e:
            print 'pd', e

        try:
            basic['price'] = price.css(
                'div.value.price::text').extract()[0].strip()
        except Exception as e:
            print 'pc', e

        try:
            m = 0
            for img in pre_img.css('div.before-photos > a'):
                m += 1
                basic['pre_surg_pic' +
                      str(m)] = img.css('::attr(href)').extract()[0].strip()

        except Exception as e:
            print 'pi', e

        for i in response.css('div.diary-list > ul > li.diary-item'):
            item = DiaryItem(basic)

            item['post_title'] = i.css('span.day::text').extract()[0]
            item['post_link'] = i.css(
                'p.describe > a::attr(href)').extract()[0]
            item['post_text'] = i.css('p.describe > a::text').extract()[0]

            n = 0
            for p in i.css(
                    'ul.photo-list > li > a > img::attr(data-img)').extract():
                n += 1
                item['post_image' + str(n)] = p
            tp = 'photo-diary'

            vid = i.css('div.video-poster > a > img::attr(data-img)').extract()
            if len(vid) > 0:
                tp = 'video-diary'
                item['video_image'] = vid[0]

            item['post_type'] = tp

            coll = i.css('div.other-box a::text').extract()
            item['views'] = coll[0]
            item['comments'] = coll[1]
            item['favor'] = coll[2]

            yield item
Example #11
    def loginUser(self, params={}):
        if self.__settings__.getSetting("auth"):
            xbmc.executebuiltin(
                "Notification(%s, %s, 2500)" %
                (self.localize('Auth'), self.localize('Already logged in')))
            return

        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=False)
        keyboardUser = xbmc.Keyboard("", self.localize("Input Username:"******"", self.localize("Input Password:"******"<img src='/captcha\?captcha_id=(\d+)'").search(
            content)
        if captcha:
            urllib.URLopener().retrieve(
                self.URL_SECURE + '/captcha?captcha_id=' + captcha.group(1),
                tempfile.gettempdir() + '/captcha.png')
            window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
            image = xbmcgui.ControlImage(
                460, 20, 360, 160,
                tempfile.gettempdir() + '/captcha.png')
            window.addControl(image)
            keyboardCaptcha = xbmc.Keyboard(
                "", self.localize("Input symbols from CAPTCHA image:"))
            keyboardCaptcha.doModal()
            captchaText = keyboardCaptcha.getText()
            captchaId = captcha.group(1)
            window.removeControl(image)
            if not captchaText:
                return
        else:
            captchaText = captchaId = ''

        try:
            cookieJar = cookielib.CookieJar()
            opener = urllib2.build_opener(
                urllib2.HTTPCookieProcessor(cookieJar))
            data = urllib.urlencode({
                'login': username,
                'password': password,
                'flag_permanent': 1,
                'captcha_value': captchaText,
                'captcha_id': captchaId
            })
            value = opener.open(self.URL_SECURE + "/login", data).read()
            if re.compile("<a href='/logout'>").search(value):
                xbmc.executebuiltin("Notification(%s, %s, 2500)" %
                                    (self.localize('Auth'),
                                     self.localize('Login successfull')))
                for cookie in cookieJar:
                    if cookie.name == 'ukey':
                        self.__settings__.setSetting("auth",
                                                     'ukey=' + cookie.value)
            else:
                xbmc.executebuiltin(
                    "Notification(%s, %s, 2500)" %
                    (self.localize('Auth'), self.localize('Login failed')))
                self.loginUser()
        except urllib2.HTTPError, e:
            print self.__plugin__ + " loginUser() exception: " + str(e)
Example #12
	def GET():
			try:
				try:
					grey = Style.DIM+Fore.WHITE
					# site = raw_input("[?] Enter URL:\n[?] > ") #Taking URL
					site = options.url
					if 'https://' in site:
						pass
					elif 'http://' in site:
						pass
					else:
						site = "http://"+site
					finalurl = urlparse.urlparse(site)
					urldata = urlparse.parse_qsl(finalurl.query)
					domain0 = '{uri.scheme}://{uri.netloc}/'.format(uri=finalurl)
					domain = domain0.replace("https://","").replace("http://","").replace("www.","").replace("/","")
					print (Style.DIM+Fore.WHITE+"[+] Checking if "+domain+" is available..."+Style.RESET_ALL)
					connection = httplib.HTTPConnection(domain)
					connection.connect()
					print("[+] "+Fore.GREEN+domain+" is available! Good!"+Style.RESET_ALL)
					url = site
					paraname = []
					paravalue = []

					wordlist = 'wordlist.txt'
					print(grey+"[+] Using Default wordlist..."+Style.RESET_ALL)

					payloads = []
					wordlistimport(wordlist,payloads)
					lop = str(len(payloads))
					grey = Style.DIM+Fore.WHITE
					print(Style.DIM+Fore.WHITE+"[+] "+lop+" Payloads loaded..."+Style.RESET_ALL)
					print("[+] Bruteforce start:")
					o = urlparse.urlparse(site)
					parameters = urlparse.parse_qs(o.query,keep_blank_values=True)
					path = urlparse.urlparse(site).scheme+"://"+urlparse.urlparse(site).netloc+urlparse.urlparse(site).path
					for para in parameters: #Arranging parameters and values.
						for i in parameters[para]:
							paraname.append(para)
							paravalue.append(i)
					total = 0
					c = 0
					fpar = []
					fresult = []
					progress = 0
					for pn, pv in zip(paraname,paravalue): #Scanning the parameter.
						print(grey+"[+] Testing '"+pn+"' parameter..."+Style.RESET_ALL)
						fpar.append(str(pn))
						for x in payloads: #
							validate = x.translate(None, whitespace)
							if validate == "":
								progress = progress + 1
							else:
								sys.stdout.write("\r[+] %i / %s payloads injected..."% (progress,len(payloads)))
								sys.stdout.flush()
								progress = progress + 1
								enc = urllib.quote_plus(x)
								data = path+"?"+pn+"="+pv+enc
								u = urllib.URLopener()
								u.addheaders = []
								if options.cookie is not None:
									u.addheader('Cookie',options.cookie)
								if options.user_agent is not None:
									u.addheader('User-Agent',options.user_agent)
								f = u.open(data)
								sourcecode = f.read()
								f.close()
								# page = urllib.urlopen(data)
								# sourcecode = page.read()
								if x in sourcecode:
									print(Style.BRIGHT+Fore.RED+"\n[!]"+" XSS Vulnerability Found! \n"+Fore.RED+Style.BRIGHT+"[!]"+" Parameter:\t%s\n"+Fore.RED+Style.BRIGHT+"[!]"+" Payload:\t%s"+Style.RESET_ALL)%(pn,x)
									fresult.append("  Vulnerable  ")
									c = 1
									total = total+1
									progress = progress + 1
									break
								else:
									c = 0
						if c == 0:
							print(Style.BRIGHT+Fore.GREEN+"\n[+]"+Style.RESET_ALL+Style.DIM+Fore.WHITE+" '%s' parameter not vulnerable."+Style.RESET_ALL)%pn
							fresult.append("Not Vulnerable")
							progress = progress + 1
							pass
						progress = 0
					complete(fpar,fresult,total,domain)
				except(httplib.HTTPResponse, socket.error) as Exit:
					print(Style.BRIGHT+Fore.RED+"[!] Site "+domain+" is offline!"+Style.RESET_ALL)
					again()
			except(KeyboardInterrupt) as Exit:
				print("\nExit...")
Example #13
    # clean format
    for f in release['formats']:
        if '@text' in f:
            del f['@text']

    dump_json_f.write(json.dumps(release) + '\n')

    processed += 1
    if not processed % 10000:
        print("Processed %d releases" % processed)
    return True


if os.path.isfile(dump_gz):
    print("Dump file already found (%s)" % dump_gz)
else:
    print("Downloading Discogs releases data dump archive (%s)" % dump_url)
    urllib.URLopener().retrieve(dump_url, dump_gz, reporthook=download_progress)
    print("")

if os.path.isfile(dump_json):
    print("Json dump file already found (%s)" % dump_json)
else:
    print("Preprocessing data dump archive into json dump (%s)" % dump_json)
    dump_json_f = open(dump_json, 'w')
    xmltodict.parse(GzipFile(dump_gz), item_depth=2, item_callback=get_release)

    print("%d releases loaded" % processed)
    print("%d releases skipped due to errors" % errors)
Example #14
    if len(line.split('/')) < 2:
        continue
    repo_owner = line.split('/')[0]
    repo_name = line.split('/')[1].rstrip()
    file_path = '/Dockerfile'
    print 'FILE PATH', file_path
    print repo_owner, repo_name
    download_url = ''
    if (branch_path == ''):
        try:
            file_git = gitapi.repos(repo_owner)(repo_name).contents(
                'Dockerfile').get()
            download_url = file_git.download_url
        except Exception, e:
            print 'NOT FOUND FOR', line
            continue
    else:
        file_git = gitapi.repos(repo_owner)(repo_name).contents(
            file_path + '?ref=' + branch_path).get()
    download_file = urllib.URLopener()
    print download_url
    download_file.retrieve(
        download_url,
        './downloads1/Dockerfile_' + repo_owner + '_' + repo_name)
    count = count + 1
    print count

    #line = fo.readline()
branch_path = ''
#fo.close()
Example #15
import os
import os.path
import sys
import time
import traceback
import urllib
from distutils.core import setup  # , Extension, Command

assert sys.version_info[0] == 2 and sys.version_info[1] >= 7,\
    "requires Python version 2.7 or later, but not Python 3.x"

remote = "https://storage.googleapis.com/tmb-models/"
models = ["bin-000000046-005393.pt"]
for model in models:
    if not os.path.exists(model):
        print("downloading", remote + model)
        urllib.URLopener().retrieve(remote + model, model)

scripts = """
ocrobin-train
ocrobin-pred
""".split()

setup(name='ocrobin',
      version='v0.0',
      author="Thomas Breuel",
      description="Document image binarization.",
      packages=["ocrobin"],
      scripts=scripts,
      data_files=[("share/ocrobin", models)])
Example #16
def do_pmi_diffs():
    global baseCoreClrPath

    # Setup scratch directories. Names are short to avoid path length problems on Windows.
    dotnetcliPath = os.path.abspath(os.path.join(scratch_root, 'cli'))
    jitutilsPath = os.path.abspath(os.path.join(scratch_root, 'jitutils'))
    asmRootPath = os.path.abspath(os.path.join(scratch_root, 'asm'))

    dotnet_tool = 'dotnet.exe' if Is_windows else 'dotnet'

    # Make sure the temporary directories do not exist. If they do already, delete them.

    if not testing:
        # If we can't delete the dotnet tree, it might be because a previous run failed or was
        # cancelled, and the build servers are still running. Try to stop it if that happens.
        if os.path.isdir(dotnetcliPath):
            try:
                log('Removing existing tree: %s' % dotnetcliPath)
                shutil.rmtree(dotnetcliPath, onerror=del_rw)
            except OSError:
                if os.path.isfile(os.path.join(dotnetcliPath, dotnet_tool)):
                    log('Failed to remove existing tree; trying to shutdown the dotnet build servers before trying again.')

                    # Looks like the dotnet tool is still there; try to run it to shut down the build servers.
                    temp_env = my_env
                    temp_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]
                    log('Shutting down build servers')
                    command = ["dotnet", "build-server", "shutdown"]
                    returncode = run_command(command, temp_env)

                    # Try again
                    log('Trying again to remove existing tree: %s' % dotnetcliPath)
                    shutil.rmtree(dotnetcliPath, onerror=del_rw)
                else:
                    log('Failed to remove existing tree')
                    return 1

        if os.path.isdir(jitutilsPath):
            log('Removing existing tree: %s' % jitutilsPath)
            shutil.rmtree(jitutilsPath, onerror=del_rw)
        if os.path.isdir(asmRootPath):
            log('Removing existing tree: %s' % asmRootPath)
            shutil.rmtree(asmRootPath, onerror=del_rw)

        try:
            os.makedirs(dotnetcliPath)
            os.makedirs(jitutilsPath)
            os.makedirs(asmRootPath)
        except OSError:
            if not os.path.isdir(dotnetcliPath):
                log('ERROR: cannot create CLI install directory %s' % dotnetcliPath)
                return 1
            if not os.path.isdir(jitutilsPath):
                log('ERROR: cannot create jitutils install directory %s' % jitutilsPath)
                return 1
            if not os.path.isdir(asmRootPath):
                log('ERROR: cannot create asm directory %s' % asmRootPath)
                return 1

    log('dotnet CLI install directory: %s' % dotnetcliPath)
    log('jitutils install directory: %s' % jitutilsPath)
    log('asm directory: %s' % asmRootPath)

    # Download .NET CLI

    log('Downloading .Net CLI')

    dotnetcliUrl = ""
    dotnetcliFilename = ""

    if Clr_os == 'Linux' and arch == 'x64':
        dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-linux-x64.tar.gz"
    elif Clr_os == 'Linux' and arch == 'arm':
        dotnetcliUrl = "https://dotnetcli.blob.core.windows.net/dotnet/Sdk/release/2.1.4xx/dotnet-sdk-latest-linux-arm.tar.gz"
    elif Clr_os == 'Linux' and arch == 'arm64':
        # Use the latest (3.0) dotnet SDK. Earlier versions don't work.
        dotnetcliUrl = "https://dotnetcli.blob.core.windows.net/dotnet/Sdk/master/dotnet-sdk-latest-linux-arm64.tar.gz"
    elif Clr_os == 'OSX':
        dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-osx-x64.tar.gz"
    elif Clr_os == 'Windows_NT':
        dotnetcliUrl = "https://dotnetcli.azureedge.net/dotnet/Sdk/2.1.402/dotnet-sdk-2.1.402-win-x64.zip"
    else:
        log('ERROR: unknown or unsupported OS (%s) architecture (%s) combination' % (Clr_os, arch))
        return 1

    if Is_windows:
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.zip')
    else:
        dotnetcliFilename = os.path.join(dotnetcliPath, 'dotnetcli-jitutils.tar.gz')

    log('Downloading: %s => %s' % (dotnetcliUrl, dotnetcliFilename))

    if not testing:
        response = urllib2.urlopen(dotnetcliUrl)
        request_url = response.geturl()
        testfile = urllib.URLopener()
        testfile.retrieve(request_url, dotnetcliFilename)

        if not os.path.isfile(dotnetcliFilename):
            log('ERROR: Did not download .Net CLI')
            return 1

    # Install .Net CLI

    log('Unpacking .Net CLI')

    if not testing:
        if Is_windows:
            with zipfile.ZipFile(dotnetcliFilename, "r") as z:
                z.extractall(dotnetcliPath)
        else:
            tar = tarfile.open(dotnetcliFilename)
            tar.extractall(dotnetcliPath)
            tar.close()

        if not os.path.isfile(os.path.join(dotnetcliPath, dotnet_tool)):
            log('ERROR: did not extract .Net CLI from download')
            return 1

    # Add dotnet CLI to PATH we'll use to spawn processes.

    log('Add %s to my PATH' % dotnetcliPath)
    my_env["PATH"] = dotnetcliPath + os.pathsep + my_env["PATH"]

    # Clone jitutils

    command = 'git clone -b master --single-branch %s %s' % (Jitutils_url, jitutilsPath)
    log(command)
    returncode = 0 if testing else os.system(command)
    if returncode != 0:
        log('ERROR: cannot clone jitutils')
        return 1

    # We're going to start running dotnet CLI commands. Unfortunately, once you've done that,
    # the dotnet CLI sticks around with a set of build server processes running. Put all this
    # in a try/finally, and stop the build servers under any circumstance.

    try:

        #
        # Build jitutils, including "dotnet restore"
        #

        # Change directory to the jitutils root

        with ChangeDir(jitutilsPath):

            # Do "dotnet restore"

            command = ["dotnet", "restore"]
            returncode = run_command(command, my_env)

            # Do build

            command = ['build.cmd', '-p'] if Is_windows else ['bash', './build.sh', '-p']
            returncode = run_command(command, my_env)
            if returncode != 0:
                log('ERROR: jitutils build failed')
                return 1

            jitutilsBin = os.path.join(jitutilsPath, "bin")

            if not testing and not os.path.isdir(jitutilsBin):
                log("ERROR: jitutils not correctly built")
                return 1

            jitDiffPath = os.path.join(jitutilsBin, "jit-diff.dll")
            if not testing and not os.path.isfile(jitDiffPath):
                log("ERROR: jit-diff.dll not built")
                return 1

            jitAnalyzePath = os.path.join(jitutilsBin, "jit-analyze.dll")
            if not testing and not os.path.isfile(jitAnalyzePath):
                log("ERROR: jit-analyze.dll not built")
                return 1

            # Add jitutils bin to path for spawned processes

            log('Add %s to my PATH' % jitutilsBin)
            my_env["PATH"] = jitutilsBin + os.pathsep + my_env["PATH"]

        #
        # Run PMI asm diffs
        #

        # We want this script as a whole to return 0 if it succeeds (even if there are diffs) and only
        # return non-zero if there are any fatal errors.
        #
        # TO DO: figure out how to differentiate fatal errors and a return code indicating there are diffs,
        # and have the invoking netci.groovy code act differently for each case.

        # Generate the diffs
        #
        # Invoke command like:
        #   dotnet c:\gh\jitutils\bin\jit-diff.dll diff --pmi --base --base_root f:\gh\coreclr12 --diff --diff_root f:\gh\coreclr10 --arch x64 --build Checked --tag 1 --noanalyze --output f:\output --corelib
        #
        # We pass --noanalyze and call jit-analyze manually. This isn't really necessary, but it does give us better output
        # due to https://github.com/dotnet/jitutils/issues/175.

        altjit_args = []
        if ci_arch is not None and (ci_arch == 'x86_arm_altjit' or ci_arch == 'x64_arm64_altjit'):
            altjit_args = ["--altjit", "protononjit.dll"]

        # Over which set of assemblies should we generate asm?
        # TODO: parameterize this
        asm_source_args = ["--frameworks", "--benchmarks"]

        command = ["dotnet", jitDiffPath, "diff", "--pmi", "--base", "--base_root", baseCoreClrPath, "--diff", "--diff_root", diff_root, "--arch", arch, "--build", build_type, "--tag", "1", "--noanalyze", "--output", asmRootPath] + asm_source_args + altjit_args
        returncode = run_command(command, my_env)

        # We ignore the return code: it is non-zero if there are any diffs. If there are fatal errors here, we will miss them.
        # Question: does jit-diff distinguish between non-zero fatal error code and the existence of diffs?

        # Did we get any diffs?

        baseOutputDir = os.path.join(asmRootPath, "1", "base")
        if not testing and not os.path.isdir(baseOutputDir):
            log("ERROR: base asm not generated")
            return 1

        diffOutputDir = os.path.join(asmRootPath, "1", "diff")
        if not testing and not os.path.isdir(diffOutputDir):
            log("ERROR: diff asm not generated")
            return 1

        # Do the jit-analyze comparison:
        #   dotnet c:\gh\jitutils\bin\jit-analyze.dll --base f:\output\diffs\1\base --recursive --diff f:\output\diffs\1\diff

        command = ["dotnet", jitAnalyzePath, "--recursive", "--base", baseOutputDir, "--diff", diffOutputDir]
        returncode = run_command(command, my_env)
        if returncode != 0:
            # This is not a fatal error.
            log('Compare: %s %s' % (baseOutputDir, diffOutputDir))

    finally:

        # Shutdown the dotnet build servers before cleaning things up
        # TODO: make this shutdown happen anytime after we've run any 'dotnet' commands. I.e., try/finally style.

        log('Shutting down build servers')
        command = ["dotnet", "build-server", "shutdown"]
        returncode = run_command(command, my_env)

    return 0
Example #17
        file.write(fileIn)
        file.close()
        print "Got firmware file from stdin!"
    else:
        error("Read error on stdin!")
else:
    # Download most recent firmware
    firmwareURL = "http://firmware.ardusub.com/Sub/latest/PX4-" + options.frame + "/ArduSub-v2.px4"
    if options.url:
        firmwareURL = options.url
        print "Downloading latest ArduSub firmware from URL..."
    else:
        print "Downloading latest ArduSub " + options.frame + " firmware..."

    try:
        firmwarefile = urllib.URLopener()
        firmwarefile.retrieve(firmwareURL, "/tmp/ArduSub-v2.px4")

    except Exception as e:
        print(e)
        print "Error downloading firmware! Do you have an internet connection? Try 'ping ardusub.com'"
        exit(1)

# Stop screen session with mavproxy
print "Stopping mavproxy"
os.system("sudo screen -X -S mavproxy quit")

# Flash Pixhawk
print "Flashing Pixhawk..."
if (os.system(
        "python -u /home/pi/companion/Tools/px_uploader.py --port /dev/ttyACM0 /tmp/ArduSub-v2.px4"
Example #18
#Defining some variables
user_agent = {
    'User-agent':
    'Mozilla/5.0 ;Macintosh; Intel Mac OS X 10_10_2; AppleWebKit/537.36 ;KHTML, like Gecko; Chrome/40.0.2214.111 Safari/537.36'
}
credentials = {
    'username': username_input,
    'password': password_input,
    'dest': '/home'
}
usernames_file = "usernames-" + now.strftime("%Y-%m-%d") + ".csv"
database = "profiles-" + now.strftime("%Y-%m-%d") + ".csv"

# Get Files
usernamedl = urllib.URLopener()
usernamedl.retrieve(
    'https://s3-us-west-2.amazonaws.com/cupidscrape-usernames/' +
    usernames_file, usernames_file)

#Start a session with OKCupid
sesh = requests.Session()
login_resp = sesh.post('https://www.okcupid.com/login',
                       data=credentials,
                       headers=user_agent)

#Open the usernames file
open_usernames = open(usernames_file, 'r')
usernames = open_usernames.readlines()
open_database = open(database, 'a')
Example #19
# print all the first cell of all the rows
for row in cur.fetchall():
    print row

source = {
    1: "bbc-news",
    2: "techcrunch",
    3: "bbc-sport",
    4: "the-hindu",
    5: "cnn",
    6: "espn"
}
s = source[random.randint(1, 6)]
url = "https://newsapi.org/v1/articles?source=" + s + "&apiKey=74da5482c5fa4de690959100081eb0db"
testfile = urllib.URLopener()
testfile.retrieve(url, "article1.txt")
fo = open("article1.txt", "r")
list_data = json.loads(fo.read())
fo.close()
z = 1
for i in list_data["articles"]:

    t = i["title"]
    d = i["description"]
    u = i["url"]
    a = i["author"]
    try:
        cur.execute("""INSERT INTO info_news VALUES (%s,%s,%s,%s,%s)""",
                    (z, t, d, u, a))
Example #20
# This file is part of the Astrometry.net suite.
# Copyright 2014 Denis Vida.

# The Astrometry.net suite is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, version 2.

# The Astrometry.net suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with the Astrometry.net suite ; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA

from __future__ import print_function

import urllib
import os

prefix = 'http://cdsarc.u-strasbg.fr/ftp/cats/aliases/U/UCAC3/UCAC3/'

for i in range(1, 361):
    name = 'z'+str(i).zfill(3)+'.bz2'
    url_name = prefix+name

    print('Downloading: '+url_name)
    ucac_file = urllib.URLopener()
    ucac_file.retrieve(url_name, name)
Example #21
def checkUpdates(directory):
    """
    Checks for MNPR updates online and lets the user decide what to do
    Args:
        directory (str): Directory of installed MNPR
    """
    print "Checking for updates..."
    # get local mnpr version
    localPath = os.path.join(directory, "version.json")
    with open(localPath, 'r') as f:
        localMNPR = json.load(f)

    # get online mnpr version
    onlinePath = distURL + "/version.json"
    tempPath = os.path.join(directory, "onlineVersion.json")
    downloader = urllib.URLopener()
    try:
        downloader.retrieve(onlinePath, tempPath)
    except IOError:
        print "Maya can't connect to the internet.",
        return
    with open(tempPath, 'r') as f:
        onlineMNPR = json.load(f)
    os.remove(tempPath)

    # check versions
    localVer = localMNPR.pop("version")
    onlineVer = onlineMNPR.pop("version")
    if onlineVer <= localVer:
        return "Nothing to update"

    # delete unnecessary plugin entries depending on OS
    mayaV = int(lib.mayaVersion())
    localOS = "win"
    if cmds.about(mac=True):
        localOS = "mac"
    elif cmds.about(linux=True):
        localOS = "linux"
    # search in local version
    keys2Delete = []
    for key in localMNPR:
        if "/plugins/" in key:
            if "/{0}/{1}".format(mayaV, localOS) not in key:
                keys2Delete.append(key)
                continue
    # delete unnecessary local keys
    for key in keys2Delete:
        localMNPR.pop(key)
    # search in online version
    keys2Delete = []
    for key in onlineMNPR:
        if "/plugins/" in key:
            if "/{0}/{1}".format(mayaV, localOS) not in key:
                keys2Delete.append(key)
                continue
    # delete unnecessary online keys
    for key in keys2Delete:
        onlineMNPR.pop(key)

    print "LOCAL"
    pprint.pprint(localMNPR)
    print "\nONLINE"
    pprint.pprint(onlineMNPR)

    # compare the two versions
    files2Update = []
    for key in onlineMNPR:
        if key in localMNPR:
            for file in onlineMNPR[key]:
                if file in localMNPR[key]:
                    if onlineMNPR[key][file]>localMNPR[key][file]:
                        # online file is newer than local file, download
                        files2Update.append("{0}/{1}".format(key, file))
                else:
                    # file doesn't exist locally, download
                    files2Update.append("{0}/{1}".format(key, file))
        else:
            for file in onlineMNPR[key]:
                files2Update.append("{0}/{1}".format(key, file))

    files2Delete = []
    for key in localMNPR:
        if key in onlineMNPR:
            for file in localMNPR[key]:
                if file not in onlineMNPR[key]:
                    files2Delete.append("{0}/{1}".format(key, file))
        else:
            for file in localMNPR[key]:
                files2Delete.append("{0}/{1}".format(key, file))

    # check if a shelf needs to update, as Maya would then require a restart
    restartMaya = False
    for f2u in files2Update:
        if "/shelves/" in f2u:
            restartMaya = True
    for f2d in files2Delete:
        if "/shelves/" in f2d:
            restartMaya = True

    # update prompt
    mString = "An update for MNPR is available, do you wish to download and install this update?\n\n"
    mString += "Files to be updated:\n"
    if files2Update:
        for fUpdate in files2Update:
            mString += "-. {0}\n".format(fUpdate)
    else:
        mString += "- None -\n"
    mString += "\nFiles to be deleted:\n"
    if files2Delete:
        for fDelete in files2Delete:
            mString += "-. {0}\n".format(fDelete)
    else:
        mString += "- None -\n"
    reply = cmds.confirmDialog(title='Update is available', message=mString, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No', icn="information")
    # don't do anything
    if reply == "No":
        print "Nothing has been updated",
        return

    if restartMaya:
        mString = "The shelf will be updated, so Maya will close automatically after the update has concluded\n\n"
        mString += "No scenes/preferences will be saved upon closure, do you still wish to proceed?"
        reply = cmds.confirmDialog(title='Shelf update', message=mString, button=['Yes', 'No'], defaultButton='Yes', cancelButton='No', dismissString='No', icn="warning")
        if reply == "No":
            print "Nothing has been updated",
            return

    if updateMNPR(directory, files2Update, files2Delete):
        if restartMaya:
            cmds.quit(abort=True)
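
For reference, a hedged sketch of the version.json layout that the version comparison in checkUpdates above appears to assume; the directory keys, file names, and timestamp values here are made up for illustration:

# Hypothetical version.json contents, shown as the Python dict json.load() would return:
# a top-level "version" number plus {directory: {file: timestamp}} entries, which is the
# shape implied by localMNPR.pop("version") and the nested key/file loops above.
exampleMNPR = {
    "version": 1.05,
    "/scripts/": {"mnprSetup.py": 20180512},
    "/plugins/2018/win/": {"MNPR.mll": 20180512},
}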
Example #22
import urllib
import glob

rg_projections = urllib.URLopener()
rg_projections.retrieve(
    "https://rotogrinders.com/projected-stats/nfl-qb.csv?site=draftkings",
    "nfl-qb.csv")
rg_projections.retrieve(
    "https://rotogrinders.com/projected-stats/nfl-rb.csv?site=draftkings",
    "nfl-rb.csv")
rg_projections.retrieve(
    "https://rotogrinders.com/projected-stats/nfl-wr.csv?site=draftkings",
    "nfl-wr.csv")
rg_projections.retrieve(
    "https://rotogrinders.com/projected-stats/nfl-te.csv?site=draftkings",
    "nfl-te.csv")
rg_projections.retrieve(
    "https://rotogrinders.com/projected-stats/nfl-defense.csv?site=draftkings",
    "nfl-defense.csv")

read_files = glob.glob("*.csv")

with open("players.csv", "wb") as outfile:
    outfile.write("Name,Salary,Team,Position,Opp,High,Low,Pts\n")
    for f in read_files:
        with open(f, "rb") as infile:
            outfile.write(infile.read())
Example #23
# download assignment 2 data files using urllib

import urllib

# download sample code
sampleCodeURL = "http://vgc.poly.edu/projects/gx5003-fall2014/week2/lab/data/sample_code.py"
codeFile = urllib.URLopener()
codeFile.retrieve(sampleCodeURL, "sample_code.py")

# download csv file with database
csvURL = "http://vgc.poly.edu/projects/gx5003-fall2014/week2/lab/data/NYC_Jobs_sample.csv"
csvfile = urllib.URLopener()
csvfile.retrieve(csvURL, "NYC_Jobs_sample.csv")

# download input and output files for problems 1-6
fileNamePrefix = "sample_data_problem_"

filePath = "http://vgc.poly.edu/projects/gx5003-fall2014/week2/lab/data/"
inputFilePrefix = "sample_data_problem_"
outputFilePrefix = "sample_output_problem_"

for x in range(1, 7):
    inputFileName = inputFilePrefix + str(x) + ".txt"
    inputFileURL = filePath + inputFileName

    outputFileName = outputFilePrefix + str(x) + ".txt"
    outputFileURL = filePath + outputFileName

    testfile = urllib.URLopener()
    testfile.retrieve(inputFileURL, inputFileName)
    testfile = urllib.URLopener()
    testfile.retrieve(outputFileURL, outputFileName)
Example #24
def download_file(url, save_path):
	""" Downloads url to save_path """
	url_opener = urllib.URLopener()
	url_opener.retrieve(url, save_path)
Example #25
 def datareader(url, opener=urllib.URLopener().open):
     return opener(url).read()
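
A hedged usage sketch for the helper above (Python 2); the URL is illustrative only, and any callable that returns a file-like object can be passed as opener:

# default opener backed by urllib.URLopener
body = datareader("http://example.com/")
# or swap in another opener, e.g. urllib.urlopen
body = datareader("http://example.com/", opener=urllib.urlopen)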
Example #26
def search(catalog, **kwargs):
    """
    Search and retrieve information from a Gator catalog.
    
    Two ways to search for data within a catalog C{name}:
        
        1. You're looking for info on B{one target}, then give the target's
        C{ID} or coordinates (C{ra} and C{dec}), and a search C{radius}.
    
        2. You're looking for information of B{a whole field}, then give the
        field's coordinates (C{ra} and C{dec}), and C{radius}.
    
    If you have a list of targets, you need to loop this function.
    
    If you supply a filename, the results will be saved to that path, and you
    will get the filename back as received from urllib.URLopener (should be the
    same as the input name, unless something went wrong).
    
    If you don't supply a filename, you should leave C{filetype} to the default
    C{tsv}, and the results will be saved to a temporary
    file and deleted after the function is finished. The content of the file
    will be read into a dictionary, as well as the units (two separate
    dictionaries with the same keys, depending on the column names in the
    catalog). The entries in the dictionary are of type C{ndarray}, and will
    be converted to a float-array if possible. If not, the array will consist
    of strings. The comments are also returned as a list of strings.
        
    
    @param catalog: name of a GATOR catalog (e.g. 'II/246/out')
    @type catalog: str
    @keyword filename: name of the file to write the results to (no extension)
    @type filename: str
    @return: filename / catalog data columns, units, comments
    @rtype: str/ record array, dict, list of str
    """
    filename = kwargs.pop('filename', None)  # remove filename from kwargs
    filetype = kwargs.setdefault('filetype', '1')
    if filename is not None and '.' in os.path.basename(filename):
        filetype = os.path.splitext(filename)[1][1:]
    elif filename is not None:
        filename = '%s.%s' % (filename, filetype)

    #-- gradually build URI
    base_url = _get_URI(catalog, **kwargs)
    #-- prepare to open URI
    url = urllib.URLopener()
    filen, msg = url.retrieve(base_url, filename=filename)
    #   maybe we are just interested in the file, not immediately in the content
    if filename is not None:
        logger.info('Querying GATOR source %s and downloading to %s' %
                    (catalog, filen))
        url.close()
        return filen

    #   otherwise, we read everything into a dictionary
    if filetype == '1':
        try:
            results, units, comms = txt2recarray(filen)
        #-- raise an exception when multiple catalogs were specified
        except ValueError:
            raise ValueError, "failed to read %s, perhaps multiple catalogs specified (e.g. III/168 instead of III/168/catalog)" % (
                catalog)
        url.close()
        logger.info('Querying GATOR source %s (%d)' %
                    (catalog, (results is not None and len(results) or 0)))
        return results, units, comms
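
A hedged usage sketch based on the docstring above; the catalog name comes from the docstring's own example ('II/246/out'), while the coordinates, radius, and filename are made-up values:

# 1. Query read into memory: returns the parsed record array, units, and comments.
results, units, comms = search('II/246/out', ra=86.82, dec=-51.07, radius=5.)
# 2. Saved to disk: supplying a filename makes search() return the written filename instead.
filen = search('II/246/out', ra=86.82, dec=-51.07, radius=60., filename='my_field')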
Example #27
import json
import pandas
import urllib

urlGetFile = 'https://vba.dse.vic.gov.au/vba/downloadVSC.do'
cvsFile = './data/SpeciesList.csv'
jsonfile = './data/SpeciesList.json'

SpeciesListFile = urllib.URLopener()
SpeciesListFile.retrieve(urlGetFile, cvsFile)

data = pandas.read_csv(cvsFile, usecols=[0, 1, 2, 4, 17])
json_file = open(jsonfile, 'w')

for line in data.iterrows():
    line[1].to_json(json_file)
    json_file.write('\n')
Example #28
def download_file_by_url(url, full_filename):
    # TODO: add exception handling and retries
    testfile = urllib.URLopener()
    testfile.retrieve(url, full_filename)
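
The TODO above asks for exception handling and retries; a minimal Python 2 sketch of what that could look like (the retry count and delay values are arbitrary choices, not taken from the original):

import time
import urllib


def download_file_by_url_with_retries(url, full_filename, retries=3, delay=2):
    # Try the download up to `retries` times, pausing `delay` seconds between attempts.
    # URLopener.retrieve raises IOError on HTTP or network failures in Python 2.
    for attempt in range(retries):
        try:
            urllib.URLopener().retrieve(url, full_filename)
            return True
        except IOError:
            if attempt == retries - 1:
                raise
            time.sleep(delay)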
Example #29
    def run_selenium_test(self, context, test_name, target_resource_name):
        """		
		:type context ResourceCommandContext
		:param context: 
		:param test_name: 
		:return: 
		"""
        api = self.__initApiSession__(context)
        reservation_details = api.GetReservationDetails(
            context.reservation.reservation_id)
        selenium_resource = SeleniumHub.create_from_context(context)
        current_timestamp = datetime.datetime.strftime(datetime.datetime.now(),
                                                       "%m-%d_%H-%M")
        artifacts_folder_name = "artifacts-" + current_timestamp

        if not target_resource_name:
            target_resource_name = self._get_connected_entity_name(context)
        if target_resource_name in [
                res.Name
                for res in reservation_details.ReservationDescription.Resources
        ]:
            target_resource_details = api.GetResourceDetails(
                target_resource_name)
            target_ip = target_resource_details.FullAddress
            target_port = next(
                attribute.Value
                for attribute in target_resource_details.ResourceAttributes
                if attribute.Name == "WWW_Port")
        else:
            target_service = next(
                service for service in
                reservation_details.ReservationDescription.Services
                if target_resource_name == service.Alias)
            target_service_attributes = {
                att.Name: att.Value
                for att in target_service.Attributes
            }
            target_ip = target_service_attributes["External_URL"]
            if target_ip.startswith("http://"):
                target_ip = target_ip[len("http://"):]
            target_port = 80

        target_url = target_ip + ':' + str(target_port)
        test_url = selenium_resource.tests_location + test_name + ".py"
        api.WriteMessageToReservationOutput(
            context.reservation.reservation_id,
            "Retrieving test: {0}".format(test_url))

        if os.path.isdir(artifacts_folder_name):
            shutil.rmtree(path=artifacts_folder_name, ignore_errors=True)

        os.mkdir(artifacts_folder_name)
        testfile = urllib.URLopener()
        testfile.retrieve(test_url, test_name + ".py")
        api.WriteMessageToReservationOutput(
            context.reservation.reservation_id,
            "Running Test '{0}' with parameters {2}".format(
                sys.executable, test_name + '.py',
                'hub={0}'.format(context.resource.address) +
                ' target={0}'.format(target_url)))

        test_results_filename = os.path.join(artifacts_folder_name,
                                             "test_output.txt")
        return_message = ""
        try:
            # Call the python executable running this driver to run the test.
            test_output = subprocess.check_output([
                sys.executable, test_name + ".py",
                "hub=" + context.resource.address, "target=" + target_url,
                "artifacts_folder=" + artifacts_folder_name
            ],
                                                  stderr=subprocess.STDOUT)
            with open(test_results_filename, mode="w") as output_file:
                output_file.write(test_output)

            return_message = "Test {test_name} Passed, See report in Sandbox attachments".format(
                test_name=test_name)
        except subprocess.CalledProcessError as error:
            with open(test_results_filename,
                      mode="a" if os.path.exists(test_results_filename) else
                      "w") as output_file:
                output_file.write(
                    "Test Failed with output:\n{1}\nAnd error code [{0}]".
                    format(error.returncode, error.output))

            return_message = "Test {test_name} Failed, See report in Sandbox attachments".format(
                test_name=test_name)
        except Exception as ex:
            with open(test_results_filename, mode="w") as output_file:
                output_file.write(ex.message)

            return_message = "Unhandled exception, check attachment for output"
        finally:
            test_results_filename = test_name + "-result-" + current_timestamp
            shutil.make_archive(base_name=test_results_filename,
                                format="zip",
                                root_dir=artifacts_folder_name)

            attach_file_result_code = self._attach_file_to_reservation(
                context, test_results_filename + ".zip",
                test_results_filename + ".zip")
            shutil.rmtree(path=artifacts_folder_name, ignore_errors=True)
            os.remove(test_name + ".py")
            if test_results_filename:
                os.remove(test_results_filename + ".zip")
            if not 200 <= attach_file_result_code < 300:
                return "Error Attaching File to reservation"
            else:
                return return_message
Example #30
def download_cover(url, name):
    image = urllib.URLopener()
    image.retrieve(url, "cover/" + name)
    im = Image.open("cover/" + name)
    im_resize = im.resize((650, 350), Image.ANTIALIAS)
    im_resize.save("cover/" + name)