Example #1
 def download_test(self, test_name, test_hash):
     test_path = config.PATH_TESTS + test_hash
     # Download only if the file doesn't already exist
     if not path.exists(test_path):
         self.logger.info("[Submission {}] Downloading file {} with hash {} from URL: {}".format(
             self.id, test_name, test_hash, self.tests_url + test_name))
         common.download_file(self.tests_url + test_name, test_path)
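
Most of the examples on this page call a project-local helper named common.download_file, whose implementation is not shown. As a rough sketch only: assuming the (url, destination) signature used in Examples #1 and #3, and the RuntimeError-on-failure behavior that Example #3 catches, such a helper might look like the following (the requests-based body is an illustration, not any of these projects' actual code):

import requests

def download_file(url, destination, chunk_size=64 * 1024):
    # Illustrative sketch: stream the response to disk so large files
    # never need to fit in memory. Raises RuntimeError on HTTP errors,
    # matching what Example #3 expects to catch.
    response = requests.get(url, stream=True, timeout=60)
    if response.status_code != 200:
        raise RuntimeError("download of {} failed: HTTP {}".format(url, response.status_code))
    with open(destination, "wb") as f:
        for chunk in response.iter_content(chunk_size):
            f.write(chunk)
    return destination
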
Example #2
def download_poster(self, size, destination, choose):
    '''
    download associated movie poster

    size: w92, w154, w185, w342, w500, or original
          see http://help.themoviedb.org/kb/api/configuration
    destination: path to save the poster as
    '''

    if choose:
        print 'Creating image selection palette(s)...'
        posters = _get_all_posters(self.posters, size)
        poster_qty = len(posters)
        if poster_qty <= 1:
            print 'No palette created,', poster_qty, 'image(s) available'
            return download_file(self.poster.geturl(size), destination)
        draw_mosaic(posters)
        choice = get_input('Choose an image to use for movie poster: ',
                           r'(^$)|(^(Q|q)$)|(^\d{1,2}$)',
                           1, poster_qty)
        if re.match('^(Q|q)$', choice):
            exit()
        if not choice:
            poster_url = self.poster.geturl(size)
        else:
            poster_url = self.posters[int(choice)-1].geturl(size)
    else:
        poster_url = self.poster.geturl(size)
    image = download_file(poster_url, destination)
    if destination == 'temp':
        return image
    is_reduced = reduce_size(destination, 90)
    if is_reduced:
        notify('warning', 'image quality reduced and useless data removed for '
                + os.path.splitext(os.path.basename(destination))[0], sys.stderr)
Example #3
 def download_utility_file(self, url, destination):
     # Only download if not downloaded already
     if not path.exists(destination):
         self.logger.info("[Submission {}]   >> downloading utility file {}".format(self.id, url.split('/')[-1]))
         try:
             common.download_file(url, destination)
         except RuntimeError:
             return False
     return True
Example #4
def update_image_manifests(start_sol, end_sol):
  manifest = common.load_manifest()
  print "Downloading image manifests for sols %d-%d."%(start_sol, end_sol)
  for sol in manifest["sols"]:
    if sol["sol"] >= start_sol and sol["sol"] <= end_sol:
      url = sol["catalog_url"]
      outfile = common.JSON_DIR + "images_sol%d.json"%sol['sol']
      # print "Downloading image manifest for sol %d."%sol["sol"]
      common.download_file(outfile, url)
Example #5
def _save_poster(location, destination, basename, max_size):
    # If there is no art, carry on
    if not location:
        notify('warning', 'no image available for ' + basename)
        return
    download_file(location, destination)
    is_reduced = reduce_size(destination, max_size)
    if is_reduced:
        notify('warning', 'image quality reduced and useless data removed for '
                + basename, sys.stderr)
Example #6
def _get_all_posters(poster_list, size):
    poster_files = []
    for i,poster in enumerate(poster_list):
        show_images_retrieved(i, len(poster_list))
        poster_url = poster.geturl(size)
        poster_files.append(download_file(poster_url, 'temp'))
    return poster_files
Example #7
def get_full_images(start_sol, end_sol, instruments):
  update_image_manifests(start_sol, end_sol)
  if not os.path.isdir(IMG_DIR):
    os.makedirs(IMG_DIR)
  sol_dirs = os.listdir(IMG_DIR)
  r_insts = [{"prefix":INSTRUMENTS[inst]["inst_prefix"]['r'], "dir":INSTRUMENTS[inst]["dir"]} for inst in instruments]
  l_insts = [{"prefix":INSTRUMENTS[inst]["inst_prefix"]['l'], "dir":INSTRUMENTS[inst]["dir"]} for inst in instruments]
  for image_list in common.load_image_lists():
    sol = image_list["sol"]
    sol_dir = "sol%d"%sol
    count_downloaded = 0
    count_skipped = 0
    if not sol_dir in sol_dirs:
      os.makedirs(IMG_DIR + sol_dir)
    instrument_dirs = os.listdir(IMG_DIR + sol_dir)
    for image in image_list["images"]:
      correct_instrument = None
      for inst in r_insts:
        if image["instrument"].startswith(inst["prefix"]):
          correct_instrument = inst
          instrument_dir = inst["dir"] + '/' + "r"
          break
      if not correct_instrument:
        for inst in l_insts:
          if image["instrument"].startswith(inst["prefix"]):
            correct_instrument = inst
            instrument_dir = inst["dir"] + '/' + "l"
            break
      if not inst["dir"] in instrument_dirs:
        os.makedirs(IMG_DIR + sol_dir + "/" + inst["dir"] + "/r")
        os.makedirs(IMG_DIR + sol_dir + "/" + inst["dir"] + "/l")
        instrument_dirs.append(inst["dir"])
      if correct_instrument != None and image["sampleType"] == "full":
        url = image["urlList"]
        local_path = IMG_DIR + sol_dir + "/" + instrument_dir + "/" + url.split("/")[-1]
        if os.path.isfile(local_path):
          count_skipped += 1
          #print "Present; won't download: " + local_path
        else:
          common.download_file(local_path, url)
          count_downloaded += 1
    print "Sol %d: downloaded %d images and skipped %d images."%(int(sol), count_downloaded, count_skipped)
Example #8
	def import_doc(self, doc):
		# example url: http://repozitorij.upr.si/IzpisGradiva.php?id=976&lang=slv
		# three steps are needed:
		# 1) extract pdf url
		# 2) follow it to get to cookied yes-page
		# 3) download the final pdf
		jdata = copy.copy(self.JSON_TEMPLATE)
		print doc.agent_repository_url
		r = common.requests_get_retry(doc.agent_repository_url)
		soup = bs4.BeautifulSoup(r.content.decode('utf-8'), "html.parser")


		vrsta = ""
		vrstatag = soup.find_all(text="Tipologija:")
		if vrstatag:
			vrsta += vrstatag[0].parent.next_sibling.text.lower()
		vrstatag = soup.find_all(text="Vrsta gradiva:")
		if vrstatag:
			vrsta += vrstatag[0].parent.next_sibling.text.lower()
		if "magistr" not in vrsta and "diplom" not in vrsta and "doktor" not in vrsta:
			# we could also expand to download other types of works
			logger.info("Not sought after work type: %s" % vrsta)
			return (STATE_PERM_FAIL, None)

		datoteke = soup.find_all(text="Datoteke:")
		urlname = datoteke[0].parent.next_sibling.select("a")[0].text.strip(" ")
		print urlname
		if not urlname.lower().endswith(".pdf"):
			# here there are a lot of pointers to third parties, but we only support direct links to pdfs
			return (STATE_PERM_FAIL, None)
		url = datoteke[0].parent.next_sibling.select("a")[0]["href"]
		if not url.startswith("http"):
			url = self.BASE_URL + url
		'''
		# I couldn't make this work
		with requests.Session() as s:
		    r = s.get(url)
		    soup = bs4.BeautifulSoup(r.content.decode('utf-8'), "html.parser")
		    tags = soup.select("form input")
		    key = tags[0]['value']
		    print key
		    r = s.post(url, data="key=" + key, allow_redirects=True)
		    print r.status_code
		    print r.url
#		    print r.content
#		    r = s.post(URL2, data="username and password data payload")

		'''
		# let's cheat and assume that for the works we're interested in, urlname is the real name
		(download_status, file) = common.download_file(urlname)
		return (download_status, file)
Example #9
 def init(cls):
     try:
         path = download_file('https://download.pytorch.org/test_data/legacy_modules.t7')
     except unittest.SkipTest:
         return
     tests = load_lua(path)
     for name, test in tests['modules'].items():
         test_name = 'test_' + name.replace('nn.', '')
         setattr(cls, test_name, cls._module_test(name, test))
     for name, test in tests['criterions'].items():
         test_name = 'test_' + name.replace('nn.', '')
         setattr(cls, test_name, cls._criterion_test(name, test))
Example #10
 def init(cls):
     try:
         path = download_file('https://download.pytorch.org/test_data/legacy_modules.t7')
     except unittest.SkipTest:
         return
     long_size = 8 if sys.platform == 'win32' else None
     tests = load_lua(path, long_size=long_size)
     for name, test in tests['modules'].items():
         if name == "HardShrink":
             continue
         test_name = 'test_' + name.replace('nn.', '')
         setattr(cls, test_name, cls._module_test(name, test))
     for name, test in tests['criterions'].items():
         if name == "HardShrink":
             continue
         test_name = 'test_' + name.replace('nn.', '')
         setattr(cls, test_name, cls._criterion_test(name, test))
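
Examples #9 and #10 use a test-suite flavor of download_file that raises unittest.SkipTest when the file cannot be fetched, so the generated tests are skipped rather than failed on network problems. A minimal sketch of that wrapping pattern (the urllib-based body and cache path are assumptions, not the PyTorch implementation):

import os
import tempfile
import unittest
import urllib.request

def download_file(url, path=None):
    # Illustrative sketch: convert download failures into skipped tests.
    path = path or os.path.join(tempfile.gettempdir(), url.split('/')[-1])
    if os.path.exists(path):
        return path
    try:
        urllib.request.urlretrieve(url, path)
    except OSError as e:
        raise unittest.SkipTest("could not download {}: {}".format(url, e))
    return path
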
Example #11
File: agent_rul.py Project: univizor/u2
	def import_doc(self, doc):
		# example url: http://repozitorij.upr.si/IzpisGradiva.php?id=976&lang=slv
		# three steps are needed:
		# 1) extract pdf url
		# 2) follow it to get to cookied yes-page
		# 3) download the final pdf
		jdata = copy.copy(self.JSON_TEMPLATE)
		print doc.agent_repository_url
		r = common.requests_get_retry(doc.agent_repository_url)
		soup = bs4.BeautifulSoup(r.content.decode('utf-8'), "html.parser")

		vrsta = ""
		vrstatag = soup.find_all(text="Tipologija:")
		if vrstatag:
			vrsta += vrstatag[0].parent.next_sibling.text.lower()
		vrstatag = soup.find_all(text="Vrsta gradiva:")
		if vrstatag:
			vrsta += vrstatag[0].parent.next_sibling.text.lower()
		if "magistr" not in vrsta and "diplom" not in vrsta and "doktor" not in vrsta:
			# we could also expand to download other types of works
			logger.info("Not sought after work type: %s" % vrsta)
			return (STATE_PERM_FAIL, None)

		datoteke = soup.select(".izpisDatotek tr td a")
		if not datoteke:
			logger.warning("No download file available")
			return (STATE_PERM_FAIL, None)

		urlname = datoteke[1]["href"]
		if urlname.startswith("Dokument"):
			urlname = self.BASE_URL + urlname
		elif not urlname.lower().endswith(".pdf"):
			# here there are a lot of pointers to third parties, but we only support direct links to pdfs
			# TODO support third party downloads
			logger.warning("No pdf availalbe")
			return (STATE_PERM_FAIL, None)

		# let's cheat and assume that for the works we're interested in, urlname is the real name
		(download_status, file) = common.download_file(urlname)
		return (download_status, file)
Example #12
def import_repo(repo_url=None, repo='common', env='unstable'):
    parser = RepoDirIndexParser(repo_url)
    index = requests.get(repo_url, stream=True, timeout=120)
    for chunk in index.iter_content(64*1024):
        parser.feed(chunk)
    changes_files = parser.changes
    log.info("Found %s packages to import", len(changes_files))
    for url in changes_files:
        file = os.path.join(common.config['storage']['temp_dir'], url.split('/')[-1])
        result = common.download_file(url, file)
        if result.ok:
            try:
                import_package(file, repo, env)
            except ImportException as e:
                log.error("Cannot import %s: %s", file, e)
            os.unlink(file)
        else:
            log.error("Cannot download %s", file)

    for arch in ('amd64', 'all', 'i386'):
        log.info("Updating '%s/%s/%s' repo metadata", repo, env, arch)
        repo_manage.update_repo_metadata(repo, env, arch)

    log.info("Import from %s completed, uploaded %s packages to %s %s", repo_url, len(changes_files), repo, env)
Example #13
def _get_all_posters(location):
    '''
    download all available posters that share a series with the one at location
    return the list of downloaded poster files
    '''

    if not location:
        return []

    baseurl = '/'.join(location.split('/')[:-1])
    filename = location.split('/')[-1]
    basename, ext = filename.split('.')
    seriesno, posterno = basename.split('-')
    poster_files = []
    i = 1
    try:
        while True:
            location = baseurl + '/' + seriesno + '-' + str(i) + '.' + ext
            poster_files.append(download_file(location, 'temp'))
            show_images_retrieved(i)
            i = i + 1
    except urllib2.HTTPError:
        # No more posters in this series
        pass
    return poster_files
Example #14
def call(base_path, domain):
    # vlink account credentials
    appid = config.vlink_appid
    token = config.vlink_token

    today = date.today()
    # First day of the current month
    first = today.replace(day=1)
    # Subtract one day to get the last day of the previous month
    last_month = first - timedelta(days=1)

    # Table name (varies by month)
    table_name = "record_callin_" + last_month.strftime("%Y%m")

    try:
        conn = MySQLdb.Connect(host=config.db_host, user=config.db_user, passwd=config.db_pwd, db=config.db_name,
                               compress=1,
                               cursorclass=MySQLdb.cursors.DictCursor, charset='utf8', connect_timeout=5)
    except:
        common.debug("failed to connect to the database")
        common.sendEmail()
        sys.exit(0)

    if common.table_exists(conn, table_name) != 1:
        common.log(conn, 'month', table_name + " table does not exist")
        sys.exit(0)

    common.log(conn, 'month', "monthly audio download started... current table " + table_name)
    query = "select id,voice_url,source_url,operator,create_time from %s where voice_url is null " % table_name
    cursor = conn.cursor()
    cursor.execute(query)
    data = cursor.fetchall()
    num = len(data)

    # No data: close the connection and exit
    if num == 0:
        common.log(conn, 'month', "monthly audio download finished: nothing to download")
        cursor.close()
        conn.close()
        sys.exit(0)

    total = 0
    for row in data:
        url = row['source_url']
        if row['operator'] =='vlink':
            url = config.vlink_domain+url
        if common.check_file_ext(url) is False:
            continue
        operator = row['operator']  # carrier
        if operator == "vlink":
            # Build a dynamic signature
            timestamp = str(int(time.time()))
            sign = common.getSign(timestamp)
            # url = config.vlink_domain+url
            prefix = "?appId=%s&timestamp=%s&sign=%s" %(appid, timestamp, sign)
            url = url+prefix
        print(url)
        # Storage directory name (e.g. 20191212), derived from the call record's create time
        time_arr = time.strptime(str(row['create_time']), "%Y-%m-%d %H:%M:%S")
        file_dir = time.strftime("%Y%m%d", time_arr)
        file_url = common.download_file(url, base_path, file_dir)
        try:
            if file_url:
                total = total + 1
                file_url = domain + '/' + file_url if domain else file_url
                query = "update %s set voice_url='%s' where id=%d" % (table_name, file_url, row['id'])
            else:
                query = "update %s set voice_url=null where id=%d" % (table_name, row['id'])
            time.sleep(0.1)
            cursor.execute(query)
            conn.commit()
        except Exception as e:
            conn.rollback()
            print('rollback')
    common.log(conn, 'month', "按月下载音频程序结束:需下载数据"+str(num)+"条,处理"+str(total)+"条数据,问题条数:("+str(num-total)+")")
    cursor.close()
    conn.close()
Example #15
def update_main_manifest():
  if not os.path.isdir(common.JSON_DIR):
    os.makedirs(common.JSON_DIR)
  print "Downloading main image manifest."
  common.download_file(common.MANIFEST_PATH, "http://mars.jpl.nasa.gov/msl-raw-images/image/image_manifest.json")
Example #16
def call(base_path, domain):
    # vlink account credentials
    appid = config.vlink_appid
    token = config.vlink_token

    # Yesterday's date (Y-m-d)
    yesterday = (date.today() + timedelta(days=-1)).strftime("%Y-%m-%d")
    start_time = yesterday + " 00:00:00"
    end_time = yesterday + " 23:59:59"

    # Table name (varies by month)
    table_name = "record_callin_" + (date.today() + timedelta(days=-1)).strftime("%Y%m")
    # Storage directory name (e.g. 20191212)
    # file_dir = yesterday.replace('-', '')
    try:
        conn = MySQLdb.Connect(host=config.db_host, user=config.db_user, passwd=config.db_pwd, db=config.db_name,
                               compress=1,
                               cursorclass=MySQLdb.cursors.DictCursor, charset='utf8', connect_timeout=5)
    except:
        common.debug("failed to connect to the database")
        sys.exit(0)

    common.log(conn, 'day', "daily audio download started... current table " + table_name)
    query = "select id,source_url,operator from %s where voice_url is null and create_time between '%s' and '%s'" % (
        table_name, start_time, end_time)
    cursor = conn.cursor()
    cursor.execute(query)
    data = cursor.fetchall()
    num = len(data)

    # No data: close the connection and exit
    if num == 0:
        common.log(conn, 'day', "daily audio download finished: nothing to download")
        cursor.close()
        conn.close()
        sys.exit(0)

    total = 0
    for row in data:
        url = row['source_url']
        if row['operator'] =='vlink':
            url = config.vlink_domain+url
        if common.check_file_ext(url) is False:
            continue
        operator = row['operator']  # carrier
        if operator == "vlink":
            # Build a dynamic signature
            timestamp = str(int(time.time()))
            sign = common.getSign(timestamp)
            # url = config.vlink_domain+url
            prefix = "?appId=%s&timestamp=%s&sign=%s" %(appid, timestamp, sign)
            url = url+prefix
        # Storage directory name (e.g. 20191212), derived from the call record's create time
        time_arr = time.strptime(str(row['create_time']), "%Y-%m-%d %H:%M:%S")
        file_dir = time.strftime("%Y%m%d", time_arr)
        file_url = common.download_file(url, base_path, file_dir)
        try:
            if file_url:
                total = total + 1
                file_url = domain + '/' + file_url if domain else file_url
                query = "update %s set voice_url='%s' where id=%d" % (table_name, file_url, row['id'])
            else:
                query = "update %s set voice_url=null where id=%d" % (table_name, row['id'])
            time.sleep(0.1)
            cursor.execute(query)
            conn.commit()
        except Exception as e:
            conn.rollback()
            print('rollback')
    common.log(conn, 'day', "按日下载音频程序结束:需下载数据"+str(num)+"条,处理"+str(total)+"条数据,问题条数:("+str(num-total)+")")
    cursor.close()
    conn.close()
Example #17
File: agent_vsvo.py Project: univizor/u2
	def import_doc(self, doc):
		(download_status, file) = common.download_file(doc.agent_repository_url)
		return (download_status, file)
Example #18
File: agent_fis.py Project: univizor/u2
	def import_doc(self, doc):
		(download_status, file) = common.download_file(doc.agent_repository_url)
		return (download_status, file)
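
The univizor/u2 agents (Examples #8, #11, #17, #18) rely on yet another calling convention: common.download_file returns a (status, file) tuple instead of raising, which lets import_doc pass the status straight through to its caller. A sketch of that contract, reusing the STATE_OK and STATE_PERM_FAIL names from the examples (their values and the body below are assumptions):

import requests

STATE_OK = 'ok'            # assumed value
STATE_PERM_FAIL = 'fail'   # assumed value

def download_file(url):
    # Illustrative sketch: report failure through the status element
    # rather than through an exception.
    try:
        response = requests.get(url, timeout=60)
    except requests.RequestException:
        return (STATE_PERM_FAIL, None)
    if response.status_code != 200:
        return (STATE_PERM_FAIL, None)
    return (STATE_OK, response.content)
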
Example #19
import common

inizio = int(input("Start year: "))
fine = int(input("End year: "))
for (anno, links) in common.get_links("wpfb-cat-119").items():
    if anno >= inizio and anno <= fine:
        for link in links:
            common.download_file(link[0], link[1] + ".pdf")
Example #20
def main():
    """Create a TensorRT engine for ONNX-based YOLOv3-608 and run inference."""

    # Try to load a previously generated YOLOv3-608 network graph in ONNX format:
    # Paths of the ONNX model and the corresponding engine file
    onnx_file_path = 'yolov3.onnx'
    engine_file_path = "yolov3.trt"
    # Download a dog image and save it to the following file path:
    input_image_path = common.download_file(
        'dog.jpg',
        'https://github.com/pjreddie/darknet/raw/f86901f6177dfc6116360a13cc06ab680e0c86b0/data/dog.jpg',
        checksum_reference=None)

    # Two-dimensional tuple with the target network's (spatial) input resolution in HW order
    input_resolution_yolov3_HW = (608, 608)
    # Create a pre-processor object by specifying the required input resolution for YOLOv3
    # PreprocessYOLO (see data_processing.py) loads an image and applies the preprocessing
    preprocessor = PreprocessYOLO(input_resolution_yolov3_HW)
    # Load an image from the specified input path, and return it together with a pre-processed version
    image_raw, image = preprocessor.process(input_image_path)
    # Store the shape of the original input image in WH format; we will need it later
    shape_orig_WH = image_raw.size

    # Output shapes expected by the post-processor
    output_shapes = [(1, 255, 19, 19), (1, 255, 38, 38), (1, 255, 76, 76)]
    # Do inference with TensorRT
    trt_outputs = []
    # get_engine (defined in this file) loads or builds the engine,
    # and an execution context is created from it
    with get_engine(onnx_file_path, engine_file_path) as engine, \
            engine.create_execution_context() as context:
        # Allocate host and device buffers for the engine bindings
        inputs, outputs, bindings, stream = common.allocate_buffers(engine)
        # Do inference
        print('Running inference on image {}...'.format(input_image_path))
        # Set host input to the image. The common.do_inference function will copy the input to the GPU before executing.
        inputs[0].host = image
        trt_outputs = common.do_inference_v2(context,
                                             bindings=bindings,
                                             inputs=inputs,
                                             outputs=outputs,
                                             stream=stream)

    # Before doing post-processing, we need to reshape the outputs, as common.do_inference gives us flat arrays.
    trt_outputs = [
        output.reshape(shape)
        for output, shape in zip(trt_outputs, output_shapes)
    ]

    postprocessor_args = {
        # A list of 3 three-dimensional tuples for the YOLO masks
        "yolo_masks": [(6, 7, 8), (3, 4, 5), (0, 1, 2)],
        # A list of 9 two-dimensional tuples for the YOLO anchors
        "yolo_anchors": [(10, 13), (16, 30), (33, 23),
                         (30, 61), (62, 45), (59, 119),
                         (116, 90), (156, 198), (373, 326)],
        # Threshold for object coverage, float value between 0 and 1
        "obj_threshold": 0.6,
        # Threshold for non-max suppression algorithm, float value between 0 and 1
        "nms_threshold": 0.5,
        "yolo_input_resolution": input_resolution_yolov3_HW
    }
    # Next comes the post-processing
    postprocessor = PostprocessYOLO(**postprocessor_args)

    # Run the post-processing algorithms on the TensorRT outputs and get the bounding box details of detected objects
    boxes, classes, scores = postprocessor.process(trt_outputs,
                                                   (shape_orig_WH))
    # Draw the bounding boxes onto the original input image and save it as a PNG file
    obj_detected_img = draw_bboxes(image_raw, boxes, scores, classes,
                                   ALL_CATEGORIES)
    output_image_path = 'dog_bboxes.png'
    obj_detected_img.save(output_image_path, 'PNG')
    print('Saved image with bounding boxes of detected objects to {}.'.format(
        output_image_path))
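
Example #20 is the TensorRT YOLOv3 sample; its download_file takes the local file name first, then the URL, plus a checksum_reference that, when set, lets the helper verify the downloaded file against a known digest. A sketch of what such a verification step might look like (the MD5 choice and the function itself are assumptions, not the sample's code):

import hashlib

def checksum_matches(path, reference, algorithm='md5'):
    # Illustrative sketch: hash the file in chunks and compare the
    # hex digest against the expected reference value.
    digest = hashlib.new(algorithm)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(64 * 1024), b''):
            digest.update(chunk)
    return digest.hexdigest() == reference
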
Example #21
def main():

    global options, config

    options, args = parse_commandline()
    if not args and not options.all:
        raise OptionError("If you really want to build all the apps, use --all", "all")

    config = common.read_config(options)

    if config['build_server_always']:
        options.server = True
    if options.resetserver and not options.server:
        raise OptionError("Using --resetserver without --server makes no sense", "resetserver")

    log_dir = 'logs'
    if not os.path.isdir(log_dir):
        logging.info("Creating log directory")
        os.makedirs(log_dir)

    tmp_dir = 'tmp'
    if not os.path.isdir(tmp_dir):
        logging.info("Creating temporary directory")
        os.makedirs(tmp_dir)

    if options.test:
        output_dir = tmp_dir
    else:
        output_dir = 'unsigned'
        if not os.path.isdir(output_dir):
            logging.info("Creating output directory")
            os.makedirs(output_dir)

    if config['archive_older'] != 0:
        also_check_dir = 'archive'
    else:
        also_check_dir = None

    repo_dir = 'repo'

    build_dir = 'build'
    if not os.path.isdir(build_dir):
        logging.info("Creating build directory")
        os.makedirs(build_dir)
    srclib_dir = os.path.join(build_dir, 'srclib')
    extlib_dir = os.path.join(build_dir, 'extlib')

    # Read all app and srclib metadata
    allapps = metadata.read_metadata(xref=not options.onserver)

    apps = common.read_app_args(args, allapps, True)
    for appid, app in apps.items():
        if (app['Disabled'] and not options.force) or not app['Repo Type'] or not app['builds']:
            del apps[appid]

    if not apps:
        raise FDroidException("No apps to process.")

    if options.latest:
        for app in apps.itervalues():
            for build in reversed(app['builds']):
                if build['disable'] and not options.force:
                    continue
                app['builds'] = [build]
                break

    if options.wiki:
        import mwclient
        site = mwclient.Site((config['wiki_protocol'], config['wiki_server']),
                             path=config['wiki_path'])
        site.login(config['wiki_user'], config['wiki_password'])

    # Build applications...
    failed_apps = {}
    build_succeeded = []
    for appid, app in apps.iteritems():

        first = True

        for thisbuild in app['builds']:
            wikilog = None
            try:

                # For the first build of a particular app, we need to set up
                # the source repo. We can reuse it on subsequent builds, if
                # there are any.
                if first:
                    if app['Repo Type'] == 'srclib':
                        build_dir = os.path.join('build', 'srclib', app['Repo'])
                    else:
                        build_dir = os.path.join('build', appid)

                    # Set up vcs interface and make sure we have the latest code...
                    logging.debug("Getting {0} vcs interface for {1}"
                                  .format(app['Repo Type'], app['Repo']))
                    vcs = common.getvcs(app['Repo Type'], app['Repo'], build_dir)

                    first = False

                logging.debug("Checking " + thisbuild['version'])
                if trybuild(app, thisbuild, build_dir, output_dir,
                            also_check_dir, srclib_dir, extlib_dir,
                            tmp_dir, repo_dir, vcs, options.test,
                            options.server, options.force,
                            options.onserver, options.refresh):

                    if app.get('Binaries', None):
                        # This is an app where we build from source, and
                        # verify the apk contents against a developer's
                        # binary. We get that binary now, and save it
                        # alongside our built one in the 'unsigned'
                        # directory.
                        url = app['Binaries']
                        url = url.replace('%v', thisbuild['version'])
                        url = url.replace('%c', str(thisbuild['vercode']))
                        logging.info("...retrieving " + url)
                        of = "{0}_{1}.apk.binary".format(app['id'], thisbuild['vercode'])
                        of = os.path.join(output_dir, of)
                        common.download_file(url, local_filename=of)

                    build_succeeded.append(app)
                    wikilog = "Build succeeded"
            except BuildException as be:
                logfile = open(os.path.join(log_dir, appid + '.log'), 'a+')
                logfile.write(str(be))
                logfile.close()
                print("Could not build app %s due to BuildException: %s" % (appid, be))
                if options.stop:
                    sys.exit(1)
                failed_apps[appid] = be
                wikilog = be.get_wikitext()
            except VCSException as vcse:
                reason = str(vcse).split('\n', 1)[0] if options.verbose else str(vcse)
                logging.error("VCS error while building app %s: %s" % (
                    appid, reason))
                if options.stop:
                    sys.exit(1)
                failed_apps[appid] = vcse
                wikilog = str(vcse)
            except Exception as e:
                logging.error("Could not build app %s due to unknown error: %s" % (
                    appid, traceback.format_exc()))
                if options.stop:
                    sys.exit(1)
                failed_apps[appid] = e
                wikilog = str(e)

            if options.wiki and wikilog:
                try:
                    # Write a page with the last build log for this version code
                    lastbuildpage = appid + '/lastbuild_' + thisbuild['vercode']
                    newpage = site.Pages[lastbuildpage]
                    txt = "Build completed at " + time.strftime("%Y-%m-%d %H:%M:%SZ", time.gmtime()) + "\n\n" + wikilog
                    newpage.save(txt, summary='Build log')
                    # Redirect from /lastbuild to the most recent build log
                    newpage = site.Pages[appid + '/lastbuild']
                    newpage.save('#REDIRECT [[' + lastbuildpage + ']]', summary='Update redirect')
                except:
                    logging.error("Error while attempting to publish build log")

    for app in build_succeeded:
        logging.info("success: %s" % (app['id']))

    if not options.verbose:
        for fa in failed_apps:
            logging.info("Build for app %s failed:\n%s" % (fa, failed_apps[fa]))

    logging.info("Finished.")
    if len(build_succeeded) > 0:
        logging.info(str(len(build_succeeded)) + ' builds succeeded')
    if len(failed_apps) > 0:
        logging.info(str(len(failed_apps)) + ' builds failed')

    sys.exit(0)
Example #22
def main():

    global options, config

    # Parse command line...
    parser = OptionParser(usage="Usage: %prog [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
    parser.add_option("-v", "--verbose", action="store_true", default=False,
                      help="Spew out even more information than normal")
    parser.add_option("-q", "--quiet", action="store_true", default=False,
                      help="Restrict output to warnings and errors")
    (options, args) = parser.parse_args()

    config = common.read_config(options)

    tmp_dir = 'tmp'
    if not os.path.isdir(tmp_dir):
        logging.info("Creating temporary directory")
        os.makedirs(tmp_dir)

    unsigned_dir = 'unsigned'
    if not os.path.isdir(unsigned_dir):
        logging.error("No unsigned directory - nothing to do")
        sys.exit(0)

    verified = 0
    notverified = 0

    vercodes = common.read_pkg_args(args, True)

    for apkfile in sorted(glob.glob(os.path.join(unsigned_dir, '*.apk'))):

        apkfilename = os.path.basename(apkfile)
        appid, vercode = common.apknameinfo(apkfile)

        if vercodes and appid not in vercodes:
            continue
        if vercodes[appid] and vercode not in vercodes[appid]:
            continue

        try:

            logging.info("Processing " + apkfilename)

            remoteapk = os.path.join(tmp_dir, apkfilename)
            if os.path.exists(remoteapk):
                os.remove(remoteapk)
            url = 'https://f-droid.org/repo/' + apkfilename
            logging.info("...retrieving " + url)
            common.download_file(url, dldir=tmp_dir)

            compare_result = common.compare_apks(
                os.path.join(unsigned_dir, apkfilename),
                remoteapk,
                tmp_dir)
            if compare_result:
                raise FDroidException(compare_result)

            logging.info("...successfully verified")
            verified += 1

        except FDroidException as e:
            logging.info("...NOT verified - {0}".format(e))
            notverified += 1
Example #23
    def download_gallery_images(self,
                                gallery_url,
                                download_path,
                                options,
                                status,
                                root=False):
        """
            Download images from a deviantart gallery
        """
        #
        #   Download and process the webpage
        current_skips = 0
        subfolder_data = common.fetch_webpage(session=self.session,
                                              url=gallery_url, timeout=60)
        subfolder = BeautifulSoup(subfolder_data)

        if gallery_url.find("?offset") == -1:
            print "\n\tProcessing Gallery - %30s" % (gallery_url),
        else:
            print "R",

        links = subfolder.find_all('a', {'class': 'thumb',
                                         'data-super-img': True})
        for xlink in links:
            if options.downloadlimit > 0 and \
                    status.return_downloads() >= options.downloadlimit:
                print "X"
                return status
            image_file = xlink["data-super-img"]
            file_to_download = image_file.replace("-t", "").strip()
            file_to_download = common.clean_filename(
                file_to_download,
                max_length=240)
            #
            #   Does directory exist?  If not create it
            #
            if not os.path.exists(download_path):
                os.makedirs(download_path)

            #
            #   Check for file already existing,
            #   if so, don't download
            #
            if root and os.path.split(file_to_download)[1].lower().strip() in\
                self.root_checker:
                status.add_skipped(filename=file_to_download,
                                   options=options)
                current_skips += 1
                if options.skiplimit != 0 and \
                    current_skips >= options.skiplimit:
                    print "S"
                    return status
                continue

            if os.path.exists(
                    download_path +  # + gallery_name + os.sep +
                    os.path.split(file_to_download)[1]):
                status.add_skipped(filename=file_to_download,
                                   options=options)
                current_skips += 1
                if options.skiplimit != 0 and \
                    current_skips >= options.skiplimit:
                    print "S"
                    return status
            else:
                if common.download_file(
                        session=self.session,
                        url=file_to_download,
                        filename=os.path.split(file_to_download)[1],
                        download_folder=download_path,
                        timeout=45):
                    status.add_download(filename=file_to_download,
                                        options=options)
                else:
                    status.add_error(filename=file_to_download,
                                     options=options)
        time.sleep(.10)

        next_data = subfolder.find_all('li', {'class': 'next'})
        if next_data:
            next_data = next_data[0].find("a", {"class": "away"})
            if next_data is not None:
                next_data = next_data.get("href")
                next_gallery_url = \
                    gallery_url[0:(gallery_url.find(r"/gallery"))]\
                    + next_data
                time.sleep(.5)
                status = self.download_gallery_images(next_gallery_url,
                                                      download_path,
                                                      options,
                                                      status,
                                                      root=root)
        return status
Example #24
    def download(self, options):
        """
        #   As of 4/24/2014
        #
        #   Examples of
        #
        """
        print "AC Paradise"
        if options.startingplace != 0:
            counter = options.startingplace
        else:
            counter = 1
        status = common.status()
        while True:
            cosplay_index_links = self.download_acp_cosplayer_index(\
                url=website_cosplayer_index % (options.url_to_fetch,
                                               counter),
                timeout=45)
            if len(cosplay_index_links) == 0:
                #
                #   No download links, stop processing, and return totals
                #
#                return (total_downloaded, total_skipped)
                return status.return_counts()
            else:
                for x in cosplay_index_links:
                    (costume_name, display_page_number) =\
                        self.extract_ci_details(x)
                    costume_name = common.clean_filename(costume_name)

                    print "\nCostume name : %s - %s" % (costume_name,
                                                        website_base_url%\
                                                        display_page_number)
                    costume_webpage = common.fetch_webpage(\
                                session=self.session,
                                url=website_base_url % display_page_number,
                                timeout=45)
                    costume_soup = BeautifulSoup(costume_webpage)
                    costume_links = costume_soup.find_all("img")
                    for y in costume_links:
                        if str(y).find(website_photo_base) != -1:
                            #
                            #   Remove thumbnail
                            #
                            file_to_download = y["src"].replace("-t", "")
                            file_to_download = file_to_download.strip()
                            file_to_download = common.clean_filename(
                                file_to_download, max_length=240)

                            #
                            #   Does directory exist?  If not create it
                            #
                            if not os.path.exists(options.download_folder +
                                                  costume_name):
                                os.makedirs(options.download_folder +
                                            costume_name + os.sep)

                            #
                            #   Check for file already existing,
                            #   if so, don't download
                            #
                            if os.path.exists(options.download_folder +
                                              costume_name + os.sep +
                                              os.path.split(file_to_download)[1]):
                                status.add_skipped(file_to_download,
                                                   options)
                            else:
                                #
                                #   Download file
                                #
                                if common.download_file(
                                        session=self.session,
                                        url=file_to_download,
                                        filename=os.path.split(file_to_download)[1],
                                        download_folder=options.download_folder +
                                        costume_name + os.sep, timeout=45):
                                    status.add_download(file_to_download,
                                                        options)
                                else:
                                    status.add_error(file_to_download,
                                                     options)

            #
            #   Increment page count
            #
            counter += 1

        return status.return_counts()
Example #25
def import_package(changefile=None, repo=None, env='unstable'):
    pkg_files = []
    base_url = 'http://dist.yandex.ru/{}/{}'.format(repo, env)
    changes = ChangeFile.ChangeFile()
    changes.load_from_file(changefile)
    try:
        p = common.db_repos[repo].find_one({'Source': changes['source'], 'Version': changes['version']})
        if p:
            log.warning("%s_%s is already uploaded to repo '%s', environment '%s'",
                        changes['source'], changes['version'], repo, p['environment'])
            if p['environment'] != env:
                log.warning("Dmoving %s_%s in repo '%s' from '%s' to '%s'",
                            changes['source'], changes['version'], repo, p['environment'], env)
                repo_manage.copy_package(pkg=changes['source'], ver=changes['version'], repo=repo,
                                          src=p['environment'], dst=env, skipUpdateMeta=True)
            return None
        else:
            log.info("Importing %s_%s to %s/%s", changes['source'], changes['version'], repo, env)
    except KeyError as e:
        log.error("Cannot find field %s in %s, skipping package", e.args[0], changefile)
        raise ImportException("Cannot find field {} in {}, skipping package".format(e.args[0], changefile))

    for f in (x[2] for x in changes.getFiles()):
        if f.endswith('.deb') or f.endswith('.udeb'):
            if f.find('_amd64') >= 0:
                url = '/'.join((base_url, 'amd64', f))
            elif f.find('_all') >= 0:
                url = '/'.join((base_url, 'all', f))
            elif f.find('_i386') >= 0:
                url = '/'.join((base_url, 'i386', f))
            else:
                log.warning("%s: unknown arch!", f)
                sys.exit(1)
        else:
            url = '/'.join((base_url, 'source', f))

        if not _checkFile(url):
            log.error("%s (%s): file not found", url, f)
            raise ImportException("{} not found".format(url))
        else:
            pkg_files.append(url)

    downloaded = []
    for url in pkg_files:
        file = os.path.join(common.config['storage']['temp_dir'], url.split('/')[-1])
        result = common.download_file(url, file)
        if not result.ok:
            [os.unlink(x) for x in downloaded]
            raise ImportException("Cannot download {}: {}".format(url, result['msg']))
        downloaded.append(file)

    try:
        repo_manage.upload_package(repo, env, downloaded, changes, skipUpdateMeta=True)
    except repo_manage.UploadPackageError as e:
        log.error("Cannot upload package: %s", e)
        [os.unlink(x) for x in downloaded]
        raise ImportException("Cannot upload package: {}".format(e))

    # cleanup
    for file in downloaded:
        os.unlink(file)
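
Examples #12 and #25 treat the return value of common.download_file as a result object: both branch on result.ok, and #25 reads result['msg'] for the error message, so the real type is presumably dict-like as well as attribute-addressable. A simplified sketch of such a result type (the namedtuple stand-in supports only attribute access and is an assumption):

import collections
import requests

DownloadResult = collections.namedtuple('DownloadResult', ['ok', 'msg'])

def download_file(url, destination):
    # Illustrative sketch: never raise; report success and an error
    # message through the returned result object.
    try:
        response = requests.get(url, stream=True, timeout=120)
        response.raise_for_status()
    except requests.RequestException as e:
        return DownloadResult(ok=False, msg=str(e))
    with open(destination, 'wb') as f:
        for chunk in response.iter_content(64 * 1024):
            f.write(chunk)
    return DownloadResult(ok=True, msg='')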