Example #1
0
def copyfileobj(src, dst, length=None, exception=OSError):
    """Copy exactly ``length`` bytes from fileobj ``src`` to fileobj ``dst``.

    If ``length`` is None, copy the entire content.

    :param src: readable binary file-like object
    :param dst: writable binary file-like object
    :param length: exact number of bytes to copy, or None for "until EOF"
    :param exception: exception type raised when ``src`` runs out of data
        before ``length`` bytes were read
    :raises exception: if fewer than ``length`` bytes are available
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: delegate to the stdlib implementation.
        shutil.copyfileobj(src, dst)
        return

    # Copy in BUFSIZE-sized chunks, verifying that the source actually
    # provides every requested byte (a short read is an error here,
    # unlike shutil.copyfileobj which would silently stop).
    blocks, remainder = divmod(length, BUFSIZE)
    for _ in range(blocks):
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise exception("unexpected end of data")
        dst.write(buf)

    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise exception("unexpected end of data")
        dst.write(buf)
Example #2
0
 def export_config(self, base_dir, library_usage_stats):
     """Export the configuration directory into ``base_dir``.

     Copies every file recorded under the 'config_dir' metadata key,
     rewrites global.py with a fresh installation UUID and the most-used
     library path, and stores the usage stats in a 'gui' JSONConfig.

     :param base_dir: destination directory for the exported config
     :param library_usage_stats: Counter-like mapping of library path ->
         usage count (only ``most_common`` and ``dict()`` are used)
     """
     for key, relpath in self.metadata['config_dir']:
         f = self.start_file(key, relpath)
         path = os.path.join(base_dir, relpath.replace('/', os.sep))
         try:
             with lopen(path, 'wb') as dest:
                 shutil.copyfileobj(f, dest)
         except EnvironmentError:
             # Destination directory may not exist yet: create it, retry once.
             os.makedirs(os.path.dirname(path))
             with lopen(path, 'wb') as dest:
                 shutil.copyfileobj(f, dest)
         f.close()
     gpath = os.path.join(base_dir, 'global.py')
     try:
         with lopen(gpath, 'rb') as f:
             raw = f.read()
     except EnvironmentError:
         # No exported global.py: start from empty preferences.
         raw = b''
     try:
         # The most frequently used library becomes the default library_path.
         lpath = library_usage_stats.most_common(1)[0][0]
     except Exception:
         lpath = None
     c = create_global_prefs(StringConfig(raw, 'calibre wide preferences'))
     # A fresh UUID keeps this installation distinguishable from the source.
     c.set('installation_uuid', str(uuid.uuid4()))
     c.set('library_path', lpath)
     raw = c.src
     if not isinstance(raw, bytes):
         raw = raw.encode('utf-8')
     with lopen(gpath, 'wb') as f:
         f.write(raw)
     gprefs = JSONConfig('gui', base_path=base_dir)
     gprefs['library_usage_stats'] = dict(library_usage_stats)
Example #3
0
def generate_roles_data_from_directory(directory, roles, validate=True):
    """Generate a roles data file using roles from a local path

    :param directory local filesystem path to the roles
    :param roles ordered list of roles
    :param validate validate the metadata format in the role yaml files
    :returns string contents of the roles_data.yaml
    """
    check_role_exists(get_roles_list_from_directory(directory), roles)

    output = StringIO()
    banner = "#" * 79
    output.write("\n".join([banner,
                            "# File generated by TripleO",
                            banner,
                            ""]))

    for role in roles:
        parts = role.split(':')
        defined_role = parts[0]
        role_file = os.path.join(directory, "{}.yaml".format(defined_role))
        if validate:
            validate_role_yaml(role_path=role_file)
        with open(role_file, "r") as handle:
            if len(parts) > 1:
                # "name:alias" entries get the role body rewritten under
                # the aliased name.
                content = generate_role_with_colon_format(handle.read(),
                                                          defined_role,
                                                          parts[1])
                output.write(content)
            else:
                shutil.copyfileobj(handle, output)

    return output.getvalue()
Example #4
0
def imageToCache(src, name):
    """Download the image at ``src`` (streamed) into the cache directory
    under ``name`` and return the cached file's path."""
    target = os.path.join(CACHE_PATH, name)
    response = requests.get(src, stream=True)
    with open(target, 'wb') as cache_file:
        shutil.copyfileobj(response.raw, cache_file)
    del response
    return target
Example #5
0
	def _downloadAChapter(self, pathToStore, url, chapter):
		"""Download every page image of ``chapter`` from ``url`` into
		``pathToStore``/<chapter>/.

		Pages are probed sequentially as <formatted-number>.jpg until a
		non-200 response ends the chapter.  Returns True if at least one
		page was downloaded.
		NOTE(review): assumes the destination directory already exists
		and that self._formatPageNumber/self.logger are defined
		elsewhere in the class -- confirm before reuse.
		"""
		pageNumber = 0
		baseImageUrl = url + "/" + str(chapter) + "/"
		basePathToStore = pathToStore + "/" + str(chapter) + "/"

		nextImage = True
		downloadSuccess = False
		self._log("downloading chapter {} :".format(chapter), "INFO")
		while nextImage:
			pageNumberFormatted = self._formatPageNumber(pageNumber)
			imageUrl = baseImageUrl + pageNumberFormatted + ".jpg"
			self._log("try to download {}".format(imageUrl), "DEBUG")
			r = requests.get(imageUrl, stream=True)
			if r.status_code == 200:
				self._log("download page image {} from chapter {}".format(pageNumberFormatted, chapter), "DEBUG")
				self.logger.printSameLine("*")
				if not downloadSuccess:
					downloadSuccess = True
				pageNumber += 1
				imagePath =  basePathToStore + pageNumberFormatted + ".jpg"
				with open(imagePath, 'wb') as imageFile:
					# decode_content makes requests undo any transport
					# compression before the raw copy.
					r.raw.decode_content = True
					shutil.copyfileobj(r.raw, imageFile)
			else:
				if pageNumber == 0:
					# Page numbering may start at 1 -- retry once from page 1.
					pageNumber += 1
				else:
					if downloadSuccess:
						self.logger.printSameLine("",True)
					nextImage = False
		
		return downloadSuccess
Example #6
0
def sanitize_metadata(stream, content_type, strip_metadata):
    """Strip embedded metadata from ``stream`` when requested.

    :param stream: readable file-like object with the uploaded content
    :param content_type: MIME type of the content; 'text/plain' is never
        sanitized (plain text carries no embedded metadata)
    :param strip_metadata: whether sanitization was requested at all
    :returns: a StringIO with the cleaned content when the file was dirty
        and got cleaned, otherwise None (meaning "use the original stream")

    NOTE(review): Python 2 era code -- ``s.reset()`` and the text-mode
    ``open`` only work on py2 StringIO/file objects.
    """
    text_plain = content_type == 'text/plain'

    s = None

    if strip_metadata and not text_plain:
        # Spill the stream to a named temp file so the external
        # metadata handler can operate on a real path.
        t = tempfile.NamedTemporaryFile(delete = False)
        copyfileobj(stream, t)
        t.flush()
        file_meta = metadata_handler(t.name)

        if not file_meta.is_clean():
            file_meta.remove_all()
            f = open(t.name)
            s = StringIO()
            s.write(f.read())
            f.close()
            s.reset()  # rewind so callers can read from the start

        # Common cleanup (was duplicated in both branches): securely
        # remove and close the temp file whether it was dirty or not.
        secure_unlink(t.name, do_verify = False)
        t.close()

    return s
    def store(self, media_file, file=None, url=None, meta=None):
        """Store the given file or URL and return a unique identifier for it.

        :type media_file: :class:`~mediadrop.model.media.MediaFile`
        :param media_file: The associated media file object.
        :type file: :class:`cgi.FieldStorage` or None
        :param file: A freshly uploaded file object.
        :type url: unicode or None
        :param url: A remote URL string.
        :type meta: dict
        :param meta: The metadata returned by :meth:`parse`.
        :rtype: unicode or None
        :returns: The unique ID string. Return None if not generating it here.

        """
        name = safe_file_name(media_file, file.filename)
        destination = self._get_path(name)

        # Rewind the upload before copying it to its permanent location.
        source = file.file
        source.seek(0)
        target = open(destination, 'wb')
        copyfileobj(source, target)
        source.close()
        target.close()

        return name
def unzip_file(file_path):
    """The presumption here is that there is only one data file that we care about in the zip file.  Some zip files
    contain more than one data file, but they are duplicates.  Typically, when this happens, there is one SGML file and
    one XML file.  In these cases, we will discard the SGML file."""

    with zipfile.ZipFile(file_path, "r") as archive:
        entries = archive.infolist()
        count = len(entries)
        if count < 1:
            raise Exception("No data files found in archive '" + file_path + "'.")
        if count == 1:
            chosen = entries[0]
        elif count == 2:
            # Exactly one XML plus one SGML is acceptable; keep the XML.
            first = entries[0].filename.lower()
            second = entries[1].filename.lower()
            if first.endswith(".xml") and second.endswith(".sgm"):
                chosen = entries[0]
            elif first.endswith(".sgm") and second.endswith(".xml"):
                chosen = entries[1]
            else:
                raise Exception("Found something other than 1 XML and 1 SGML file in archive '" + file_path + "'.")
        else:
            raise Exception("Found more than 2 data files in archive '" + file_path + "'.")
        extract_file_path = os.path.join(TEMP_DIRECTORY, os.path.basename(chosen.filename))
        write_log_entry("  Extracting data file to '" + extract_file_path + "'.")
        with open(extract_file_path, mode="wb") as output_file:
            shutil.copyfileobj(archive.open(chosen, mode="r"), output_file)
    return extract_file_path
Example #9
0
File: db.py Project: bitctrl/odoo
def dump_db(db_name, stream, backup_format='zip'):
    """Dump database `db` into file-like object `stream` if stream is None
    return a file object with the dump """

    _logger.info('DUMP DB: %s format %s', db_name, backup_format)

    cmd = ['pg_dump', '--no-owner']
    cmd.append(db_name)

    if backup_format == 'zip':
        # Zip format: filestore + manifest.json + plain-SQL dump, all
        # zipped with dump.sql sorted last (see fnct_sort below).
        with odoo.tools.osutil.tempdir() as dump_dir:
            filestore = odoo.tools.config.filestore(db_name)
            if os.path.exists(filestore):
                shutil.copytree(filestore, os.path.join(dump_dir, 'filestore'))
            with open(os.path.join(dump_dir, 'manifest.json'), 'w') as fh:
                db = odoo.sql_db.db_connect(db_name)
                with db.cursor() as cr:
                    json.dump(dump_db_manifest(cr), fh, indent=4)
            # Insert before the trailing db name so pg_dump writes to a file.
            cmd.insert(-1, '--file=' + os.path.join(dump_dir, 'dump.sql'))
            odoo.tools.exec_pg_command(*cmd)
            if stream:
                odoo.tools.osutil.zip_dir(dump_dir, stream, include_dir=False, fnct_sort=lambda file_name: file_name != 'dump.sql')
            else:
                # No stream given: return a rewound temp file with the zip.
                t=tempfile.TemporaryFile()
                odoo.tools.osutil.zip_dir(dump_dir, t, include_dir=False, fnct_sort=lambda file_name: file_name != 'dump.sql')
                t.seek(0)
                return t
    else:
        # Custom pg_dump format streamed straight from the subprocess pipe.
        cmd.insert(-1, '--format=c')
        stdin, stdout = odoo.tools.exec_pg_command_pipe(*cmd)
        if stream:
            shutil.copyfileobj(stdout, stream)
        else:
            return stdout
Example #10
0
def build_program(main_file_data, library_header, shared_library, gcc_prefix, cflags, package=__name__):
    """Compile ``main_file_data`` against a packaged shared library and
    header, strip the result, and return the binary's bytes.

    Resources are extracted from ``package`` into a throwaway build
    directory which is removed before returning.
    """
    with tempfile.TemporaryDirectory() as workdir:
        header_name = os.path.basename(library_header)
        lib_name = os.path.basename(shared_library)
        assert lib_name.startswith("lib") and lib_name.endswith(".so") and header_name.endswith(".h")
        link_name = lib_name[3:-3]  # "libfoo.so" -> "foo" for the -l flag

        lib_dest = os.path.join(workdir, lib_name)
        header_dest = os.path.join(workdir, header_name)
        source_path = os.path.join(workdir, "themis_main.c")
        binary_path = os.path.join(workdir, "themis_main")

        # Materialize the packaged library and header into the build dir.
        for resource, dest in ((shared_library, lib_dest), (library_header, header_dest)):
            with open(dest, "wb") as fout, pkg_resources.resource_stream(package, resource) as fin:
                shutil.copyfileobj(fin, fout)
        with open(source_path, "w") as fout:
            fout.write(main_file_data)

        subprocess.check_call([gcc_prefix + "gcc", *cflags.split(), "-I", workdir, "-L", workdir, "-l", link_name,
                               source_path, "-o", binary_path])
        subprocess.check_call([gcc_prefix + "strip", binary_path])
        with open(binary_path, "rb") as fin:
            return fin.read()
Example #11
0
def settings_audio_add(request):
    """Register an uploaded audio file as a background-audio option.

    Moves the upload identified by ``params['id']`` into file storage,
    then records a VideoBackgroundAudioFile row with the client-supplied
    name/mime/size metadata and returns it serialized.
    """
    params = request.json_body

    transaction.manager.begin()

    fileobj = env.file_storage.fileobj(component='compulink_video_producer')
    fileobj.persist()

    # Commit early so fileobj gets a real id before we derive its
    # storage filename below (acknowledged "ugly hack" in the original).
    transaction.manager.commit()  # ugly hack

    transaction.manager.begin()
    fileobj.persist()

    srcfile, _ = env.file_upload.get_filename(params['id'])
    dstfile = env.file_storage.filename(fileobj, makedirs=True)

    # NOTE(review): text-mode copy ('r'/'w') of audio data -- on py3 this
    # would corrupt binary content; presumably py2 code. Confirm.
    with open(srcfile, 'r') as fs, open(dstfile, 'w') as fd:
        copyfileobj(fs, fd)

    vba = VideoBackgroundAudioFile()
    vba.file_name = params['name']
    vba.file_obj_id = fileobj.id
    vba.file_mime_type = params['mime_type']
    vba.file_size = params['size']
    vba.persist()

    transaction.manager.commit()
    vba.persist()

    return vba.serialize()
Example #12
0
 def commit(self, filename, basedir="/"):
     """Copy ``filename`` into this container under its path relative
     to ``basedir``.

     :param filename: absolute path of the file to store
     :param basedir: prefix stripped from ``filename`` to form the
         archive-relative destination path
     """
     skip = len(path.commonprefix([filename, basedir]))
     sink = self.open(filename[skip:])
     try:
         # with/try-finally added: the original leaked both handles if
         # copyfileobj raised.
         with open(filename, "r") as source:
             shutil.copyfileobj(source, sink)
     finally:
         sink.close()
def dlImage(url,logf=None):
    """Download the image at ``url`` into tmp_img_dl_dir and return its
    local path, or None on skip/failure (errors logged to ``logf`` when
    given, otherwise printed).

    NOTE(review): Python 2 code (print statements). ``file_img`` keeps
    the leading '/', so os.path.join discards tmp_img_dl_dir and the
    file lands at the filesystem root -- likely should slice from
    pos_slash[-1]+1. Confirm before reuse.
    """
    if url.startswith(start_img_fail):
        if logf:
            logf.write("Skipping image in failed s3 bucket.\n")
        else:
            print "Skipping image in failed s3 bucket."
        return None
    # Take everything after the last '/' as the filename.
    pos_slash=[pos for pos,c in enumerate(url) if c=="/"]
    file_img=url[pos_slash[-1]:]
    outpath=os.path.join(tmp_img_dl_dir,file_img)
    mkpath(outpath)
    #print "Downloading image from {} to {}.".format(url,outpath)
    try:
        r = requests.get(url, stream=True, timeout=imagedltimeout)
        if r.status_code == 200:
            with open(outpath, 'wb') as f:
                # Undo transport compression before the raw copy.
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)
            return outpath
    except Exception as inst:
        if logf:
            logf.write("Download failed for img that should be saved at {} from url {}.\n".format(outpath,url))
        else:
            print "Download failed for img that should be saved at {} from url {}.".format(outpath,url)
        print inst 
        return None
Example #14
0
def cat_strand(files, strand, ext):
    """Concatenate ``files`` for one strand into a single output file.

    The output name is the longest common prefix of the input names,
    truncated at its last '_', with ``strand`` and ``ext`` appended.
    Skips (with a message) if the output already exists.

    NOTE(review): Python 2 code (print statements, izip). If all
    filenames are identical, ``x`` is the index of the last compared
    character rather than a difference position -- confirm inputs differ.
    """
    if not files:
        raise ValueError("No files in directory")

    warn_possibly_made(files)
    #gets substring upto first difference in all selected files
    for x, file_chr in enumerate(izip(*files)):
        if len(set(file_chr)) != 1:
            break

    #get the file
    out_file = files[0][:x]

    #remove characters after last _
    out_file = "_".join(out_file.split("_")[:-1]) + "_" + strand + ext

    #now that we have the outfile lets cat the actual files
    print "processing " + str(files) + " to " + out_file

    if not os.path.exists(out_file):
        with open(out_file, 'wb') as destination:
            for file_name in files:
                with open(file_name, 'rb') as file_obj:
                    shutil.copyfileobj(file_obj, destination)
    else:
        print "%s already made, not overwriting" % out_file
	def readEpgDatFile(self, filename, deleteFile=False):
		"""Load an EPG.DAT file (optionally gzip-compressed) into the
		enigma EPG cache.

		Requires a patched enigma whose epgcache exposes load(); bails
		out otherwise.  ``filename`` is uncompressed/copied to
		HDD_EPG_DAT (or symlinked there when already uncompressed),
		then imported.  When ``deleteFile`` is true the source file is
		removed afterwards.  Python 2 code (print>> / except E, e).
		"""
		if not hasattr(self.epgcache, 'load'):
			print>>log, "[EPGImport] Cannot load EPG.DAT files on unpatched enigma. Need CrossEPG patch."
			return
		try:
			# Clear any stale target first; absence is fine.
			os.unlink(HDD_EPG_DAT)
		except:
			pass # ignore...
		try:
			if filename.endswith('.gz'):
				print>>log, "[EPGImport] Uncompressing", filename
				import shutil
				fd = gzip.open(filename, 'rb')
				epgdat = open(HDD_EPG_DAT, 'wb')
				shutil.copyfileobj(fd, epgdat)
				del fd
				epgdat.close()
				del epgdat
			else:
				if filename != HDD_EPG_DAT:
					os.symlink(filename, HDD_EPG_DAT)
			print>>log, "[EPGImport] Importing", HDD_EPG_DAT
			self.epgcache.load()
			if deleteFile:
				try:
					os.unlink(filename)
				except:
					pass # ignore...
		except Exception, e:
			print>>log, "[EPGImport] Failed to import %s:" % filename, e
def paxel(url,output,blocks=6):
    """Download ``url`` to ``output`` using ``blocks`` parallel range
    requests (axel-style), showing progress on stdout.

    Each AxelPython thread writes its byte range to its own temp file;
    the temp files are then concatenated into ``output`` and removed.
    NOTE(review): GetUrlFileSize, SpliteBlocks, AxelPython and islive
    are defined elsewhere -- behavior assumed from their use here.
    """
    size = GetUrlFileSize(url)
    ranges = SpliteBlocks(size, blocks)
    
    threadname = ["thread_%d" %i for i in range(0,blocks)]
    filename = ["tmpfile_%d" %i for i in range(0,blocks)]
    
    tasks = []
    
    for i in range(0,blocks):
        task = AxelPython(threadname[i],url,filename[i],ranges[i])
        
        # Daemon threads: don't keep the process alive if we exit early.
        task.setDaemon(True)
        task.start()
        tasks.append(task)
    time.sleep(2)
    # Poll until every worker thread finishes, printing progress.
    while islive(tasks):
        downloaded = sum([task.downloaded for task in tasks])
        process  = downloaded / float(size) * 100
        show = u'\r Filesize:%d Downloaded:%d Completed:%.2f%%' %(size,downloaded,process)
        sys.stdout.write(show)
        sys.stdout.flush()
        time.sleep(0.5)
    sys.stdout.write(u'\rFilesize:{0}  Download:{0} Completed:100% \n'.format(size))
    sys.stdout.flush()
    
    # Stitch the per-thread chunks together in order (100 KiB buffer).
    with open(output,'wb+') as filehandle:
        for i in filename:
            with open(i,'rb') as f:
                shutil.copyfileobj(f,filehandle,102400)
            try:
                os.remove(i)
            except OSError:
                pass
Example #17
0
    def commit(self, db, id_):
        """Apply the format list shown in the dialog to book ``id_``.

        Formats with a path are (re-)added to the database from disk;
        formats whose item has no path (and that were present in the
        original value) are removed.  Returns True.
        """
        if not self.changed:
            return True
        old_extensions, new_extensions, paths = set(), set(), {}
        for row in range(self.formats.count()):
            fmt = self.formats.item(row)
            ext, path = fmt.ext.lower(), fmt.path
            if 'unknown' in ext.lower():
                ext = None
            if path:
                # Has a source file: (re-)add this format from disk.
                new_extensions.add(ext)
                paths[ext] = path
            else:
                # No source file: candidate for removal below.
                old_extensions.add(ext)
        for ext in new_extensions:
            # Spool to a temp file so the db layer gets a seekable stream.
            with SpooledTemporaryFile(SPOOL_SIZE) as spool:
                with open(paths[ext], 'rb') as f:
                    shutil.copyfileobj(f, spool)
                spool.seek(0)
                db.add_format(id_, ext, spool, notify=False,
                        index_is_id=True)
        dbfmts = db.formats(id_, index_is_id=True)
        db_extensions = set([f.lower() for f in (dbfmts.split(',') if dbfmts
            else [])])
        extensions = new_extensions.union(old_extensions)
        # Only drop formats the user originally saw and then removed.
        for ext in db_extensions:
            if ext not in extensions and ext in self.original_val:
                db.remove_format(id_, ext, notify=False, index_is_id=True)

        self.changed = False
        return True
Example #18
0
    def set_fileobj(self, fileobj):
        """Set the file object to write the download to.

        Flushes everything buffered in RAM so far into ``fileobj``,
        then either finalizes the download (if the reply already
        finished) or resumes reading new data.

        Args:
            fileobj: A file-like object.

        Raises:
            ValueError: If a fileobj was already set.
        """
        if self.fileobj is not None:
            raise ValueError("fileobj was already set! Old: {}, new: "
                             "{}".format(self.fileobj, fileobj))
        self.fileobj = fileobj
        try:
            self._read_timer.stop()
            log.downloads.debug("buffer: {} bytes".format(self._buffer.tell()))
            # Drain the RAM buffer into the real target file.
            self._buffer.seek(0)
            shutil.copyfileobj(self._buffer, fileobj)
            self._buffer.close()
            if self.reply.isFinished():
                # Downloading to the buffer in RAM has already finished so we
                # write out the data and clean up now.
                self.on_reply_finished()
            else:
                # Since the buffer already might be full, on_ready_read might
                # not be called at all anymore, so we force it here to flush
                # the buffer and continue receiving new data.
                self.on_ready_read()
        except OSError as e:
            self._die(e.strerror)
Example #19
0
def urlretrieve_netrc(url, filename=None):
    '''
    writes a temporary file with the contents of url. This works
    similar to urllib2.urlretrieve, but uses netrc as fallback on 401,
    and has no reporthook or data option. Also urllib2.urlretrieve
    malfunctions behind proxy, so we avoid it.

    :param url: What to retrieve
    :param filename: target file (default is basename of url)
    :returns: (filename, response_headers)
    :raises: IOError and urlopen errors
    '''
    out_name = None
    out_file = None
    try:
        resp = urlopen_netrc(url)
        if not filename:
            # No explicit target: spool into a fresh temporary file.
            fd, out_name = tempfile.mkstemp()
            out_file = os.fdopen(fd, "wb")
        else:
            out_name = filename
            out_file = open(filename, 'wb')
        # Copy the http response into the target file.
        shutil.copyfileobj(resp.fp, out_file)
    finally:
        if out_file:
            out_file.close()
    return (out_name, resp.headers)
Example #20
0
    def download_item_change(self):
        """Refresh the download-options pane for the selected episode:
        fetch its page, show its cover image (downloading it into the
        images/ cache if missing) and list the available download links.

        Emits "started" immediately; "ended" once options are populated.
        """
        self.com.sig.emit("started")
        self.repaint()
        self.ui.options_list_widget.clear()

        if self.ui.res_list_widget.currentItem():
            name = self.ui.res_list_widget.currentItem().text().split(" -->")[0]
            ep = self.episode_list[name]
            name, release_date, link = ep.get_attrs()
            download = DownloadOptions(link)
            download.join()
            img_link = download.get_img_link()
            file_name = img_link.replace("http://www.animetake.com/images/", "")
            image_path = "images" + os.sep + file_name
            if os.path.exists(image_path):
                self.ui.image_label.setPixmap(image_path)
            else:
                reqst = Request("http://www.animetake.com/images/%s" % file_name, headers={"User-Agent": "Mozilla/5.0"})
                # Fix: download into the images/ cache directory -- the
                # original wrote to the CWD while the existence check and
                # the pixmap load both used images/<file_name>.
                with urllib.request.urlopen(reqst) as response, open(image_path, "wb") as out_file:
                    shutil.copyfileobj(response, out_file)
                self.ui.image_label.setPixmap(QPixmap(image_path))
            self.options = download.get_download_options()
            for name, link in self.options.items():
                self.ui.options_list_widget.addItem(name)
            self.com.sig.emit("ended")
Example #21
0
def tst_open_read(src_dir, mnt_dir):
    """Copy TEST_FILE into a freshly named file under src_dir, then
    verify the identical content is visible through the mountpoint."""
    name = name_generator()
    with open(pjoin(src_dir, name), 'wb') as dest:
        with open(TEST_FILE, 'rb') as source:
            shutil.copyfileobj(source, dest)

    assert filecmp.cmp(pjoin(mnt_dir, name), TEST_FILE, False)
Example #22
0
def install_spec(cli_args, kwargs, abstract_spec, spec):
    """Do the actual installation.

    Installs either the spec's dependencies only or the whole spec,
    depending on ``cli_args.things_to_install``; on build failure,
    optionally dumps the build log to stderr before re-raising.
    """

    # handle active environment, if any
    def install(spec, kwargs):
        env = ev.get_env(cli_args, 'install', required=False)
        if env:
            env.install(abstract_spec, spec, **kwargs)
            env.write()
        else:
            spec.package.do_install(**kwargs)

    try:
        if cli_args.things_to_install == 'dependencies':
            # Install dependencies as-if they were installed
            # for root (explicit=False in the DB)
            kwargs['explicit'] = False
            for s in spec.dependencies():
                install(s, kwargs)
        else:
            kwargs['explicit'] = True
            install(spec, kwargs)

    except spack.build_environment.InstallError as e:
        if cli_args.show_log_on_error:
            e.print_context()
            if not os.path.exists(e.pkg.build_log_path):
                tty.error("'spack install' created no log.")
            else:
                # Echo the whole build log to stderr for diagnosis.
                sys.stderr.write('Full build log:\n')
                with open(e.pkg.build_log_path) as log:
                    shutil.copyfileobj(log, sys.stderr)
        raise
def salva_audio_ura(form):
    """Save an uploaded IVR audio file, convert it to .sln via sox, and
    record the converted path in ``form.vars['caminho']``.

    Any previously saved files for the record being edited are removed
    first.  Returns 'OK' on success, or the shutil.Error on copy failure.
    """
    import shutil
    try:
        caminho_raiz = '/aldeia/etc/asterisk/cliente/'
        if not os.path.exists(caminho_raiz):
            os.system('sudo mkdir ' + caminho_raiz)
            os.system('sudo chown -R www-data:www-data /aldeia/audio/')

        os.system('sudo ln -s /aldeia/etc/asterisk/cliente/ /var/www/')
        id_edit = request.vars['id_edit']
        sql_busca = db(db.f_audios.id == id_edit).select()
        if len(sql_busca) > 0:
            # Editing an existing record: drop its previous audio files.
            base_antigo = sql_busca[0]['nome'].replace(' ', '_').replace('.', '_').lower()
            path_wav = caminho_raiz + 'wav_' + base_antigo + '.wav'
            path_sln = caminho_raiz + base_antigo + '.sln'
            if os.path.isfile(path_wav):
                os.remove(path_wav)
            if os.path.isfile(path_sln):
                os.remove(path_sln)
        # Sanitized base name computed once (was repeated inline four times).
        nome_base = request.vars['nome'].replace(' ', '_').replace('.', '_').lower()
        filename = 'wav_' + nome_base + '.wav'
        # with-block added: the original never closed the output handle.
        with open(caminho_raiz + filename, 'wb') as destino:
            shutil.copyfileobj(request.vars['dados_audio'].file, destino)
        convert_audio = 'sudo sox ' + caminho_raiz + filename + ' -r 8000 -c 1 ' + caminho_raiz + nome_base + '.sln'
        os.system(convert_audio)
        os.system('sudo chmod +x /aldeia/etc/asterisk/cliente/*')
        os.system('sudo chown -R www-data:www-data /aldeia/etc/asterisk/cliente/*')
        form.vars['caminho'] = nome_base + '.sln'
        return 'OK'

    except shutil.Error as erro:
        return erro
Example #24
0
def get_images_from_urls(id_url_generator, max_count=5, output_directory="images/raw"):
    """Download JPEG images from a generator of image IDs and urls.

    Files are saved to `output_directory`, named according to their ID.

    Args:
        id_url_generator (generator): Pairs of strings (id, url).
        max_count (int): The maximum number of pictures to download. This may
            not be the same as the number of images actually downloaded, if the
            Flickr API returns duplicate images or invalid responses.
        output_directory (str): An existing folder to save images to. Does not
            include a trailing slash.
    """
    ensure_directory(output_directory)
    already_downloaded = image_filenames_as_dict(output_directory)
    i = 1
    with requests.Session() as s:
        for uid, url in id_url_generator:
            if uid in already_downloaded:
                print "{}: Already downloaded {}".format(i, uid)
            else:
                print "{}: Downloading {}".format(i, url)
                response = s.get(url, stream=True)
                # Only persist real JPEG responses; anything else is skipped
                # without counting against the id.
                if response.status_code == 200 and response.headers["Content-Type"] == "image/jpeg":
                    filename = "{}/{}.jpeg".format(output_directory, uid)
                    with open(filename, "wb") as out_file:
                        shutil.copyfileobj(response.raw, out_file)
                        already_downloaded[uid] = filename
            # Counter only advances below max_count; stop at the limit.
            if i < max_count:
                i += 1
            else:
                break
 def download(self, url, fileName=None):
   """Download ``url`` to ``fileName`` (skipped when already present
   unless self.force_download).  When fileName is None it is taken from
   the Content-Disposition header or, failing that, from the final URL.
   Python 2 code (print statements, urllib2/urlparse).
   """
   if os.path.isfile(fileName) and not self.force_download:
     print 'skipping (already downloaded)'
     return
   def getFileName(url,openUrl):
       if 'Content-Disposition' in openUrl.info():
           # If the response has Content-Disposition, try to get filename from it
           cd = dict(map(
               lambda x: x.strip().split('=') if '=' in x else (x.strip(),''),
               openUrl.info()['Content-Disposition'].split(';')))
           if 'filename' in cd:
               filename = cd['filename'].strip("\"'")
               if filename: return filename
       # if no filename was found above, parse it out of the final URL.
       return os.path.basename(urlparse.urlsplit(openUrl.url)[2])
   try:
     r = urllib2.urlopen(urllib2.Request(url))
   except Exception as err:
     print 'Download failed > ', url
     return
   try:
       fileName = fileName or getFileName(url,r)
       with open(fileName, 'wb') as f:
           shutil.copyfileobj(r,f)
   finally:
       r.close()
Example #26
0
    def copyFile(self, newpath):
        """Copy this file under ``newpath``, preserving its path relative
        to the nearest 'include' directory, rewriting QoS/T files on the
        way.  Python 2 code (print statements).

        NOTE(review): the upward walk loops forever if no ancestor path
        component ever contains "include" -- confirm inputs guarantee it.
        """
        #Walk the directory tree backwards until "spec" is found
        relpath = ''
        path = [self.fpath, '']
        while True:
            path = os.path.split(path[0])
            relpath = os.path.join(path[1], relpath)
            if "include" in relpath:
                break
        newpath = os.path.join(newpath, relpath)
        #Create the directory if it doesnt exist
        if not os.path.exists(newpath):
            os.makedirs(newpath)
            print "Creating Dir: " + newpath
        #Copy the file and fix the filename if its a Tfile
        if not self.isT():
            #If its a QoS file eg. DomainParticpantQos.hpp then replace it with TEntityQos
            if self.isQos():
                print "Copying QoS file " + self.fname + " to " + newpath
                nf = open(os.path.join(newpath, self.fname), 'w')
                shutil.copyfileobj(self.fixQoSFileContents(), nf)

            else:
                print "Copying plain file " + self.fname + " to " + newpath
                shutil.copy(os.path.join(self.fpath, self.fname), os.path.join(newpath, self.fname))

        else:
            # T files also get their filename rewritten.
            nf = open(os.path.join(newpath, self.fixFilename()), 'w')
            shutil.copyfileobj(self.fixFileContents(), nf)
            print "Copying T file " + self.fname + " to " + newpath + self.fixFilename()
Example #27
0
def get_matrix_filename(series_id, platform_id):
    """Return a local path to the series-matrix file, downloading it
    into the mirror on first use.  Python 2 code (print statements).

    Tries each candidate filename in order; raises LookupError when
    none exists locally or at SERIES_MATRIX_URL.
    """
    filenames = list(matrix_filenames(series_id, platform_id))
    mirror_filenames = (os.path.join(conf.SERIES_MATRIX_MIRROR, filename) for filename in filenames)
    mirror_filename = first(filename for filename in mirror_filenames if os.path.isfile(filename))
    if mirror_filename:
        # Already mirrored locally -- nothing to download.
        return mirror_filename

    for filename in filenames:
        print 'Loading URL', conf.SERIES_MATRIX_URL + filename, '...'
        try:
            res = urllib2.urlopen(conf.SERIES_MATRIX_URL + filename)
        except urllib2.URLError:
            # Candidate not available remotely; try the next name.
            pass
        else:
            mirror_filename = os.path.join(conf.SERIES_MATRIX_MIRROR, filename)
            print 'Cache to', mirror_filename

            directory = os.path.dirname(mirror_filename)
            if not os.path.exists(directory):
                os.makedirs(directory)
            with open(mirror_filename, 'wb') as f:
                shutil.copyfileobj(res, f)

            return mirror_filename

    raise LookupError("Can't find matrix file for series %s, platform %s"
                      % (series_id, platform_id))
Example #28
0
	def __init__(self, command, src, dst):
		"""Execute 'command' with src as stdin and writing to stream
		dst. If either stream is not a fileno() stream, temporary files
		will be used as required.
		Either stream may be None if input or output is not required.
		Call the wait() method to wait for the command to finish.
		'command' may be a string (passed to os.system) or a list (os.execvp).
		"""

		# A child process needs a real OS-level descriptor for stdin;
		# spool fileno()-less sources into a temporary file first.
		if src is not None and not hasattr(src, 'fileno'):
			import shutil
			new = _Tmp()
			src.seek(0)
			shutil.copyfileobj(src, new)
			src = new

		Process.__init__(self)

		self.command = command
		self.dst = dst
		self.src = src
		# NOTE(review): presumably holds a dst spool file; set elsewhere.
		self.tmp_stream = None

		self.callback = None
		self.killed = 0
		self.errors = ""

		self.done = False	# bool or exception
		self.waiting = False
Example #29
0
def download(url, message_id, fileName=None):
    """Download ``url`` into IMAGE_DIR (cached by filename) and return
    the name of a per-message copy ("<message_id>_<fileName>").

    The filename comes from ``fileName``, else the Content-Disposition
    header, else the final URL's basename.
    NOTE(review): Python 2 era code (urllib2/urlparse modules).
    """
    def getFileName(url, openUrl):

        if not os.path.exists(IMAGE_DIR):
            os.makedirs(IMAGE_DIR)
        if 'Content-Disposition' in openUrl.info():
            # If the response has Content-Disposition, try to get filename from it
            cd = dict(map(
                lambda x: x.strip().split('=') if '=' in x else (x.strip(),''),
                openUrl.info()['Content-Disposition'].split(';')))
            if 'filename' in cd:
                filename = cd['filename'].strip("\"'")
                if filename: return filename
        # if no filename was found above, parse it out of the final URL.
        return os.path.basename(urlparse.urlsplit(openUrl.url)[2])

    r = urllib2.urlopen(urllib2.Request(url))
    try:
        fileName = fileName or getFileName(url, r)
    finally:
        # Fix: the probe request used only for filename detection was
        # never closed in the original.
        r.close()
    if not os.path.exists(IMAGE_DIR + fileName):
        r = urllib2.urlopen(urllib2.Request(url))
        try:
            with open(IMAGE_DIR + fileName, 'wb') as f:
                shutil.copyfileobj(r, f)
        finally:
            r.close()

    downloadedFile = message_id + '_' + fileName
    shutil.copyfile(IMAGE_DIR + fileName, IMAGE_DIR + downloadedFile)

    return downloadedFile
Example #30
0
    def upload(self):
        """Handle a FASTA upload: save the posted file into
        permanent_store, list every sequence description, and run
        formatdb on the saved file.  Returns an HTML status string.

        NOTE(review): Python 2 / Pylons-era code (print statement,
        Bio.Fasta iterator API); lstrip(os.sep) only strips leading
        separators -- '..' components are not guarded against.
        """
        uploadfile = request.POST['uploadfile']
        fasta_file = open(os.path.join(permanent_store,
                                           uploadfile.filename.lstrip(os.sep)),
                                           'w')

        shutil.copyfileobj(uploadfile.file, fasta_file)
        uploadfile.file.close()
        fasta_file.close()
        handle = open( fasta_file.name )
#        if false:
        # Walk the parsed sequences, collecting descriptions for display.
        it = Bio.Fasta.Iterator(handle, Bio.Fasta.SequenceParser())
        seq = it.next()
        output_string = "" 
        while seq:
            output_string += seq.description 
            output_string += "<br /> " 
            log.debug("seq : %s" ,  seq.description) 
            seq = it.next()
        handle.close()
        self.testmethod()
        # Build the BLAST database from the stored FASTA file.
        self.formatdb(fasta_file.name) 

        log.debug('Hello Kenglish' )
        print "Hello Kenglish" 

        return 'Successfully uploaded: %s, description: %s, <br /> results: <br /> %s ' % \
            (uploadfile.filename, request.POST['description'], output_string)
Example #31
0
def import_channel(source_id,
                   target_id=None,
                   download_url=None,
                   editor=None,
                   logger=None):
    """
    Import a channel from another Studio instance. This can be used to
    copy online Studio channels into local machines for development,
    testing, faster editing, or other purposes.

    :param source_id: The UUID of the channel to import from the source Studio instance.
    :param target_id: The UUID of the channel on the local instance. Defaults to source_id.
    :param download_url: The URL of the Studio instance to import from.
    :param editor: The email address of the user you wish to add as an editor, if any.
    :param logger: Logger used for progress output; defaults to this module's logger.

    """

    global log
    if logger:
        log = logger
    else:
        log = logging.getLogger(__name__)

    # Set up variables for the import process
    log.info("\n\n********** STARTING CHANNEL IMPORT **********")
    start = datetime.datetime.now()
    target_id = target_id or source_id

    # Test connection to database
    log.info("Connecting to database for channel {}...".format(source_id))

    # delete=False so the file survives close(); it is unlinked in the finally.
    tempf = tempfile.NamedTemporaryFile(suffix=".sqlite3", delete=False)
    conn = None
    try:
        if download_url:
            response = requests.get('{}/content/databases/{}.sqlite3'.format(
                download_url, source_id))
            # Fail fast on HTTP errors; previously an error page would be
            # written into the .sqlite3 file and only surface later as a
            # cryptic "file is not a database" error.
            response.raise_for_status()
            for chunk in response:
                tempf.write(chunk)
        else:
            filepath = "/".join(
                [settings.DB_ROOT, "{}.sqlite3".format(source_id)])
            # Check if database exists
            if not default_storage.exists(filepath):
                raise IOError("The object requested does not exist.")
            with default_storage.open(filepath) as fobj:
                shutil.copyfileobj(fobj, tempf)

        tempf.close()
        conn = sqlite3.connect(tempf.name)
        cursor = conn.cursor()

        # Start by creating channel
        log.info("Creating channel...")
        channel, root_pk = create_channel(conn, target_id)
        if editor:
            channel.editors.add(models.User.objects.get(email=editor))
            channel.save()

        # Create root node
        root = models.ContentNode.objects.create(
            sort_order=models.get_next_sort_order(),
            node_id=root_pk,
            title=channel.name,
            kind_id=content_kinds.TOPIC,
            original_channel_id=target_id,
            source_channel_id=target_id,
        )

        # Create nodes mapping to channel
        log.info("   Creating nodes...")
        with transaction.atomic():
            create_nodes(cursor, target_id, root, download_url=download_url)
            # TODO: Handle prerequisites

        # Delete the previous tree if it exists
        old_previous = channel.previous_tree
        if old_previous:
            old_previous.parent = get_deleted_chefs_root()
            old_previous.title = "Old previous tree for channel {}".format(
                channel.pk)
            old_previous.save()

        # Save tree to target tree
        channel.previous_tree = channel.main_tree
        channel.main_tree = root
        channel.save()
    finally:
        # close() is idempotent, so re-closing after the success path is safe.
        conn and conn.close()
        tempf.close()
        os.unlink(tempf.name)

    # Print stats
    log.info("\n\nChannel has been imported (time: {ms})\n".format(
        ms=datetime.datetime.now() - start))
    log.info("\n\n********** IMPORT COMPLETE **********\n\n")
Example #32
0
def save_imges(url, path):
    """Fetch *url* and write the raw response body to *path* (HTTP 200 only)."""
    resp = requests.get(url, stream=True)
    if resp.status_code != 200:
        return
    # Let requests undo any transfer encoding before the bytes hit disk.
    resp.raw.decode_content = True
    with open(path, 'wb') as sink:
        shutil.copyfileobj(resp.raw, sink)
Example #33
0
def save_image(folder, name, data):
    """Write the readable binary stream *data* to ``<folder>/<name>.jpg``."""
    target_path = os.path.join(folder, name + '.jpg')
    with open(target_path, 'wb') as sink:
        shutil.copyfileobj(data, sink)
Example #34
0
def _get_file_data(fname, url):
    """Download *url* and store the response body at path *fname*."""
    remote = urlopen(url)
    try:
        with open(fname, 'wb') as sink:
            copyfileobj(remote, sink)
    finally:
        remote.close()
def download_imageset(current_imageset):
    """Log in to the image server and download every image of *current_imageset*
    into ``./<filename>/<current_imageset>/``.

    Relies on module-level globals: ``BaseUrl``, ``user``, ``password``,
    ``filename`` (output root) and ``errorlist`` (imagesets that returned 404).
    """
    error = False
    # Make sure the per-imageset output directory exists.
    if not os.path.exists(os.path.join(os.getcwd(), filename,
                                       current_imageset)):
        os.makedirs(os.path.join(os.getcwd(), filename, current_imageset))
    # Fetch the login page once just to obtain a CSRF cookie.
    loginpage = requests.get(BaseUrl)
    csrftoken = loginpage.cookies['csrftoken']

    cookies = {'csrftoken': csrftoken}
    csrfmiddlewaretoken = csrftoken
    data = {
        'username': user,
        'password': password,
        'csrfmiddlewaretoken': csrfmiddlewaretoken
    }
    loggedinpage = requests.post('{}user/login/'.format(BaseUrl),
                                 data=data,
                                 cookies=cookies,
                                 allow_redirects=False,
                                 headers={'referer': BaseUrl})

    # A successful login yields a sessionid cookie; otherwise abort the script.
    try:
        sessionid = loggedinpage.cookies['sessionid']
    except KeyError:
        print('Login failed')
        sys.exit(1)
    cookies = {'sessionid': sessionid}
    page = requests.get("{}images/imagelist/{}/".format(
        BaseUrl, current_imageset),
                        cookies=cookies)
    if page.status_code == 404:
        print(
            "In Imageset {} was an error. The server returned page not found.".
            format(current_imageset))
        errorlist.append(current_imageset)
        return
    # The list endpoint returns comma-separated relative image URLs.
    images = page.text.replace('\n', '')
    images = images.split(',')
    for index, image in enumerate(images):
        if image == '':
            continue
        # image[1:] drops the leading character of the relative URL before
        # appending it to BaseUrl.
        r = requests.get(BaseUrl + image[1:],
                         data=data,
                         cookies=cookies,
                         allow_redirects=False,
                         headers={'referer': BaseUrl},
                         stream=True)
        if r.status_code == 404:
            print(
                "In Imageset {} was an error. The server returned page not found."
                .format(current_imageset))
            errorlist.append(current_imageset)

            error = True
            continue
        # The local file name is taken from the URL's query-string part.
        image = image.split('?')[1]
        with open(os.path.join(filename, current_imageset, image), 'wb') as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
            sys.stdout.flush()
            # "\r" + end="" keeps the progress counter on a single line.
            print(
                "{}Image {} / {} has been downloaded from imageset {}".format(
                    "\r", index + 1,
                    len(images) - 1, current_imageset),
                end="")
    if not error:
        print('\nImageset {} has been downloaded.'.format(current_imageset))
Example #36
0
    def update_metadata(self, tmdb_id=None, save=True):
        """Refresh this record's fields (and poster file) from TMDb.

        :param tmdb_id: TMDb id to fetch; when omitted, the first result of
                        ``self.search_metadata()`` is used.
        :param save: when True, persist the instance and write the poster file.
        :return: True on success; False when no metadata could be found or the
                 database update failed (a poster failure only logs).
        """
        logger.info('Updating metadata for %s (%d)' % (self.slug, self.pk))

        if not tmdb_id:
            metadata = self.search_metadata()

            if not metadata:
                return False

            tmdb_id = metadata[0].get('id') # Default to first match

        # Get details
        details = tmdb.metadata(tmdb_id=tmdb_id)

        if not details:
            return False

        logger.info('Updating matched metadata: %s (%d)' % (
            details.get('title'), details.get('id')
        ))

        # Update instance (duplicate 'overview' assignment removed)
        try:
            self.overview = details.get('overview')
            self.release = details.get('release_date')
            self.original_title = details.get('original_title')
            self.original_language = details.get('original_language')
            self.vote_average = details.get('vote_average')
            self.backdrop_path = details.get('backdrop_path')
            self.tmdb_id = details.get('id')

            if save:
                self.save()

        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        except Exception:
            logger.exception('Could not save details to database')
            return False

        # Save poster
        if not details.get('poster_path'):
            logger.warning('No poster available')

        else:
            try:
                poster_file = path.join(POSTER_PATH, '%s.jpg' % (self.slug))

                logger.info('Saving poster to %s' % (poster_file))

                r = get(TMDB_POSTER_URL % (details.get('poster_path')), stream=True)
                r.raise_for_status()

                if save:
                    with open(poster_file, 'wb') as f:
                        r.raw.decode_content = True
                        copyfileobj(r.raw, f)
            # Best-effort: a poster failure is logged but does not fail the
            # whole update (narrowed from a bare ``except:``).
            except Exception:
                logger.exception('Could not save poster')

        return True
Example #37
0
def download_file(url, outputFile):
    """Stream *url* and write the raw (undecoded) body to *outputFile*."""
    with requests.get(url, stream=True) as resp, open(outputFile, 'wb') as sink:
        shutil.copyfileobj(resp.raw, sink)
Example #38
0
 def backup(self, dst):
     """Copy this component's payload file into the open stream *dst*,
     reading BUF_SIZE bytes at a time."""
     payload_path = self.component.filename(self.plget(self.payload))
     with open(payload_path, 'rb') as src:
         copyfileobj(src, dst, length=BUF_SIZE)
Example #39
0
import sys
import shutil
import tempfile
import urllib.request

from pyspark import SparkContext
from pyspark.mllib.feature import Word2Vec

url = sys.argv[1]

# Download the corpus to a named temp file so Spark can read it by path.
with urllib.request.urlopen(url) as remote:
    with tempfile.NamedTemporaryFile(delete=False) as corpus_file:
        shutil.copyfileobj(remote, corpus_file)

sc = SparkContext(appName='Word2Vec')
# One record per line, tokenized on single spaces.
inp = sc.textFile(corpus_file.name).map(lambda line: line.split(" "))

word2vec = Word2Vec()
model = word2vec.fit(inp)

synonyms = model.findSynonyms('china', 40)

for word, cosine_distance in synonyms:
    print("{}: {}".format(word, cosine_distance))


# Local Variables:
# mode:python
# mode:outline-minor
# mode:auto-fill
# fill-column: 75
Example #40
0
 def copyfile(self, source, outputfile):
     """Pump the whole *source* stream into *outputfile*."""
     shutil.copyfileobj(source, outputfile)
Example #41
0
 def do_GET(self):
     """Answer every GET with HTTP 200 and the contents of the module-level
     ``stream`` object, sent as application/x-tar."""
     self.send_response(200)
     self.send_header('Content-Type', 'application/x-tar')
     self.end_headers()
     # Stream the global tar data directly into the response socket.
     shutil.copyfileobj(stream, self.wfile)
def get_sentinel2_image(url,
                        outputdir,
                        overwrite=False,
                        partial=False,
                        noinspire=False,
                        reject_old=False):
    """
    Collect the entire dir structure of the image files from the
    manifest.safe file and build the same structure in the output
    location.

    Returns:
        True if image was downloaded
        False if partial=False and image was not fully downloaded
            or if reject_old=True and it is old-format
            or if noinspire=False and INSPIRE file is missing
    """
    img = os.path.basename(url)
    target_path = os.path.join(outputdir, img)
    target_manifest = os.path.join(target_path, "manifest.safe")

    return_status = True
    if not os.path.exists(target_path) or overwrite:

        manifest_url = url + "/manifest.safe"

        if reject_old:
            # check contents of manifest before downloading the rest
            content = urlopen(manifest_url)
            with NamedTemporaryFile() as f:
                shutil.copyfileobj(content, f)
                if not is_new(f.name):
                    return False

        # Download the manifest, then every file it references (href entries).
        os.makedirs(target_path, exist_ok=True)
        content = urlopen(manifest_url)
        with open(target_manifest, 'wb') as f:
            shutil.copyfileobj(content, f)
        with open(target_manifest, 'r') as manifest_file:
            manifest_lines = manifest_file.read().split()
        for line in manifest_lines:
            if 'href' in line:
                # Extract the quoted relative path from href="./..." and map it
                # under target_path (dropping the leading path component).
                rel_path = line[line.find('href=".') + 7:]
                rel_path = rel_path[:rel_path.find('"')]
                abs_path = os.path.join(target_path, *rel_path.split('/')[1:])
                if not os.path.exists(os.path.dirname(abs_path)):
                    os.makedirs(os.path.dirname(abs_path))
                try:
                    download_file(url + rel_path, abs_path)
                except HTTPError as error:
                    # Best-effort: report and keep downloading the other files.
                    print("Error downloading {} [{}]".format(
                        url + rel_path, error))
                    continue
        # Ensure the standard AUX_DATA/HTML folders exist at both the product
        # root and the granule level.
        granule = os.path.dirname(
            os.path.dirname(get_S2_image_bands(target_path, "B01")))
        for extra_dir in ("AUX_DATA", "HTML"):
            if not os.path.exists(os.path.join(target_path, extra_dir)):
                os.makedirs(os.path.join(target_path, extra_dir))
            if not os.path.exists(os.path.join(granule, extra_dir)):
                os.makedirs(os.path.join(granule, extra_dir))
        if not manifest_lines:
            print()
    elif reject_old and not is_new(target_manifest):
        print(f'Warning: old-format image {outputdir} exists')
        return_status = False

    # NOTE(review): this runs when partial=True, but the docstring says
    # incomplete downloads are rejected when partial=False — confirm the
    # intended polarity of this flag.
    if partial:
        tile_chk = check_full_tile(get_S2_image_bands(target_path, "B01"))
        if tile_chk == 'Partial':
            print("Removing partial tile image files...")
            shutil.rmtree(target_path)
            return_status = False
    if not noinspire:
        inspire_file = os.path.join(target_path, "INSPIRE.xml")
        if os.path.isfile(inspire_file):
            inspire_path = get_S2_INSPIRE_title(inspire_file)
            if os.path.basename(target_path) != inspire_path:
                # NOTE(review): inspire_path is a bare title, so this renames
                # into the current working directory, not outputdir — verify.
                os.rename(target_path, inspire_path)
        else:
            print(f"File {inspire_file} could not be found.")
            return_status = False

    return return_status
Example #43
0
    if not os.path.exists(options.srcdir):
        print("The specified source dir does not exist" % options.srcdir)
        exit(1)

    if not args:
        if os.name is 'nt':
            args = [os.environ.get("COMSPEC", r"C:\WINDOWS\system32\cmd.exe")]
        else:
            args = [os.environ.get("SHELL", os.path.realpath("/bin/sh"))]
        if "bash" in args[0]:
            bashrc = os.path.expanduser('~/.bashrc')
            if os.path.exists(bashrc):
                tmprc = tempfile.NamedTemporaryFile(mode='w')
                with open(bashrc, 'r') as src:
                    shutil.copyfileobj(src, tmprc)
                tmprc.write('\nexport PS1="[gst-%s] $PS1"' %
                            options.gst_version)
                tmprc.flush()
                # Let the GC remove the tmp file
                args.append("--rcfile")
                args.append(tmprc.name)
    python_set = python_env(options)
    try:
        exit(
            subprocess.call(args,
                            cwd=options.srcdir,
                            close_fds=False,
                            env=get_subprocess_env(options)))
    except subprocess.CalledProcessError as e:
        exit(e.returncode)
Example #44
0
def download(url: str, path: str) -> None:
    """Download *url* into *path*, using atomic_write so the destination only
    appears once the transfer completed."""
    with atomic_write(path, "wb") as sink:
        with urllib.request.urlopen(url) as remote:
            shutil.copyfileobj(remote, sink)
Example #45
0
    def _export_terrain(export_request, sourcedir, exportdir, palettes,
                        game_version, compression_level):
        """
        Convert and export a terrain graphics file.

        :param export_request: Export request for a terrain graphics file.
        :param sourcedir: Directory where all media assets are mounted. Source subfolder and
                          source filename should be stored in the export request.
        :param exportdir: Directory the resulting file(s) will be exported to. Target subfolder
                          and target filename should be stored in the export request.
        :param game_version: Game edition and expansion info.
        :param palettes: Palettes used by the game.
        :param compression_level: PNG compression level for the resulting image file.
        :type export_request: MediaExportRequest
        :type sourcedir: Directory
        :type exportdir: Directory
        :type palettes: dict
        :type game_version: tuple
        :type compression_level: int
        """
        source_file = sourcedir[export_request.get_type().value,
                                export_request.source_filename]

        if source_file.suffix.lower() == ".slp":
            from ...value_object.read.media.slp import SLP
            # Close the handle as soon as the bytes are read (it was
            # previously leaked).
            with source_file.open("rb") as media_file:
                image = SLP(media_file.read())

        elif source_file.suffix.lower() == ".dds":
            # TODO: Implement
            # NOTE(review): falling through leaves ``image`` unbound, so the
            # Texture() call below would raise NameError for .dds input.
            pass

        elif source_file.suffix.lower() == ".png":
            from shutil import copyfileobj
            # Copy the PNG verbatim; context managers close both handles
            # (both file objects were previously leaked).
            with source_file.open('rb') as src_file, \
                    exportdir[export_request.targetdir,
                              export_request.target_filename].open('wb') as dst_file:
                copyfileobj(src_file, dst_file)
            return

        else:
            raise Exception(
                f"Source file {source_file.name} has an unrecognized extension: "
                f"{source_file.suffix.lower()}")

        if game_version[0].game_id in ("AOC", "SWGB"):
            from .terrain_merge import merge_terrain
            texture = Texture(image, palettes)
            merge_terrain(texture)

        else:
            from .texture_merge import merge_frames
            texture = Texture(image, palettes)
            merge_frames(texture)

        MediaExporter.save_png(
            texture,
            exportdir[export_request.targetdir],
            export_request.target_filename,
            compression_level,
        )
Example #46
0
 def write_to_file(self, outfn):
     """Rewind the internal buffer and write its full contents to *outfn*.

     NOTE(review): the target is opened in text mode ('w'), so this assumes
     ``self.fout`` is a text-mode stream — confirm.
     """
     with open(outfn, 'w') as sink:
         self.fout.seek(0)
         shutil.copyfileobj(self.fout, sink)
Example #47
0
    def _rotator(self, source, dest):
        with gzip.open(dest, 'wb') as gzip_file, \
                open(source, 'rb') as source_file:
            shutil.copyfileobj(source_file, gzip_file)

        os.remove(source)
Example #48
0
def main(options, args):
    """Build a self-contained "superzippy" executable archive (.sz).

    Installs the requested packages into a throwaway virtualenv, moves the
    resulting site-packages plus a bootstrapper into a build directory, zips
    it, and prepends a shebang to make it executable.

    args    -- every element but the last is a package spec to install; the
               last element is the entry point string.
    options -- parsed CLI options (requirements, verbose, output, raw_copy,
               raw_copy_rename).
    Returns a process exit code: 0 on success, 1 on any failure.
    """
    log = logging.getLogger("superzippy")

    packages = args[0:-1]
    entry_point = args[-1]

    # Append any requirements.txt files to the packages list.
    packages += ["-r %s" % i for i in options.requirements]

    # Create the virtualenv directory
    virtualenv_dir = tempfile.mkdtemp()
    _dirty_files.append(virtualenv_dir)

    #### Create virtual environment

    log.debug("Creating virtual environment at %s.", virtualenv_dir)
    # Suppress subprocess output unless at least -vvv was given.
    output_target = None if options.verbose >= 3 else DEVNULL

    return_value = subprocess.call(["virtualenv", virtualenv_dir],
                                   stdout=output_target,
                                   stderr=subprocess.STDOUT)

    if return_value != 0:
        log.critical("virtualenv returned non-zero exit status (%d).",
                     return_value)
        return 1

    ##### Install package and dependencies

    pip_path = os.path.join(virtualenv_dir, "bin", "pip")

    for i in packages:
        log.debug("Installing package with `pip install %s`.", i)

        command = [pip_path, "install"] + shlex.split(i)
        return_value = subprocess.call(command,
                                       stdout=output_target,
                                       stderr=subprocess.STDOUT)

        if return_value != 0:
            log.critical("pip returned non-zero exit status (%d).",
                         return_value)
            return 1

    if not packages:
        log.warn("No packages specified.")

    #### Uninstall extraneous packages (pip and setuptools)
    return_value = subprocess.call(
        [pip_path, "uninstall", "--yes", "pip", "setuptools"],
        stdout=output_target,
        stderr=subprocess.STDOUT)

    if return_value != 0:
        log.critical("pip returned non-zero exit status (%d).", return_value)
        return 1

    #### Move site packages over to build directory

    # TODO: We should look at pip's source code and figure out how it decides
    # where site-packages is and use the same algorithm.

    build_dir = tempfile.mkdtemp()
    _dirty_files.append(build_dir)

    # Locate site-packages by walking the virtualenv tree.
    site_package_dir = None
    for root, dirs, files in os.walk(virtualenv_dir):
        if "site-packages" in dirs:
            found = os.path.join(root, "site-packages")

            # We'll only use the first one, but we want to detect them all.
            if site_package_dir is not None:
                log.warn(
                    "Multiple site-packages directories found. `%s` will be "
                    "used. `%s` was found afterwards.", site_package_dir,
                    found)
            else:
                site_package_dir = found

    # A couple .pth files are consistently left over from the previous step,
    # delete them.
    extraneous_pth_files = ["easy-install.pth", "setuptools.pth"]
    for i in extraneous_pth_files:
        path = os.path.join(site_package_dir, i)
        if os.path.exists(path):
            os.remove(path)

    shutil.move(site_package_dir, build_dir)

    #### Perform any necessary raw copies.
    raw_copies = options.raw_copy_rename

    # Plain --raw-copy entries keep their basename as destination name.
    for i in options.raw_copy:
        if i[-1] == "/":
            i = i[0:-1]

        raw_copies.append((i, os.path.basename(i)))

    for file_path, dest_name in raw_copies:
        log.debug("Performing raw copy of `%s`, destination name: `%s`.",
                  file_path, dest_name)

        dest = os.path.join(build_dir, "site-packages", dest_name)

        try:
            shutil.copytree(file_path, dest)
        except OSError as e:
            # Fall back to a plain file copy when the source is not a dir.
            if e.errno == errno.ENOTDIR:
                shutil.copy(file_path, dest)
            else:
                raise

    ##### Install bootstrapper

    log.debug("Adding bootstrapper to the archive.")

    # Map of bootstrapper resource name -> name inside the archive.
    bootstrap_files = {
        "__init__.py": "__init__.py",
        "bootstrapper.py": "__main__.py",
        "zipsite.py": "zipsite.py",
        "module_locator.py": "module_locator.py"
    }

    for k, v in bootstrap_files.items():
        source = pkg_resources.resource_stream("superzippy.bootstrapper", k)
        dest = open(os.path.join(build_dir, v), "wb")

        shutil.copyfileobj(source, dest)

        source.close()
        dest.close()

    ##### Install configuration

    log.debug("Adding configuration file to archive.")

    with open(os.path.join(build_dir, "superconfig.py"), "w") as f:
        f.write("entry_point = '%s'" % entry_point)

    ##### Zip everything up into final file

    log.debug("Zipping up %s.", build_dir)

    # Output name: explicit --output wins; otherwise derive it from the last
    # package spec (local dir -> setup.py name, else the spec minus version).
    if options.output:
        output_file = options.output
    elif packages:
        last_package = shlex.split(packages[-1])[0]

        if os.path.isdir(last_package):
            # Figure out the name of the package the user pointed at on their
            # system.
            setup_program = subprocess.Popen([
                "/usr/bin/env", "python",
                os.path.join(last_package, "setup.py"), "--name"
            ],
                                             stdout=subprocess.PIPE,
                                             stderr=DEVNULL)
            if setup_program.wait() != 0:
                log.critical("Could not determine name of package at %s.",
                             last_package)
                return 1

            # Grab the output of the setup program
            package_name_raw = setup_program.stdout.read()

            # Decode the output into text. Whatever our encoding is is
            # probably the same as what the setup.py program spat out.
            package_name_txt = package_name_raw.decode(sys.stdout.encoding
                                                       or "UTF-8")

            # Strip any leading and trailing whitespace
            package_name = package_name_txt.strip()

            # Verify that what we got was a valid package name (this handles
            # most cases where an error occurs in the setup.py program).
            if re.match("[A-Za-z0-9_-]+", package_name) is None:
                log.critical(
                    "Could nto determine name of package. setup.py "
                    "is reporting an illegal name of %s", package_name)
                return 1

            output_file = package_name + ".sz"
        else:
            # Just use the name of a package we're going to pull down from
            # the cheese shop, but cut off any versioning information (ex:
            # bla==2.3 will become bla).
            for k, c in enumerate(last_package):
                if c in ("=", ">", "<"):
                    output_file = last_package[0:k] + ".sz"
                    break
            else:
                output_file = last_package + ".sz"

    else:
        log.critical("No output file or packages specified.")
        return 1

    try:
        zipdir.zip_directory(build_dir, output_file)
    except IOError:
        log.critical("Could not write to output file at '%s'.",
                     output_file,
                     exc_info=sys.exc_info())
        return 1

    #### Make that file executable

    # Prepend a shebang so the zip archive can be executed directly.
    with open(output_file, "rb") as f:
        data = f.read()

    with open(output_file, "wb") as f:
        f.write(b"#!/usr/bin/env python\n" + data)

    os.chmod(output_file, 0o755)

    return 0
Example #49
0
 def copy(src, dest):
     """Copy the whole *src* stream into *dest*, then close both.

     Fix: the original ``src.close(), dest.close()`` left *dest* open when
     ``src.close()`` raised; the nested finally guarantees both are closed.
     """
     try:
         shutil.copyfileobj(src, dest)
     finally:
         try:
             src.close()
         finally:
             dest.close()
Example #50
0
token = sys.argv[2]


def _optional_id_list(position):
    """Parse a comma-separated id list from argv[position]; None if absent/empty."""
    if len(sys.argv) > position and len(sys.argv[position]) > 0:
        return [int(piece.strip()) for piece in sys.argv[position].split(',')]
    return None


config_ids = _optional_id_list(3)
device_ids = _optional_id_list(4)
package_ids = _optional_id_list(5)
result_ids = _optional_id_list(6)

c = CDRouter(base, token=token)

# Request a bulk export; b is a file-like object, filename the suggested name.
b, filename = c.exports.bulk_export(config_ids=config_ids,
                                    device_ids=device_ids,
                                    package_ids=package_ids,
                                    result_ids=result_ids)

with open(filename, 'wb') as fd:
    shutil.copyfileobj(b, fd)

print(filename)
import requests
from bs4 import BeautifulSoup
import shutil

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36"
}

res = requests.get("https://www.52av.one/thread-116234-1-1.html", headers=headers)

soup = BeautifulSoup(res.text, "html.parser")

# Save every image referenced by a ".zoom" element under its original name.
for img in soup.select(".zoom"):
    fname = img["file"].split("/")[-1]
    image_resp = requests.get(img["file"], stream=True)
    sink = open(fname, "wb")
    shutil.copyfileobj(image_resp.raw, sink)
    sink.close()
    del image_resp
Example #52
0
def gzFile(file):
    """Compress *file* to ``<file>.gz`` and remove the uncompressed original."""
    with open(file, 'rb') as raw_in:
        with gzip.open(file + '.gz', 'wb') as gz_out:
            copyfileobj(raw_in, gz_out)
    os.remove(file)
	def common(self, data=True):
		"""Proxy the current request upstream and relay the response.

		Optionally enforces HTTP Basic proxy authorization, strips embedded
		credentials from the URL, optionally rewrites the configured hostname
		to localhost, forwards the request with urllib2, and copies status,
		headers and (when *data* is true) the body back to the client.
		Legacy Python 2 code (urllib2/httplib/base64.decodestring).
		"""
		if options.authorization:
			# Any auth problem raises KeyError, which sends a 407 challenge.
			try:
				auth = self.headers['Proxy-Authorization']
				if not auth.startswith('Basic '):
					raise KeyError("Only Basic authentication: %s" % auth)
				auth = auth[len('Basic '):]
				auth = base64.decodestring(auth)
				username, password = auth.split(':', 1)
				username, password = urllib2.unquote(username), urllib2.unquote(password)
				if username != options.username:
					msg = "Username: %s != %s" % (username, options.username)
					if options.verbose:
						self.log_error(msg)
					raise KeyError(msg)
				if password != options.password:
					msg = "Password: %s != %s" % (password, options.password)
					if options.verbose:
						self.log_error(msg)
					raise KeyError(msg)
			except KeyError as exc:
				self.send_response(httplib.PROXY_AUTHENTICATION_REQUIRED)
				self.send_header('WWW-Authenticate', 'Basic realm="%s"' % (options.realm,))
				self.send_header('Content-type', 'text/html')
				self.end_headers()
				self.wfile.write('<html><body><h1>Error: Proxy authorization needed</h1>%s</body></html>' % (exc,))
				return
		# rewrite url
		url = urlparse.urlsplit(self.path)
		u = list(url)
		# The proxy gets a verbatim copy of the URL, which might contain the
		# target site credentials. urllib doesn't handle this, strip it.
		if '@' in u[1]:
			u[1] = u[1].split('@', 1)[1]
		# Fake DNS resolve of configured hostname to localhost
		if options.translate:
			if url.hostname == options.translate:
				u[1] = u[1].replace(options.translate, 'localhost')
		url = urlparse.urlunsplit(u)
		try:
			req = urllib2.Request(url=url, headers=self.headers)
			if options.verbose:
				for k, v in self.headers.items():
					self.log_message("> %s: %s" % (k, v))
			fp = urllib2.urlopen(req)
		except urllib2.HTTPError as fp:
			if options.verbose:
				self.log_error("%d %s" % (fp.code, fp.msg))

		# NOTE(review): relies on Python 2 leaking the ``except ... as fp``
		# binding; if urlopen raised anything other than HTTPError, fp is
		# unbound here.
		self.send_response(fp.code)
		via = '1.0 %s' % (httpd.server_name,)
		for k, v in fp.headers.items():
			if k.lower() == 'via':
				via = "%s, %s" % (via, v)
			elif k.lower() in ('server', 'date'):  # Std-Hrds by BaseHTTPReqHand
				continue
			elif k.lower() == 'transfer-encoding':
				continue
			else:
				if options.verbose:
					self.log_message("< %s: %s" % (k, v))
				self.send_header(k, v)
		self.send_header('Via', via)
		self.end_headers()
		if data:
			shutil.copyfileobj(fp, self.wfile)
		fp.close()
Example #54
0
        else:
            destinationDir = self.args.out_dir
        if not os.path.exists(destinationDir):
            os.makedirs(destinationDir)
        for member in zipFile.namelist():
            filename = os.path.basename(member)
            source = io.BytesIO(zipFile.open(member).read())
            try:
                gz_source = gzip.GzipFile(fileobj=source, mode="rb")
                gz_source.read()
                source = gz_source
            except IOError, e:
                pass
            source.seek(0)
            destination = file(os.path.join(destinationDir, filename), "wb")
            shutil.copyfileobj(source, destination)
            source.close()
            destination.close()
        return (response)

    def outputSessionDetails(self, session):
        subject = session['subject']
        series = session['series']
        print """
SUBJECT: %-30s AGE:       %-30s
GENDER:  %-30s HADEDNESS: %-30s
RACE:    %-30s ETHNICITY: %-30s
ENGLISH: %-30s LANGUAGE:  %-30s
WEIGHT:  %-30s HEIGHT:    %-30s
SESSION: %s (Accession ID %s)
""" % (subject['label'] + " (" + subject['id'] + ")",
Example #55
0
FILENAMES = ['output.txt', 'yarsac.py']
FILE_DIGESTS = [
    '82d28b55de7a0a68efbdbb7411e835f4d0109f34',
    'f8cb9145a816450fc5d4fabf50f74d90e6e1deaf'
]


def sha1(fname):
    """Return the hex SHA-1 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.sha1()
    with open(fname, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()


r = requests.get('https://static.ctf.insecurity-insa.fr/' + ARCHIVE_NAME,
                 stream=True)

if r.status_code == 200:
    r.raw.decode_content = True
    with open(ARCHIVE_NAME, 'wb') as archive_out:
        shutil.copyfileobj(r.raw, archive_out)

# Unpack the downloaded archive into the current directory.
with tarfile.open(ARCHIVE_NAME) as tar:
    tar.extractall()

# Compare each extracted file's SHA-1 against the expected digests.
ok = all(sha1(filename) == digest
         for filename, digest in zip(FILENAMES, FILE_DIGESTS))
print('Exploit OK' if ok else 'Exploit error')
def copyLargeFile(src, dest, buffer_size=16000):
    """Copy the file at path *src* to path *dest* in *buffer_size*-byte reads."""
    with open(src, 'rb') as fin, open(dest, 'wb') as fout:
        shutil.copyfileobj(fin, fout, buffer_size)
Example #57
0
 def decompress(self, path, name):
     """Gunzip ``path/name`` into a sibling ``.txt`` file; return its name.

     NOTE(review): ``.replace`` swaps every ``.gz`` occurrence in the name,
     not only a trailing suffix.
     """
     plain_name = name.replace(".gz", ".txt")
     with gzip.open(os.path.join(path, name), 'rb') as packed:
         with open(os.path.join(path, plain_name), 'wb') as unpacked:
             shutil.copyfileobj(packed, unpacked)
     return plain_name
to_unzip = []

# First pass: collect every .zip in the cwd into the (pre-declared) zipfiles
# list, then decide which archives still need extracting.
for entry in os.listdir('./'):
    if entry.endswith('.zip'):
        zipfiles.append(entry)


for archive in zipfiles:
    print('File {}'.format(archive))
    with ZipFile('./' + archive) as zipf:
        members = zipf.namelist()
        inner_name = os.path.basename(members[0])
        if inner_name not in os.listdir('./'):
            to_unzip.append(archive)
            print('contains {}'.format(inner_name))
            print('added to unzip')
        else:
            print('contains {}'.format(inner_name))
            print('already unzipped')

# Second pass: extract the first member of each pending archive.
for archive in to_unzip:
    print('Unzipping {}'.format(archive))
    with ZipFile('./' + archive) as zipf:
        members = zipf.namelist()
        inner_name = os.path.basename(members[0])
        print(inner_name)
        member_stream = zipf.open(members[0])
        sink = open(os.path.join('./', inner_name), 'wb')
        with member_stream, sink:
            shutil.copyfileobj(member_stream, sink)
def download_file(url, destination):
    """Stream *url* into the file at *destination* and return *destination*."""
    with requests.get(url, stream=True) as resp:
        with open(destination, 'wb') as sink:
            shutil.copyfileobj(resp.raw, sink)
    return destination
Example #60
0
    def _remap_chunks(acc, c):
        if c[0] == 'IDAT':
            acc.append(rebuilt_idat_chunk)
        else:
            acc.append(c)

        return acc

    result_chunks = reduce(_remap_chunks, distilled_chunks, [])
    result = StringIO()
    png.write_chunks(result, result_chunks)
    return result


def build_idat_chunk(chunks):
    """Concatenate the payloads of all IDAT chunks, inflate and re-deflate
    them, and return the result as a single ('IDAT', data) chunk."""
    idat_payload = ''.join(
        chunk for (chunk_type, chunk) in chunks if chunk_type == 'IDAT')
    return ('IDAT', compress(decompress(idat_payload)))


if __name__ == "__main__":
    from shutil import copyfileobj
    import sys

    # Distill the input PNG, rewind the in-memory result, and write it out.
    distilled = distill(filename=sys.argv[1])
    distilled.seek(0)
    with open(sys.argv[2], 'w+') as output_file:
        copyfileobj(distilled, output_file)