Example #1
def procesarZIP(archivo):
    try:
        z = ZipFile(archivo)
    except:
        return

    nombre = z.filename.split('.')[0]
    ruta_temporal = '/tmp/%s/' % nombre
    z.extractall(ruta_temporal)

    for a in z.filelist:
        if a.filename[-3:] == "dbf":
            archivo_dbf = "%s%s" % (ruta_temporal, a.filename)
            puntos = shp2dataframe(archivo_dbf)
            # print "%s %s" % (nombre, puntos['REND'].mean())

            # drop the null values
            no_nulos = filter(None, puntos.REND.values)
            promedio = promedioRendimiento(no_nulos)

        if a.filename[-3:] == "shp":
            archivo_shp = "%s%s" % (ruta_temporal, a.filename)
            centro_x, centro_y = obtenerCentroide(archivo_shp)

    print "%s,%s,%s,%s" % (nombre.split('/')[-1], promedio, centro_x, centro_y)

    borrarArbol(ruta_temporal)
Example #2
def MyUnzipFile(in_file_path, target_root_path):
    """Unzip file.

>>> unzip_file("d:\\zout.zip", "d:\\")"""
    zin = ZipFile(in_file_path, "r")
    zin.extractall(target_root_path)
    zin.close()
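Since ZipFile supports the with statement, the same unzip can be written so the archive is closed even if extractall() raises; a minimal sketch (the helper name unzip_file_cm is hypothetical):

from zipfile import ZipFile

def unzip_file_cm(in_file_path, target_root_path):
    # Context-manager variant of MyUnzipFile above: the archive is
    # closed even when extractall() raises an exception.
    with ZipFile(in_file_path, "r") as zin:
        zin.extractall(target_root_path)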
Example #3
def build(worker_dir, sha, repo_url, destination, concurrency):
  """Download and build sources in a temporary directory then move exe to destination"""
  tmp_dir = tempfile.mkdtemp()
  os.chdir(tmp_dir)

  with open('sf.gz', 'wb+') as f:
    f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)
  zip_file = ZipFile('sf.gz')
  zip_file.extractall()
  zip_file.close()

  for name in zip_file.namelist():
    if name.endswith('/src/'):
      src_dir = name
  os.chdir(src_dir)

  custom_make = os.path.join(worker_dir, 'custom_make.txt')
  if os.path.exists(custom_make):
    with open(custom_make, 'r') as m:
      make_cmd = m.read().strip()
    subprocess.check_call(make_cmd, shell=True)
  else:
    subprocess.check_call(MAKE_CMD + ' -j %s' % (concurrency), shell=True)

  shutil.move('stockfish'+ EXE_SUFFIX, destination)
  os.chdir(worker_dir)
  shutil.rmtree(tmp_dir)
Example #4
def download_demo(demo_name):
    "Downloads and extracts the demo zip file"
    print(green("- Downloading"), end="")
    response = urllib2.urlopen(DEMOS[demo_name]["url"])

    # Extract real name of zipfile
    content_disposition = response.headers.getheader("content-disposition")
    real_name = os.path.splitext(content_disposition.split("; ")[1].split("=")[1])[0]

    fname = "{0}.zip".format(demo_name)

    # Download zipfile
    with open(fname, "wb") as tmpfile:
        while True:
            packet = response.read(2 ** 16)
            if not packet:
                print(green("done"))
                break
            tmpfile.write(packet)
            sys.stdout.write(green("."))
            sys.stdout.flush()
        response.close()

    print(green("- Extracting"))
    # Extract zipfile
    zipfile = ZipFile(fname)
    zipfile.extractall()

    # Clean up and rename to the correct demo name
    os.remove(fname)
    os.rename(real_name, demo_name)
Example #5
class UnZip():
    """Unzip the given file into a temporary directory."""
    def __init__(self, input_file):
        self.input_file = input_file

    def unzip(self):
        self.zip_file = ZipFile(self.input_file, 'r')
        self.tempdir = tempfile.mkdtemp()
        self.zip_file.extractall(self.tempdir)
        return self.tempdir

    def cleanup(self):
        self.zip_file.close()
        for root, dirs, files in os.walk(self.tempdir, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                os.rmdir(os.path.join(root, name))
        os.rmdir(self.tempdir)

    def __enter__(self):
        return self.unzip()

    def __exit__(self, *args):
        self.cleanup()
        return False
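Because UnZip defines __enter__ and __exit__, it can also be used with the with statement; a minimal usage sketch (the archive path is hypothetical):

import os

# Hypothetical archive path, for illustration only.
with UnZip("bundle.zip") as tmpdir:
    for name in os.listdir(tmpdir):
        print(os.path.join(tmpdir, name))
# On exit, the zip handle is closed and the temporary directory is removed.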
Example #6
def parse_template(template_name):
    """Resolve template name into absolute path to the template
    and boolean if absolute path is temporary directory.
    """
    if template_name.startswith('http'):
        if '#' in template_name:
            url, subpath = template_name.rsplit('#', 1)
        else:
            url = template_name
            subpath = ''
        with tempfile.NamedTemporaryFile() as tmpfile:
            urlretrieve(url, tmpfile.name)
            if not is_zipfile(tmpfile.name):
                raise ConfigurationError("Not a zip file: %s" % tmpfile.name)
            zf = ZipFile(tmpfile)
            try:
                path = tempfile.mkdtemp()
                zf.extractall(path)
                return os.path.join(path, subpath), True
            finally:
                zf.close()

    registry = TemplatesRegistry()
    if registry.has_template(template_name):
        path = registry.path_of_template(template_name)
    elif ':' in template_name:
        path = resolve_dotted_path(template_name)
    else:
        path = os.path.realpath(template_name)

    if not os.path.isdir(path):
        raise ConfigurationError('Template directory does not exist: %s' % path)
    return path, False
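A hypothetical usage sketch showing the two supported forms (URL and template name are made up):

# Remote zip with an optional '#subpath' fragment; returns (path, True).
path, is_tmp = parse_template('https://example.com/templates.zip#skeleton')
# Registered or local template name; returns (path, False).
path, is_tmp = parse_template('my_template')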
Example #7
def download_file(url, name, root_destination='~/data/', zipfile=False,
                  replace=False):
    """Download a file from dropbox, google drive, or a URL.

    This will download a file and store it in a '~/data/' folder,
    creating directories if need be. It will also work for zip
    files, in which case it will unzip all of the files to the
    desired location.

    Parameters
    ----------
    url : string
        The url of the file to download. This may be a dropbox
        or google drive "share link", or a regular URL. If it
        is a share link, then it should point to a single file and
        not a folder. To download folders, zip them first.
    name : string
        The name / path of the file for the downloaded file, or
        the folder to zip the data into if the file is a zipfile.
    root_destination : string
        The root folder where data will be downloaded.
    zipfile : bool
        Whether the URL points to a zip file. If yes, it will be
        unzipped to root_destination + name.
    replace : bool
        If True and the URL points to a single file, overwrite the
        old file if possible.
    """
    # Make sure we have directories to dump files
    home = op.expanduser('~')
    tmpfile = home + '/tmp/tmp'
    if not op.isdir(home + '/data/'):
        print('Creating data folder...')
        os.makedirs(home + '/data/')

    if not op.isdir(home + '/tmp/'):
        print('Creating tmp folder...')
        os.makedirs(home + '/tmp/')

    download_path = _convert_url_to_downloadable(url)

    # Now save to the new destination
    out_path = root_destination.replace('~', home) + name
    if not op.isdir(op.dirname(out_path)):
        print('Creating path {} for output data'.format(out_path))
        os.makedirs(op.dirname(out_path))

    if zipfile is True:
        _fetch_file(download_path, tmpfile)
        myzip = ZipFile(tmpfile)
        myzip.extractall(out_path)
        os.remove(tmpfile)
    else:
        if len(name) == 0:
            raise ValueError('Cannot overwrite the root data directory')
        if replace is False and op.exists(out_path):
            raise ValueError('Path {} exists, use `replace=True` to '
                             'overwrite'.format(out_path))
        _fetch_file(download_path, out_path)
    print('Successfully moved file to {}'.format(out_path))
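A hypothetical call (URL and folder name are made up) showing how a zip archive ends up extracted under root_destination + name:

# Hypothetical URL and target folder, for illustration only.
download_file('https://example.com/sample_data.zip',
              name='sample_data',
              zipfile=True)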
Example #8
def _prepare():
    """ Create two dirs, one with one dataset and one with two datasets
    """
    need_internet()
    
    global _prepared
    if _prepared and os.path.isfile(_prepared[2]):
        return _prepared
    # Prepare sources
    fname1 = get_remote_file('images/dicom_sample1.zip')
    fname2 = get_remote_file('images/dicom_sample2.zip')
    dname1 = os.path.join(test_dir, 'dicom_sample1')
    dname2 = os.path.join(test_dir, 'dicom_sample2')
    # Extract zipfiles
    z = ZipFile(fname1)
    z.extractall(dname1)
    z.extractall(dname2)
    z = ZipFile(fname2)
    z.extractall(dname2)
    # Get arbitrary file names
    fname1 = os.path.join(dname1, os.listdir(dname1)[0])
    fname2 = os.path.join(dname2, os.listdir(dname2)[0])
    # Cache and return
    _prepared = dname1, dname2, fname1, fname2
    return dname1, dname2, fname1, fname2
Example #9
def createDevEnv(baseDir, type):
  fileBuffer = StringIO()
  createBuild(baseDir, type=type, outFile=fileBuffer, devenv=True, releaseBuild=True)

  from zipfile import ZipFile
  zip = ZipFile(StringIO(fileBuffer.getvalue()), 'r')
  zip.extractall(os.path.join(baseDir, 'devenv'))
  zip.close()

  print 'Development environment created, waiting for connections from active extensions...'
  metadata = readMetadata(baseDir, type)
  connections = [0]

  import SocketServer, time, thread

  class ConnectionHandler(SocketServer.BaseRequestHandler):
    def handle(self):
      connections[0] += 1
      self.request.sendall('HTTP/1.0 OK\nConnection: close\n\n%s' % metadata.get('general', 'basename'))

  server = SocketServer.TCPServer(('localhost', 43816), ConnectionHandler)

  def shutdown_server(server):
    time.sleep(10)
    server.shutdown()
  thread.start_new_thread(shutdown_server, (server,))
  server.serve_forever()

  if connections[0] == 0:
    print 'Warning: No incoming connections, extension probably not active in the browser yet'
  else:
    print 'Handled %i connection(s)' % connections[0]
Example #10
def _download_xbrl_file(info_dicts, p):
    no = p
    directory_path = os.getcwd()+'/xbrl_files/'
    while(no < len(info_dicts)):
        info_dict = info_dicts[no]
        no += proc

        # Create a directory for each securities code
        company_path = directory_path + info_dict['cd'] + '/'
        ir_path = company_path + info_dict['id']
        make_directory(company_path)

        # Create a directory for each securities code and IR item
        if os.path.exists(ir_path):
            continue
        make_directory(ir_path)
        print('Process('+str(p + 1) + '):downloading:' + info_dict['update']+'_'+info_dict['title'])

        url = info_dict['url']
        r = requests.get(url)
        if r.ok:
            # Fetch the file via requests and unzip it
            z = ZipFile(io.BytesIO(r.content))
            z.extractall(ir_path)  # unzip the file and save files to path.
Example #11
    def extract_zip_file(self, transformation, submitted_file):
        log.info('Extracting zip file %s' % transformation.filename)

        #create transformations directory if it does not exist
        transformations_dir = self.get_transformations_dir()
        if not os.path.isdir(transformations_dir):
            log.info('Creating transformations directory')
            os.mkdir(transformations_dir)

        #delete package directory if it exists
        package_dir = os.path.join(transformations_dir, transformation.package_id)
        if os.path.isdir(package_dir):
            log.info('Deleting package directory %s' % package_dir)
            shutil.rmtree(package_dir)

        #create directory again and change to it
        log.info('Creating package directory %s' % package_dir)
        os.mkdir(package_dir)
        os.chdir(package_dir)

        zipfile = ZipFile(submitted_file)
        log.info('Extracting file %s to directory %s' % (transformation.filename, package_dir))
        zipfile.extractall()

        #obtain data
        submitted_file.seek(0)
        transformation.data = submitted_file.read()
        transformation.timestamp = datetime.now()

        log.info('File %s extracted' % transformation.filename)
        return package_dir
Example #12
 def update(self, update):
     dpath = self.xplanedir + '/Resources/Downloads'
     installpath = self.xplanedir + '/Resources/plugins/PythonScripts'
     
     # Broadcast message to all plugins
     XPLMSendMessageToPlugin(XPLM_NO_PLUGIN_ID, 0x8000000 | 8090 , long(1))
     PI_SendMessageToScript(self, None, 0x8000000 | 8090, 1)
     sleep(1)
     
     if not os.path.exists(dpath):
         os.mkdir(dpath)
     
     if update['update_type'] == 'direct' and update['update_filename']:
         urllib.urlretrieve(update['update_url'], dpath + '/'  +  update['update_filename'])
         copy(dpath + '/'  +  update['update_filename'], installpath + '/'  +  update['update_filename'])            
         print dpath + '/'  +  update['update_filename'], installpath + '/'  +  update['update_filename']
         
     elif update['update_type'] == 'zip':
         zipfile = dpath + '/._xpjpcUPDATE.zip'
         # Download update
         urllib.urlretrieve(update['update_url'], zipfile)
         zip = ZipFile(zipfile, 'r')
         
         # Check zip file
         if not zip.testzip():
             # Unzip
             unzipdir = dpath + '/' + zip.namelist()[0]
             zip.extractall(dpath)
             zip.close()
             # Move files
             self.tcopy(unzipdir, installpath)
             rmtree(unzipdir)
             os.remove(zipfile)
Example #13
def create_app(name, engine):
    """
        Create a Skeleton application (needs internet connection to github)
    """
    try:
        if engine.lower() == "sqlalchemy":
            url = urlopen(SQLA_REPO_URL)
            dirname = "Flask-AppBuilder-Skeleton-master"
        elif engine.lower() == "mongoengine":
            url = urlopen(MONGOENGIE_REPO_URL)
            dirname = "Flask-AppBuilder-Skeleton-me-master"
        zipfile = ZipFile(BytesIO(url.read()))
        zipfile.extractall()
        os.rename(dirname, name)
        click.echo(click.style("Downloaded the skeleton app, good coding!", fg="green"))
        return True
    except Exception as e:
        click.echo(click.style("Something went wrong {0}".format(e), fg="red"))
        if engine.lower() == "sqlalchemy":
            click.echo(
                click.style(
                    "Try downloading from {0}".format(SQLA_REPO_URL), fg="green"
                )
            )
        elif engine.lower() == "mongoengine":
            click.echo(
                click.style(
                    "Try downloading from {0}".format(MONGOENGIE_REPO_URL), fg="green"
                )
            )
        return False
Example #14
 def _dl_ui(self):
     url = urlopen(self._dist_url)
     z = ZipFile(StringIO(url.read()))
     names = [i for i in z.namelist() if '.DS_Store' not in i and '__MACOSX' not in i]
     z.extractall(self.update_dir, members=names)
     log.info("Downloaded files for UI commit " + str(self.git_version).replace("\n", ""))
     return self.branch
Example #15
def handle_uploaded_file(f, extract_path, request, user):
    zipfilepath = settings.COURSE_UPLOAD_DIR + f.name

    with open(zipfilepath, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)

    try:
        zip_file = ZipFile(zipfilepath)
        zip_file.extractall(path=extract_path)
    except BadZipfile:
        messages.error(request, _("Invalid zip file"), extra_tags="danger")
        return False, 500

    mod_name = ''
    print(os.listdir(extract_path))
    for dir in os.listdir(extract_path)[:1]:
        mod_name = dir

    # check there is at least a sub dir
    if mod_name == '':
        messages.info(request, _("Invalid course zip file"), extra_tags="danger")
        return False, 400

    try:
        course, response = process_course(extract_path, f, mod_name, request, user)
    except Exception as e:
        messages.error(request, e.message, extra_tags="danger")
        return False, 500
    finally:
        # remove the temp upload files
        shutil.rmtree(extract_path, ignore_errors=True)

    return course, 200
Example #16
def getWP():

    print 'downloading wordpress........'
    open(cwd + '/temp/wp.zip', 'wb').write(urllib.urlopen('http://fr.wordpress.org/wordpress-3.5-fr_FR.zip').read())

    zip = ZipFile(cwd + '/temp/wp.zip')
    print 'done'
    print 'Extracting wordpress...'

    zip.extractall(cwd + '/temp/')
    print 'done'

    print 'Moving the wordpress installation to C:/wamp/www/' + projectName
    # - Copy the WP installation
    
    shutil.copytree(cwd+ '/temp/wordpress/', wampPath + projectName)
    # - remove the junk
    #copytree(cwd + '/temp/wordpress/', wampPath + projectName)
   
    print 'done'
    print 'Getting ready for the first run.. Opening your web browser...'
    new = 2 # open in a new tab, if possible

    # open a public URL, in this case, the webbrowser docs
    url = "localhost/" + projectName + "/wp-admin/setup-config.php"
    webbrowser.open(url,new=new)
Example #17
def get_source(options):
    """ Downloads pyload source from bitbucket tip or given rev"""
    if options.rev: options.url = "https://bitbucket.org/spoob/pyload/get/%s.zip" % options.rev

    pyload = path("pyload")

    if len(pyload.listdir()) and not options.clean:
        return
    elif pyload.exists():
        pyload.rmtree()

    urlretrieve(options.src, "pyload_src.zip")
    zip = ZipFile("pyload_src.zip")
    zip.extractall()
    path("pyload_src.zip").remove()

    folder = [x for x in path(".").dirs() if x.name.startswith("spoob-pyload-")][0]
    folder.move(pyload)

    change_mode(pyload, 0644)
    change_mode(pyload, 0755, folder=True)

    for file in pyload.files():
        if file.name.endswith(".py"):
            file.chmod(0755)

    (pyload / ".hgtags").remove()
    (pyload / ".gitignore").remove()
    #(pyload / "docs").rmtree()

    f = open(pyload / "__init__.py", "wb")
    f.close()
Example #18
 def setUpClass(cls):
     # We zip the GRASSData folder to be nice to GitHub, unzip it
     cls.grassDBasePath = os.path.abspath('./tests/data/GRASSData')
     grassDBaseZip = "%s.zip" % (cls.grassDBasePath,)
     if not os.access(grassDBaseZip, os.R_OK):
         raise IOError(errno.EACCES, "Unable to read GRASS data zip %s" %
                   grassDBaseZip)
     grassDBaseDir = os.path.split(grassDBaseZip)[0]
     if not os.access(grassDBaseDir, os.W_OK):
         raise IOError(errno.EACCES, "Unable to write to GRASS data parent dir %s" %
                       grassDBaseDir)
     zip = ZipFile(grassDBaseZip, 'r')
     extractDir = os.path.split(cls.grassDBasePath)[0]
     zip.extractall(path=extractDir)
     
     gisbase = os.environ['GISBASE']
     grassConfig = GRASSConfig(gisbase=gisbase, dbase=cls.grassDBasePath, location='DR5_5m', mapset='taehee')
     cls.grassdatalookup = GrassDataLookup(grass_config=grassConfig)
     
     cls.inPatchID = 309999
     cls.inZoneID = 73
     cls.inHillID = 73
     cls.easting = 349325.089515
     cls.northing = 4350341.816966
     cls.patchMap = "patch_5m"
     cls.zoneMap = "hillslope"
     cls.hillslopeMap = "hillslope"
Example #19
 def testPNGFileToConvertOdpToHTML(self):
   """Test run_generate method from odp with png to html.
    This tests whether good png files are returned"""
   generate_result = self.proxy.run_generate('test_png.odp',
                                     encodestring(
                                     open(join('data', 'test_png.odp')).read()),
                                     None, 'html',
                                     'application/vnd.oasis.opendocument.presentation')
   response_code, response_dict, response_message = generate_result
   self.assertEquals(response_code, 200)
   self.assertEquals(type(response_dict), DictType)
   self.assertNotEquals(response_dict['data'], '')
   self.assertEquals(response_dict['mime'], 'application/zip')
   output_url = join(self.tmp_url, "zip.zip")
   open(output_url, 'w').write(decodestring(response_dict['data']))
   self.assertTrue(is_zipfile(output_url))
   zipfile = ZipFile(output_url)
   try:
     png_path = join(self.tmp_url, "img0.png")
     zipfile.extractall(self.tmp_url)
     content_type = self._getFileType(encodestring(open(png_path).read()))
     self.assertEquals(content_type, 'image/png')
     m = magic.Magic()
     self.assertTrue("8-bit/color RGB" in m.from_file(png_path))
   finally:
     zipfile.close()
   if exists(output_url):
     remove(output_url)
Example #20
def automerge(params):
    print "Resourcepack Auto-Merge Begin"
    os.mkdir('myresourcepack')
    
    pack1 = ZipFile(params[0])
    pack2 = ZipFile(params[1])

    print "Extracting "+pack1.filename+"..."
    pack1.extractall('myresourcepack/')
    print "Extracting "+pack2.filename+"..."
    pack2.extractall('myresourcepack/')
    
    print "Creating pack.mcmeta file..."
    
    packMeta1 = getPackMeta(pack1)
    packMeta2 = getPackMeta(pack2)
    resultMeta = mergePackMeta(packMeta1,packMeta2)
    with open('myresourcepack/pack.mcmeta','w') as f:
        f.write(json.dumps(resultMeta))
    print "pack.mcmeta created"
    
    print "Creating final zip..."
    # Begin: Thanks to http://stackoverflow.com/a/3612455
    target_dir = 'myresourcepack'
    zip = ZipFile('myresourcepack.zip', 'w')
    rootlen = len(target_dir) + 1
    for base, dirs, files in os.walk(target_dir):
        for file in files:
            fn = os.path.join(base, file)
            zip.write(fn, fn[rootlen:])
    # End stackoverflow thing
    zip.close()
    print "Auto-Merge done!"
Example #21
def _unpack(archive_file):
    """Unpacks and extracts files from an archive

    This function will unpack and extract files from the archive archive_file. It
    will return the directory to which the files were unpacked.

    An AttributeError is raised when the archive is not supported (when the
    name does not end with '.zip' or '.tar.gz')

    Returns string.
    """
    logger.info("Unpacking archive '{0}'".format(archive_file))
    if archive_file.endswith('.zip'):
        archive = ZipFile(archive_file)
        rootdir = archive.namelist()[0]
    elif archive_file.endswith('.tar.gz'):
        archive = tarfile.open(archive_file)
        rootdir = archive.getnames()[0]
    else:
        raise AttributeError("Unsupported archive. Can't unpack.")

    logger.info("Archive root folder is '{0}'".format(rootdir))

    try:
        shutil.rmtree(rootdir)
    except OSError:
        pass
    logger.info("Extracting to '{0}'".format(rootdir))
    archive.extractall()
    archive.close()
    return rootdir
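A minimal usage sketch, assuming a hypothetical archive name:

# _unpack returns the root directory the archive was extracted into.
rootdir = _unpack('project-1.0.zip')
print("Unpacked into '{0}'".format(rootdir))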
Example #22
 def load(self):
   """Creates one Temporary Document and write data into it.
   Return the url for the document.
   """
   # creates only the url of the file.
   file_path = tempfile.NamedTemporaryFile(suffix=".%s" % self.source_format,
                               dir=self.directory_name).name
   # stores the data in temporary file
   open(file_path, 'wb').write(self.original_data)
    # If it is a zipfile, we need to extract all files from within the compressed file
   if is_zipfile(file_path):
     zipfile = ZipFile(file_path)
     zip_filename_list = zipfile.namelist()
     if 'mimetype' not in zip_filename_list and \
         '[Content_Types].xml' not in zip_filename_list:
       zipfile_path = file_path
       zipfile.extractall(path=self.directory_name)
       zipfile.close()
       filename_list = listdir(self.directory_name)
       if 'index.html' in filename_list:
         file_path = join(self.directory_name, 'index.html')
       else:
         mimetype_list = ['text/html', 'application/xhtml+xml']
         for filename in filename_list:
           if mimetypes.guess_type(filename)[0] in mimetype_list:
             file_path = join(self.directory_name, filename)
             break
       if zipfile_path != file_path:
         remove(zipfile_path)
   return file_path
Example #23
    def archive_download(self, url, cache_path, checksum):
        rc = 0
        # Download
        arcfile = os.path.join(cache_path, os.path.basename(url))
        tdir = os.path.dirname(arcfile)
        filetool.directory(tdir)
        tfp = open(arcfile, "wb")
        #(fname, urlinfo) = urllib.urlretrieve(url, arcfile)
        urlobj = urllib.urlopen(url)
        assert urlobj.getcode() == 200, "Could not download the contrib archive: %s" % url
        hashobj = self.copy_and_hash(urlobj.fp, tfp)
        assert hashobj.hexdigest()==checksum, "Checksum of archive does not validate (should be: %s): %s" % (checksum, arcfile)
        urlobj.close()
        tfp.close()

        # Extract
        if url.endswith('.zip'):
            zipf = ZipFile(arcfile, 'r')
            zipf.extractall(tdir)
            zipf.close()
        else: # .tar, .tgz, .tar.gz, .tar.bz2
            tar = tarfile.open(arcfile)
            tar.extractall(tdir)
            tar.close()

        # Eliminate archive top-dir
        _, archive_dirs, _ = os.walk(tdir).next()
        assert archive_dirs, "The downloaded archive is not in single top-dir format: %s" % arcfile
        archive_top = os.path.join(tdir, archive_dirs[0]) # just take the first dir entry
        for item in os.listdir(archive_top):
            shutil.move(os.path.join(archive_top, item), tdir)
        os.rmdir(archive_top)
        os.unlink(arcfile)

        return rc
Example #24
def download_and_unzip(url, pth='./'):
    if not os.path.exists(pth):
        print('Creating the directory: {}'.format(pth))
        os.makedirs(pth)
    print('Attempting to download the file: ', url)
    file_name = os.path.join(pth, url.split('/')[-1])
    try:
        f, header = urlretrieve(url, file_name)
    except:
        msg = 'Cannot download file: {}'.format(url)
        raise Exception(msg)

    # Unzip the file, and delete zip file if successful.
    if 'zip' in os.path.basename(file_name) or 'exe' in os.path.basename(file_name):
        z = ZipFile(file_name)
        try:
            print('Extracting the zipfile...')
            z.extractall(pth)
        except:
            p = 'Could not unzip the file.  Stopping.'
            raise Exception(p)
        z.close()
    elif 'tar' in os.path.basename(file_name):
        ar = tarfile.open(file_name)
        ar.extractall(path=pth)
        ar.close()
    print('Deleting the zipfile...')
    os.remove(file_name)
    print('Done downloading and extracting...')
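A hypothetical call (URL and target path are made up):

# Downloads the archive into ./data, extracts it there, then deletes the archive.
download_and_unzip('https://example.com/sample.zip', pth='./data')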
Example #25
        def acs(file_num, geo_type_dir):
            base_url = 'http://www2.census.gov/acs2010_5yr/summaryfile/2006-2010_ACSSF_By_State_By_Sequence_Table_Subset/'
            path = os.path.join(
                state_name.replace(' ', ''), geo_type_dir,
                '20105%s%04d000.zip' % (state_abbr, file_num)
            )
            reader = remote_fallback_reader(
                '../data/acs2006_2010_5yr/summaryfile/2006-2010_ACSSF_By_State_By_Sequence_Table_Subset',
                base_url,
                path
            )

            z = ZipFile(reader)
            n = z.namelist()
            z.extractall('/tmp/')
            files = ['e20105%s%04d000.txt' % (state_abbr, file_num),
                'm20105%s%04d000.txt' % (state_abbr, file_num),]
            for f in files:
                z.extract(f, '/tmp/')

                # count the number of columns, to determine how many columns to fill
                firstline = open('/tmp/' + f).readline()
                if not firstline:
                    # some files are empty, so just continue to the next
                    os.unlink('/tmp/' + f)
                    continue
                cols = ['fileid', 'filetype', 'stusab', 'chariter', 'cifsn', 'logrecno']
                cols.extend(map(
                    lambda c: 'col%s' % str(c+1),
                    range(firstline.count(',')+1-6) # subtract 6 to account for the 6 geo header columns
                ))
                cursor.copy_from(open('/tmp/%s' % f), 'census_row', sep=',',
                    columns=cols)
                os.unlink('/tmp/' + f)
Example #26
def main(args):
    manifest = args[1]
    print("creating work directory")
    workdir = tempfile.mkdtemp(prefix="work-dir", dir=".")
    print("extracting manifest")
    zip_file = ZipFile(manifest, "r")
    zip_file.extractall(workdir)
    zip_file = ZipFile(os.path.join(workdir, "consumer_export.zip"), "r")
    zip_file.extractall(workdir)

    print("reading products")
    products = glob.glob(os.path.join(workdir, "export", "products",
        "*.json"))

    content = []
    for product in products:
        print product
        content.extend(load_content(product))


    # These credentials should be good for a normal Katello/SAM deployment:
    conn = psycopg2.connect("dbname=canadianTenPin user=postgres")

    scan_content(conn, content)

    conn.commit()

    print("cleaning up")
    shutil.rmtree(workdir)
    print("all done.")
Example #27
    def _copyZipDir(self, zipFilePath, inDirPath, path):
        '''
        Copy a directory from a ZIP archive to a filesystem directory

        @param zipFilePath: string
            The path of the ZIP archive
        @param inDirPath: string
            The path to the file in the ZIP archive
        @param path: string
            Destination path where the ZIP directory is copied
        '''
        # make sure the ZIP file path is normalized and uses the OS separator
        zipFilePath = normOSPath(zipFilePath)
        # make sure the ZIP file path is normalized and uses the ZIP separator
        inDirPath = normZipPath(inDirPath)
        zipFile = ZipFile(zipFilePath)
        if isdir(path):
            if self._isSyncFile(zipFilePath, path):
                return
            rmtree(path)
        entries = [ent for ent in zipFile.namelist() if ent.startswith(inDirPath)]
        tmpDir = TemporaryDirectory()
        zipFile.extractall(tmpDir.name, entries)
        tmpDirPath = join(tmpDir.name, normOSPath(inDirPath))
        os.makedirs(path)
        for entry in os.listdir(tmpDirPath):
            move(join(tmpDirPath, entry), path)
Example #28
def ImportTfZip( config = None, ConfigSet = "ImportTfZip" ):
    '''
    Imports events and config from file (or stdin)
    @param config: The config object to use. If None, then we try to use defaults or stdin
    @param ConfigSet: The section of the configuration to use
    @raise KeyError: If supplied ConfigSet is not in config
    @return: (weeksums, config) 
    '''  

    # fetch config values
    ZipDataDir = config.get( ConfigSet, "ZipDataDir" ).rstrip("/\\")
    ZipFilename = config.get( ConfigSet, "ZipFile" )

    zf = ZipFile( ZipFilename )
    zf.extractall( ZipDataDir )

    ImportTfSection = "ImportTf" 
    SheetName = config.get( ImportTfSection, "Sheetname" )
    AllEvents = []
    for DataFile in zf.namelist():    
        config.set( ImportTfSection, "InFile", "%s/%s"%(ZipDataDir, DataFile) )
        config.set( ImportTfSection, "CsvFile", "%s/%s.%s.%s"%(ZipDataDir, DataFile,SheetName,u'csv') )

        m = re.findall( "[0-9]{4}", DataFile )
        if m:
            config.set( ImportTfSection, "Year", m[0] )
        
        try:
            events, config = ImportTf( config, ImportTfSection )
            AllEvents += events
        except ValueError, e :
            # no events
            sys.stderr.write( "%s\n"% e.message )            
Example #29
def unzip_snap_mp4(abspath, quiet=False):
    zipped_snap = ZipFile(abspath)

    # unzip /path/to/zipfile.mp4 to /path/to/zipfile
    unzip_dir = os.path.splitext(abspath)[0]
    zipped_snap.extractall(unzip_dir)

    # move /path/to/zipfile.mp4 to /path/to/zipfile.zip
    os.rename(abspath, unzip_dir + '.zip')

    for f in os.listdir(unzip_dir):
        # mv /path/to/zipfile/media~* /path/to/zipfile.mp4
        if f.split('~')[0] == 'media':
            os.rename(os.path.join(unzip_dir, f), unzip_dir + '.mp4')

        # mv /path/to/zipfile/overlay~* /path/to/zipfile_overlay.png
        elif f.split('~')[0] == 'overlay':
            os.rename(os.path.join(unzip_dir, f),
                      unzip_dir + '_overlay.png')

    try:
        os.rmdir(unzip_dir)
    except OSError:
        print('Something other than a video or overlay was in {0}. \
               Cannot remove directory, not empty.'
              .format(unzip_dir + '.zip'))

    if not quiet:
        print('Unzipped {0}'.format(abspath))
Example #30
def extract_then_load(pk, zfilepath, test_method=None):
    strip_excess = False
    stripz_output = None
    messages = []

    #: open zip file and get paths
    arcpy.AddMessage('uploaded {}.'.format(zfilepath))
    zfile = ZipFile(zfilepath)
    zfilenames = zfile.namelist()
    zfile_exts = [name.split('.')[1] for name in zfilenames]
    zfile_name = zfilenames[0].split('.')[0]
    zfile_folder = join(arcpy.env.scratchFolder, zfile_name)
    shapefile = join(zfile_folder, zfile_name + '.shp')

    arcpy.AddMessage('verify that all files are present')
    #: verify that all files are present
    for ext in required_files:
        if ext not in zfile_exts:
            raise Exception('Missing .{} file'.format(ext))

    zfile.extractall(zfile_folder)

    arcpy.AddMessage('validating geometry')
    #: validate geometry
    checkgeom_output = 'in_memory/checkgeometry'
    arcpy.CheckGeometry_management(shapefile, checkgeom_output)

    if int(arcpy.GetCount_management(checkgeom_output).getOutput(0)) > 0:
        with arcpy.da.SearchCursor(checkgeom_output, ['PROBLEM']) as scur:
            arcpy.AddError('Geometry Error: {}'.format(scur.next()[0]))
            raise Exception('Geometry Error: {}'.format(scur.next()[0]))

    arcpy.AddMessage('validating geometry type')
    #: validate geometry type for category
    described = arcpy.Describe(shapefile)

    if described.shapeType != 'Polygon':
        arcpy.AddError(
            'Incorrect shape type of {}. Fire perimeters are polygons.'.format(
                described.shapeType))
        raise Exception(
            'Incorrect shape type of {}. Fire perimeters are polygons.'.format(
                described.shapeType))

    if described.hasZ or described.hasM:
        strip_excess = True

    arcpy.AddMessage('reprojecting if necessary')
    #: reproject if necessary
    reprojected_fc = None
    input_sr = described.spatialReference
    if input_sr.name != utm.name:
        #: Project doesn't support the in_memory workspace
        arcpy.AddMessage('Reprojected data from {} to {}'.format(
            input_sr.factoryCode, utm.factoryCode))
        messages.append('Reprojected data from {} to {}'.format(
            input_sr.factoryCode, utm.factoryCode))
        reprojected_fc = '{}/project'.format(arcpy.env.scratchGDB)
        shapefile = arcpy.Project_management(shapefile, reprojected_fc, utm)

    arcpy.AddMessage('Removing m and z if necessary')
    if strip_excess and input_sr.name == utm.name:
        arcpy.AddMessage('Removing m and z')

        stripz_output = 'in_memory/stripz'
        shapefile = arcpy.management.CopyFeatures(shapefile, stripz_output)

    arcpy.AddMessage('unioning all shapes')
    #: union all shapes in shapefile
    mergedGeometry = None
    features = 0
    with arcpy.da.SearchCursor(shapefile, ['SHAPE@']) as scur:
        for shape, in scur:
            features = features + 1
            if mergedGeometry is None:
                mergedGeometry = shape
                continue

            mergedGeometry = mergedGeometry.union(shape)

    if features == 0:
        arcpy.AddError('Shapefile is empty')
        raise Exception('Shapefile is empty')

    if features > 1:
        arcpy.AddMessage('Unioned {} features into one.'.format(features))
        messages.append('Unioned {} features into one.'.format(features))

    arcpy.AddMessage('cleaning up temp data')
    #: delete temp data
    if reprojected_fc is not None and arcpy.Exists(reprojected_fc):
        arcpy.Delete_management(reprojected_fc)

    if stripz_output is not None and arcpy.Exists(stripz_output):
        arcpy.Delete_management(stripz_output)

    if arcpy.Exists(zfile_folder):
        arcpy.Delete_management(zfile_folder)

    arcpy.AddMessage('inserting geometry')
    #: insert geometry into database
    db_method = store_geometry_for
    if test_method is not None:
        db_method = test_method

    status, message = db_method(pk, mergedGeometry.WKT)
    arcpy.AddMessage('db response {}, {}'.format(status, message))

    if message is not None:
        messages.append(message)

    return (status, messages)
Example #31
    def execute_task(self, task):
        # Note that this task is not thread safe: at most one can be executed at a time

        # Create temp folder
        with tempfile.TemporaryDirectory() as tmpdir:
            # Unzip
            try:
                with task.zip_file.open('rb') as fp:
                    submission = ZipFile(fp)
                    submission.extractall(tmpdir)
            except:
                return self.TaskResult.error('Failed to unzip provided file',
                                             traceback.format_exc())

            # Rewrite big files with a warning (GitHub accepts up to 100MiB, but we will use a much lower limit of 1MiB
            # to make sure the full repo won't be too big either)
            for root, dirs, files in os.walk(tmpdir):
                for name in files:
                    file_path = os.path.join(root, name)
                    size_MiB = os.path.getsize(file_path) / 1024 / 1024
                    if size_MiB > 1:
                        logger.warning(
                            f'File {file_path} is too big ({size_MiB:.1f}MiB)')
                        with open(file_path, 'w') as fp:
                            fp.write(
                                f'!!! The original file was too large !!!\nPlease find the complete ZIP on the platform'
                            )

            # Prepare commands
            competitor = task.competitor
            environment = competitor.environment
            submitter = competitor.submitter
            destination = f'data/publish_repo/{environment.slug}/{competitor.name}'
            commit_message = f'Publish {competitor.name} v{task.version_number} for {environment.name} by {submitter.username}'
            cmds = [
                ['git', '-C', 'data/publish_repo', 'fetch'],
                [
                    'git', '-C', 'data/publish_repo', 'reset', '--hard',
                    'origin/master'
                ],
                ['rm', '-rf', destination],
                ['mkdir', '-p', destination],
                ['cp', '-r', tmpdir + '/.', destination],
                ['git', '-C', 'data/publish_repo', 'add', '-A'],
                [
                    'git', '-C', 'data/publish_repo', 'commit',
                    '--allow-empty', '-m', commit_message
                ],
                ['git', '-C', 'data/publish_repo', 'push'],
            ]

            # Run them sequentially
            logs = ''
            for cmd in cmds:
                status, extra_logs = run_shell(cmd)
                logs += f'$ {cmd}\n{extra_logs}'
                if status != 0:
                    return self.TaskResult.error(
                        f'Operation failed with status {status}', logs)
            _, commit = run_shell(
                ['git', '-C', 'data/publish_repo', 'rev-parse', 'HEAD'])
            task.publish_url = f'{PUBLISHER_WEB_URL}/blob/{commit.strip()}/{environment.slug}/{competitor.name}/player.py'
            return self.TaskResult.success(logs)
Example #32
    def POST_AUTH(self, courseid, taskid):  # pylint: disable=arguments-differ
        """ Edit a task """
        if not id_checker(taskid) or not id_checker(courseid):
            raise Exception("Invalid course/task id")

        course, _ = self.get_course_and_check_rights(courseid,
                                                     allow_all_staff=False)
        data = web.input(task_file={})

        # Delete task ?
        if "delete" in data:
            self.task_factory.delete_task(courseid, taskid)
            if data.get("wipe", False):
                self.wipe_task(courseid, taskid)
            raise web.seeother("/admin/" + courseid + "/tasks")

        # Else, parse content
        try:
            try:
                task_zip = data.get("task_file").file
            except:
                task_zip = None
            del data["task_file"]

            problems = self.dict_from_prefix("problem", data)
            limits = self.dict_from_prefix("limits", data)

            data = {
                key: val
                for key, val in data.items() if not key.startswith("problem")
                and not key.startswith("limits")
            }
            del data["@action"]

            if data["@filetype"] not in self.task_factory.get_available_task_file_extensions(
            ):
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "Invalid file type: {}".format(str(data["@filetype"]))
                })
            file_ext = data["@filetype"]
            del data["@filetype"]

            if problems is None:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "You cannot create a task without subproblems"
                })

            # Order the problems (this line also deletes @order from the result)
            data["problems"] = OrderedDict([
                (key, self.parse_problem(val))
                for key, val in sorted(iter(problems.items()),
                                       key=lambda x: int(x[1]['@order']))
            ])
            data["limits"] = limits
            if "hard_time" in data["limits"] and data["limits"][
                    "hard_time"] == "":
                del data["limits"]["hard_time"]

            # Weight
            try:
                data["weight"] = float(data["weight"])
            except:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "Grade weight must be a floating-point number"
                })

            # Groups
            if "groups" in data:
                data["groups"] = True if data["groups"] == "true" else False

            # Submission storage
            if "store_all" in data:
                try:
                    stored_submissions = data["stored_submissions"]
                    data["stored_submissions"] = 0 if data[
                        "store_all"] == "true" else int(stored_submissions)
                except:
                    return json.dumps({
                        "status":
                        "error",
                        "message":
                        "The number of stored submission must be positive!"
                    })

                if data["store_all"] == "false" and data[
                        "stored_submissions"] <= 0:
                    return json.dumps({
                        "status":
                        "error",
                        "message":
                        "The number of stored submission must be positive!"
                    })
                del data['store_all']

            # Submission limits
            if "submission_limit" in data:
                if data["submission_limit"] == "none":
                    result = {"amount": -1, "period": -1}
                elif data["submission_limit"] == "hard":
                    try:
                        result = {
                            "amount": int(data["submission_limit_hard"]),
                            "period": -1
                        }
                    except:
                        return json.dumps({
                            "status":
                            "error",
                            "message":
                            "Invalid submission limit!"
                        })

                else:
                    try:
                        result = {
                            "amount": int(data["submission_limit_soft_0"]),
                            "period": int(data["submission_limit_soft_1"])
                        }
                    except:
                        return json.dumps({
                            "status":
                            "error",
                            "message":
                            "Invalid submission limit!"
                        })

                del data["submission_limit_hard"]
                del data["submission_limit_soft_0"]
                del data["submission_limit_soft_1"]
                data["submission_limit"] = result

            # Accessible
            if data["accessible"] == "custom":
                data["accessible"] = "{}/{}".format(data["accessible_start"],
                                                    data["accessible_end"])
            elif data["accessible"] == "true":
                data["accessible"] = True
            else:
                data["accessible"] = False
            del data["accessible_start"]
            del data["accessible_end"]

            # Checkboxes
            if data.get("responseIsHTML"):
                data["responseIsHTML"] = True

            # Network grading
            data["network_grading"] = "network_grading" in data
        except Exception as message:
            return json.dumps({
                "status":
                "error",
                "message":
                "Your browser returned an invalid form ({})".format(
                    str(message))
            })

        # Get the course
        try:
            course = self.course_factory.get_course(courseid)
        except:
            return json.dumps({
                "status":
                "error",
                "message":
                "Error while reading course's informations"
            })

        # Get original data
        try:
            orig_data = self.task_factory.get_task_descriptor_content(
                courseid, taskid)
            data["order"] = orig_data["order"]
        except:
            pass

        directory_path = self.task_factory.get_directory_path(courseid, taskid)
        try:
            WebAppTask(course, taskid, data, directory_path,
                       self.plugin_manager)
        except Exception as message:
            return json.dumps({
                "status":
                "error",
                "message":
                "Invalid data: {}".format(str(message))
            })

        if not os.path.exists(directory_path):
            os.mkdir(directory_path)

        if task_zip:
            try:
                zipfile = ZipFile(task_zip)
            except Exception as message:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "Cannot read zip file. Files were not modified"
                })

            try:
                zipfile.extractall(directory_path)
            except Exception as message:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "There was a problem while extracting the zip archive. Some files may have been modified"
                })

        self.task_factory.delete_all_possible_task_files(courseid, taskid)
        self.task_factory.update_task_descriptor_content(
            courseid, taskid, data, force_extension=file_ext)

        return json.dumps({"status": "ok"})
Example #33
def zdsplode(name, verbose=False):
    start_dir = os.path.abspath(os.getcwd())

    # Match on the filename
    # [base].[ext]
    # where ext is one of zip, tar, gz, tgz, tar.gz, or tar.bz2
    m = re.match(
        r'^(?P<base>.*?)[.](?P<ext>zip|tar|tgz|tar\.gz|tar\.bz2|tar\.bz|gz)$',
        name)
    if not m:
        # Not a compressed file that we're going to try to extract
        return

    if verbose:
        print('Extracting {}'.format(name))

    base, ext = m.groups()

    try:
        if ext == 'zip':
            cfile = ZipFile(name)
        elif ext == 'gz':
            cfile = gzip.open(name, 'r')
        else:
            cfile = tarfile.open(name, 'r:*')
    except (IOError, tarfile.ReadError, BadZipfile):
        print('Error reading file for extraction {}'.format(name))
        return

    try:
        # extract to a dir of its own to start with.
        extract_dir = datetime.datetime.now().isoformat()
        if ext == 'gz':
            os.makedirs(extract_dir)
            f = open(os.path.join(extract_dir, base), 'wb')
            chunk = 1024 * 8
            buff = cfile.read(chunk)
            while buff:
                f.write(buff)
                buff = cfile.read(chunk)
            f.close()
        else:
            cfile.extractall(extract_dir)
    except OSError:
        print('Error extracting {}'.format(name))
        return
    finally:
        cfile.close()

    # If there's no directory at all, then it was probably an empty archive
    if not os.path.isdir(extract_dir):
        return

    try:
        extract_files = os.listdir(extract_dir)
        if len(extract_files) == 1 and extract_files[0] == base:
            # If there's only one file/dir in the dir, and that file/dir
            # matches the base name of the archive, move the file/dir back one
            # into the parent dir and remove the extract directory.
            # The classic tar.gz -> dir and txt.gz -> file cases.
            shutil.move(os.path.join(extract_dir, extract_files[0]), start_dir)
            shutil.rmtree(extract_dir)

            # Set the name of the extracted dir for recursive decompression
            extract_dir = extract_files[0]
        else:
            # If there's more than one file in the dir, or if that file/dir
            # doesn't match the base name of the archive rename the extract dir
            # to the basename of the archive.
            # The 'barfing files all over pwd' case, the 'archive contains
            # var/log/blah/blah' case, and the 'archive contains a single,
            # differently named file' case.
            shutil.move(os.path.join(extract_dir), base)

            # Set the name of the extracted dir for recursive decompression
            extract_dir = base
    except shutil.Error as e:
        print('Error arranging directories:')
        print(e)
        return

    # See if there's anything left to do
    if not os.path.isdir(extract_dir):
        return

    # Get a list of files for recursive decompression
    sub_files = os.listdir(extract_dir)

    # Extract anything compressed that this archive had in it.
    os.chdir(extract_dir)
    for sub_file in sub_files:
        zdsplode(sub_file)
    os.chdir(start_dir)
Example #34
    def restore(self, name: dict):
        if not name:
            print(setColorText("Name empty!", Colors.YELLOW))
            return

        bkName = name.get("bkname", "HyperUBot")
        bkSource = name.get("source")
        bkName += ".hbotbk"

        if not os.path.exists(bkSource):
            print(
                setColorText(f"{bkSource}: No such file or directory",
                             Colors.YELLOW))
            return

        try:
            contents = None
            userbot = [
                "userbot/", "userbot/__init__.py", "userbot/__main__.py"
            ]
            bkZIP = ZipFile(bkSource, "r")
            contents = bkZIP.namelist()
            lpfile = None
            for x in contents:
                if x == "list.paths":
                    lpfile = bkZIP.read(x)
                    break
            list_paths = self.__getListPaths(lpfile)
            result = 0
            for y in contents:
                for uname in userbot:
                    if uname == y:
                        result += 1
            if not self.__comparePaths(contents, list_paths) or \
               not result == len(userbot):
                print(
                    setColorText(f"{bkSource}: Invalid archive format!",
                                 Colors.RED))
                return

            try:
                print("Removing current environment...")
                self._remove(self._list_dirs("."),
                             [os.path.join(".", RECOVERY_NAME)])
            except Exception as e:
                print(
                    setColorText(f"Failed to remove current environment: {e}",
                                 Colors.RED))
                return

            print("Restoring backup archive...")
            bkZIP.extractall(
                path=".",
                members=[z for z in contents if not z == "list.paths"])
            bkZIP.close()
            print(setColorText("Restore completed.", Colors.GREEN))
        except BadZipFile as bze:
            print(setColorText(f"Bad zip archive: {bze}", Colors.RED))
        except LargeZipFile as lze:
            print(
                setColorText(f"Zip archive too large (>64): {lze}",
                             Colors.RED))
        except Exception as e:
            print(
                setColorText(f"Failed to restore backup archive: {e}",
                             Colors.RED))
        return
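restore expects a dict with "bkname" and "source" keys; a hypothetical call (instance name and archive path are made up):

# Hypothetical backup tool instance and backup archive path.
backup_tool.restore({"bkname": "HyperUBot",
                     "source": "./backups/HyperUBot.hbotbk"})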
Example #35
    def processGuseStatus(self, wfidsDict, RemoteAPIPassword, gUSEurl):        
        """ Process the status of the workflow, if running call processDetailsinfo, if finished download results using the RemoteAPI 
        
        Arguments:        
        wfidsDict -- dict of folderNumber:workflow ID to loop through
        RemoteAPIPassword -- the RemoteAPI password as defined on the gUSE server
        gUSEurl -- the URL of the gUSE server
        
        Process the status for each workflow. Status is what is returned by RemoteApiDetailsInfo (called returnVal there)
        Based  on this value, if the workflow is running call processDetailsInfo, if it is finished call the RemoteApi 'download' method.
        Once finished remove id from wfidsDict, this method runs until this list is empty
        """

        # create group_id so all insertions and analysis can be tracked
        group_id = datetime.datetime.now().strftime("%Y%m%d-%H%M%S%Z-") + str(random.random())
        # folders to write the result file to (in the beginning the same as number of workflows)
        while True:
            # loop through all folder numbers of active workflows
            for folderNumber in wfidsDict.keys():
                # get the workflow ID for that folder number (folder number will be seen in 'instance_X')
                wfid = wfidsDict[folderNumber]
                # call RemoteAPI m='detailsinfo' for the workflow with wfid
                try:
                    wfstatus = self.RemoteApiDetailsInfo(gUSEurl, RemoteAPIPassword, wfid)
                except Exception as e:
                    print e                    
                    wfstatus = -1
                
                # if there is an exception make wfstatus = -1, then wait for 15 seconds and continue
                # this was a workaround to prevent clogging the gUSE server
                if wfstatus == -1:
                    sleep(15)
                    continue;
                
                # get a new timestamp of when the workflow finished according to the Raccoon2 UI (small delay)
                self.timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
                # if there were no errors with RemoteAPI m='detailsinfo', process the result it returned
                wfstate = self.processDetailsinfo(wfstatus, str(folderNumber), self.app.engine.guseNumberOfInstances.get())
                
                # value returned: 0 - suspended, or not valid data.
                if wfstate == 0:
                    # remove the key-value (folderNumber-wfid) pair for that folder number from the dict and start over looping through the rest
                    del wfidsDict[folderNumber]
                    continue
                
                # value returned: 1 - finished, or error.
                elif wfstate == 1:
                    # download the results into this file
                    resultFileName = self.resultFolderName + os.sep + "instance_" + str(folderNumber) + os.sep + "gUSE-cloud-vina-results.zip"
                    # call RemoteAPI m='download'
                    self.RemoteApiDownload(gUSEurl, RemoteAPIPassword, wfid, resultFileName)
                    
                    # open the  returned zip file and extract only the Vina output files which are stored in "output.zip"
                    # store the extracted zip in a temporary folder which will be deleted at the end
                    z = ZipFile(self.resultFolderName + os.sep + "instance_" + str(folderNumber) + os.sep + 'gUSE-cloud-vina-results.zip', 'r')
                    temporary_folder = "res_" + self.timestamp
                    os.mkdir(temporary_folder)
                    z.extractall(temporary_folder, filter(lambda f: f.endswith('output.zip'), z.namelist()))
                    output_zip_name_path = ""
                    for name in z.namelist():
                        if name.endswith("output.zip"):
                            output_zip_name_path = name
                    z.close()
                    # in case nothing is downloaded - *could be done more efficiently
                    if output_zip_name_path == "":
                        print "Error in downloading results or in the job"
                    # if no error    
                    else:
                        # open the "output.zip" where the results are stored
                        z = ZipFile(temporary_folder + os.sep + output_zip_name_path)
                        # this is the new folder where the results will be extracted into, with the new timestamp
                        results_folder = self.resultFolderName + os.sep + "instance_" + str(folderNumber) + os.sep + "gUSE-cloud-vina-results-" + self.timestamp
                        os.mkdir(results_folder)
                        # extract only files ending with .pdbqt_log.txt or .pdbqt
                        z.extractall(results_folder, filter(lambda f: f.endswith(('.pdbqt_log.txt', '.pdbqt')), z.namelist()))
                        z.close()
                        # remove "temporary_folder" and all its empty and non-empty sub-folders
#                         os.remove(temporary_folder + os.sep + output_zip_name_path)
#                         last_separator = output_zip_name_path.rfind(os.sep)
#                         os.removedirs(temporary_folder + os.sep + output_zip_name_path[0:last_separator])
                                   
                    # maybe remove all input files including results.zip too (*leave for now*)    
                    #os.remove("../ligands.zip")
                    #os.remove("../receptors.zip")
                    #os.remove("../conf.txt")
                    #os.remove("../output_names.txt")
                    #os.remove("../certs.zip")
                    
                    # once downloaded, remove the element from the dict and continue the loop    
                    del wfidsDict[folderNumber]
                    continue
                # start processing the next workflow in 5 seconds
                sleep(5)                          
            if not wfidsDict:
                # print when all workflows have been processed
                currentTime = datetime.datetime.now()
                timeDifference = currentTime - self.startTime
                self.queue.put("VS experiment finished: " + str(timeDifference))
                print "VS experiment finished: " + str(timeDifference)
                break
            else:
                # restart getting details for all workflows in 15 seconds        
                sleep(15)
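
# Aside (separate from the Raccoon2/gUSE code above): a minimal sketch of the
# selective-extraction pattern it relies on -- handing ZipFile.extractall only the
# member names that match a suffix. The archive and folder names are hypothetical.
from zipfile import ZipFile

with ZipFile('gUSE-cloud-vina-results.zip') as z:
    wanted = [name for name in z.namelist() if name.endswith('output.zip')]
    z.extractall('res_tmp', members=wanted)
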
import shutil
import os
from zipfile import ZipFile

try:
    from urllib.request import urlretrieve
except ImportError:
    from urllib import urlretrieve


class ProgressReporter:
    def __init__(self):
        self.progress = None

    def __call__(self, block_count, block_size, total):
        progress = int((block_count * block_size * 100) / total)
        progress = int(progress / 20) * 20
        if progress != self.progress:
            print("{}%".format(progress))
            self.progress = progress


url = "http://www.steinberg.net/sdk_downloads/vstsdk366_27_06_2016_build_61.zip"  # noqa
urlretrieve(url, "Steinberg.zip", ProgressReporter())

zip_file = ZipFile('Steinberg.zip')
zip_file.extractall()
zip_file.close()

shutil.rmtree("Steinberg", ignore_errors=True)
os.rename("VST3 SDK", "Steinberg")
# the downloaded archive is a file, not a directory, so remove it with os.remove
os.remove("Steinberg.zip")
Beispiel #37
0
    def POST_AUTH(self, courseid, taskid):  # pylint: disable=arguments-differ
        """ Edit a task """
        if not id_checker(taskid) or not id_checker(courseid):
            raise Exception("Invalid course/problem id")

        course, _ = self.get_course_and_check_rights(courseid,
                                                     allow_all_staff=False)
        data = web.input(problem_file={}, task_file={})
        previous_courseid = courseid

        # Task exists?
        try:
            self.task_factory.get_task(course, taskid)
        except:
            # If doesn't, use bank instead
            courseid = self.bank_name
            course = self.course_factory.get_course(courseid)

        # Delete task ?
        if "delete" in data:
            try:
                self.task_factory.delete_task(courseid, taskid)
            except:
                raise web.seeother("/admin/" + previous_courseid + "/tasks")
            if data.get("wipe", False):
                self.wipe_task(courseid, taskid)
            raise web.seeother("/admin/" + previous_courseid + "/tasks")

        # Else, parse content
        try:
            task_zip = data.get("task_file").file
        except:
            task_zip = None
        del data["task_file"]

        try:
            problem_file = data.get('problem_file')
        except Exception as e:
            problem_file = None
        del data["problem_file"]

        problems = self.dict_from_prefix("problem", data)
        limits = self.dict_from_prefix("limits", data)

        data = {
            key: val
            for key, val in data.items()
            if not key.startswith("problem") and not key.startswith("limits")
        }
        del data["@action"]

        if data["@filetype"] not in self.task_factory.get_available_task_file_extensions(
        ):
            return json.dumps({
                "status":
                "error",
                "message":
                "Invalid file type: {}".format(str(data["@filetype"]))
            })
        file_ext = data["@filetype"]
        del data["@filetype"]
        if problems is None:
            problems = {
                '1': {
                    "type": "code-file",
                    "header": "",
                    "allowed_exts": ".py"
                }
            }
            #return json.dumps({"status": "error", "message": "You cannot create a task without subproblems"})

        # Order the problems (this line also deletes @order from the result)
        data["problems"] = OrderedDict([(key, self.parse_problem(val))
                                        for key, val in problems.items()])
        data["limits"] = limits
        data["limits"]["time"] = 30
        if "hard_time" in data["limits"] and data["limits"]["hard_time"] == "":
            del data["limits"]["hard_time"]

        if problem_file is not None:
            self.upload_pfile(
                courseid, taskid, "public/" + taskid + ".pdf",
                problem_file.file
                if not isinstance(problem_file, str) else problem_file)

        run = os.path.join(self.task_factory._tasks_directory, "run", "run")
        if os.path.exists(run):
            with open(run) as f:
                self.upload_pfile(courseid, taskid, "run", f)
            wanted_path = self.verify_path(courseid, taskid, "run", True)
            os.system("sed -i -e 's/REPLACEWITHTIME/" +
                      str(data["limits"]["real_time"]) + "/g' " + wanted_path)

        # Difficulty
        try:
            data["difficulty"] = int(data["difficulty"])
            if not (data["difficulty"] > 0 and data["difficulty"] <= 5):
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "Difficulty level must be between 1 and 10"
                })
        except:
            return json.dumps({
                "status":
                "error",
                "message":
                "Difficulty level must be an integer number"
            })

        # Name
        if len(data["name"]) == 0:
            return json.dumps({
                "status": "error",
                "message": "Field 'name' must have non-empty."
            })

        # Weight
        try:
            data["weight"] = 1.0
        except:
            return json.dumps({
                "status":
                "error",
                "message":
                "Grade weight must be a floating-point number"
            })

        try:
            data["authenticity_percentage"] = float(
                data["authenticity_percentage"])
        except:
            return json.dumps({
                "status":
                "error",
                "message":
                "Authenticity percentage must be a floating-point number"
            })

        # Groups
        if "groups" in data:
            data["groups"] = True if data["groups"] == "true" else False

        # Submission storage
        if "store_all" in data:
            try:
                stored_submissions = data["stored_submissions"]
                data["stored_submissions"] = 0 if data[
                    "store_all"] == "true" else int(stored_submissions)
            except:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "The number of stored submission must be positive!"
                })

            if data["store_all"] == "false" and data["stored_submissions"] <= 0:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "The number of stored submission must be positive!"
                })
            del data['store_all']

        # Submission limits
        if "submission_limit" in data:
            if data["submission_limit"] == "none":
                result = {"amount": -1, "period": -1}
            elif data["submission_limit"] == "hard":
                try:
                    result = {
                        "amount": int(data["submission_limit_hard"]),
                        "period": -1
                    }
                except:
                    return json.dumps({
                        "status": "error",
                        "message": "Invalid submission limit!"
                    })

            else:
                try:
                    result = {
                        "amount": int(data["submission_limit_soft_0"]),
                        "period": int(data["submission_limit_soft_1"])
                    }
                except:
                    return json.dumps({
                        "status": "error",
                        "message": "Invalid submission limit!"
                    })

            del data["submission_limit_hard"]
            del data["submission_limit_soft_0"]
            del data["submission_limit_soft_1"]
            data["submission_limit"] = result

        data["accessible"] = True

        # Checkboxes
        if data.get("responseIsHTML"):
            data["responseIsHTML"] = True

        # Network grading
        data["network_grading"] = "network_grading" in data
        data["code_analysis"] = "code_analysis" in data
        if data["code_analysis"] and os.path.exists(run):
            os.system("sed -i -e 's/#STATIC//g' " + wanted_path)
        # Get the course
        try:
            course = self.course_factory.get_course(courseid)
        except:
            return json.dumps({
                "status":
                "error",
                "message":
                "Error while reading course's informations"
            })

        # Get original data
        try:
            orig_data = self.task_factory.get_task_descriptor_content(
                courseid, taskid)
            data["order"] = orig_data["order"]
        except:
            pass

        directory_path = self.task_factory.get_directory_path(courseid, taskid)
        try:
            WebAppTask(course, taskid, data, directory_path,
                       self.plugin_manager)
        except Exception as message:
            return json.dumps({
                "status":
                "error",
                "message":
                "Invalid data: {}".format(str(message))
            })

        if not os.path.exists(directory_path):
            os.mkdir(directory_path)

        if task_zip:
            try:
                zipfile = ZipFile(task_zip)
            except Exception as message:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "Cannot read zip file. Files were not modified"
                })

            try:
                zipfile.extractall(directory_path)
            except Exception as message:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    "There was a problem while extracting the zip archive. Some files may have been modified"
                })

        self.task_factory.delete_all_possible_task_files(courseid, taskid)
        self.task_factory.update_task_descriptor_content(
            courseid, taskid, data, force_extension=file_ext)

        return json.dumps({"status": "ok"})
Beispiel #38
0
def extract_data():
	zf = ZipFile('covid-world-vaccination-progress.zip')
	zf.extractall()
	zf.close()
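
# Aside: the same extraction written with a context manager, which closes the
# archive even if extractall() raises; the file name is taken from the example above.
from zipfile import ZipFile

def extract_data():
    with ZipFile('covid-world-vaccination-progress.zip') as zf:
        zf.extractall()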
Beispiel #39
0
def unzip(zip_uri, is_url, clone_to_dir='.', no_input=False, password=None):
    """Download and unpack a zipfile at a given URI.

    This will download the zipfile to the cookiecutter repository,
    and unpack into a temporary directory.

    :param zip_uri: The URI for the zipfile.
    :param is_url: Is the zip URI a URL or a file?
    :param clone_to_dir: The cookiecutter repository directory
        to put the archive into.
    :param no_input: Suppress any prompts
    :param password: The password to use when unpacking the repository.
    """
    # Ensure that clone_to_dir exists
    clone_to_dir = os.path.expanduser(clone_to_dir)
    make_sure_path_exists(clone_to_dir)

    if is_url:
        # Build the name of the cached zipfile,
        # and prompt to delete if it already exists.
        identifier = zip_uri.rsplit('/', 1)[1]
        zip_path = os.path.join(clone_to_dir, identifier)

        if os.path.exists(zip_path):
            download = prompt_and_delete(zip_path, no_input=no_input)
        else:
            download = True

        if download:
            # (Re) download the zipfile
            r = requests.get(zip_uri, stream=True)
            with open(zip_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
    else:
        # Just use the local zipfile as-is.
        zip_path = os.path.abspath(zip_uri)

    # Now unpack the repository. The zipfile will be unpacked
    # into a temporary directory
    try:
        zip_file = ZipFile(zip_path)

        if len(zip_file.namelist()) == 0:
            raise InvalidZipRepository(
                'Zip repository {} is empty'.format(zip_uri))

        # The first record in the zipfile should be the directory entry for
        # the archive. If it isn't a directory, there's a problem.
        first_filename = zip_file.namelist()[0]
        if not first_filename.endswith('/'):
            raise InvalidZipRepository('Zip repository {} does not include '
                                       'a top-level directory'.format(zip_uri))

        # Construct the final target directory
        project_name = first_filename[:-1]
        unzip_base = tempfile.mkdtemp()
        unzip_path = os.path.join(unzip_base, project_name)

        # Extract the zip file into the temporary directory
        try:
            zip_file.extractall(path=unzip_base)
        except RuntimeError:
            # File is password protected; try to get a password from the
            # environment; if that doesn't work, ask the user.
            if password is not None:
                try:
                    zip_file.extractall(path=unzip_base,
                                        pwd=password.encode('utf-8'))
                except RuntimeError:
                    raise InvalidZipRepository(
                        'Invalid password provided for protected repository')
            elif no_input:
                raise InvalidZipRepository(
                    'Unable to unlock password protected repository')
            else:
                retry = 0
                while retry is not None:
                    try:
                        password = read_repo_password('Repo password')
                        zip_file.extractall(path=unzip_base,
                                            pwd=password.encode('utf-8'))
                        retry = None
                    except RuntimeError:
                        retry += 1
                        if retry == 3:
                            raise InvalidZipRepository(
                                'Invalid password provided '
                                'for protected repository')

    except BadZipFile:
        raise InvalidZipRepository(
            'Zip repository {} is not a valid zip archive:'.format(zip_uri))

    return unzip_path
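
# Aside: a hypothetical call of the unzip() helper above; the URL and cache
# directory are illustrative assumptions, not values from the original project.
project_dir = unzip('https://example.com/templates/mytemplate.zip',
                    is_url=True, clone_to_dir='~/.cookiecutters', no_input=True)
print(project_dir)  # path of the extracted top-level template directory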
Beispiel #40
0
def lambda_handler(event, context):
    logger.info('Event %s', event)
    OAUTH_token = event['context']['git-token']
    OutputBucket = event['context']['output-bucket']
    # temp_archive = '/tmp/archive.zip'
    # Identify git host flavour
    hostflavour = 'generic'
    if 'X-Hub-Signature' in event['params']['header'].keys():
        hostflavour = 'githubent'
    elif 'X-Gitlab-Event' in event['params']['header'].keys():
        hostflavour = 'gitlab'
    elif 'User-Agent' in event['params']['header'].keys():
        if event['params']['header']['User-Agent'].startswith(
                'Bitbucket-Webhooks'):
            hostflavour = 'bitbucket'
        elif event['params']['header']['User-Agent'].startswith(
                'GitHub-Hookshot'):
            hostflavour = 'github'
    headers = {}
    branch = 'master'
    if hostflavour == 'githubent':
        archive_url = event['body-json']['repository']['archive_url']
        owner = event['body-json']['repository']['owner']['name']
        name = event['body-json']['repository']['name']
        # replace the code archive download and branch reference placeholders
        archive_url = archive_url.replace('{archive_format}',
                                          'zipball').replace(
                                              '{/ref}', '/master')
        # add access token information to archive url
        archive_url = archive_url + '?access_token=' + OAUTH_token
    elif hostflavour == 'github':
        archive_url = event['body-json']['repository']['archive_url']
        owner = event['body-json']['repository']['owner']['login']
        name = event['body-json']['repository']['name']
        # replace the code archive download and branch reference placeholders
        branch_name = event['body-json']['ref'].replace('refs/heads/', '')
        archive_url = archive_url.replace('{archive_format}',
                                          'zipball').replace(
                                              '{/ref}', '/' + branch_name)
        # add access token information to archive url
        archive_url = archive_url + '?access_token=' + OAUTH_token
    elif hostflavour == 'gitlab':
        # https://gitlab.com/jaymcconnell/gitlab-test-30/repository/archive.zip?ref=master
        archive_url = event['body-json']['project']['http_url'].replace(
            '.git', '/repository/archive.zip?ref=master'
        ) + '&private_token=' + OAUTH_token
        owner = event['body-json']['project']['namespace']
        name = event['body-json']['project']['name']
    elif hostflavour == 'bitbucket':
        branch = event['body-json']['push']['changes'][0]['new']['name']
        archive_url = event['body-json']['repository']['links']['html'][
            'href'] + '/get/' + branch + '.zip'
        owner = event['body-json']['repository']['owner']['username']
        name = event['body-json']['repository']['name']
        r = requests.post('https://bitbucket.org/site/oauth2/access_token',
                          data={'grant_type': 'client_credentials'},
                          auth=(event['context']['oauth-key'],
                                event['context']['oauth-secret']))
        if 'error' in r.json().keys():
            logger.error('Could not get OAuth token. %s: %s' %
                         (r.json()['error'], r.json()['error_description']))
            raise Exception('Failed to get OAuth token')
        headers['Authorization'] = 'Bearer ' + r.json()['access_token']
    s3_archive_file = "%s/%s/%s/%s.zip" % (owner, name, branch, name)
    # download the code archive via archive url
    logger.info('Downloading archive from %s' % archive_url)
    r = requests.get(archive_url, verify=verify, headers=headers)
    f = StringIO(r.content)
    zip = ZipFile(f)
    path = '/tmp/code'
    zipped_code = '/tmp/zipped_code'
    try:
        shutil.rmtree(path)
        os.remove(zipped_code + '.zip')
    except:
        pass
    finally:
        os.makedirs(path)
    # Write to /tmp dir without any common prefixes
    zip.extractall(path, get_members(zip))

    # Create zip from /tmp dir without any common prefixes
    shutil.make_archive(zipped_code, 'zip', path)
    logger.info("Uploading zip to S3://%s/%s" %
                (OutputBucket, s3_archive_file))
    s3_client.upload_file(zipped_code + '.zip', OutputBucket, s3_archive_file)
    logger.info('Upload Complete')
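
# Aside: get_members() is not defined in the excerpt above. Judging from the
# "without any common prefixes" comments, a sketch of such a helper could strip the
# shared top-level folder from every member before extraction, e.g.:
import os

def get_members(zip_file):
    """Yield ZipInfo objects with the common leading directory removed."""
    parts = []
    for name in zip_file.namelist():
        if not name.endswith('/'):
            parts.append(name.split('/')[:-1])
    prefix = os.path.commonprefix(parts) or ''
    if prefix:
        prefix = '/'.join(prefix) + '/'
    offset = len(prefix)
    for info in zip_file.infolist():
        if len(info.filename) > offset:
            info.filename = info.filename[offset:]
            yield info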
Beispiel #41
0
def configureDB_faculty(requests):
    response = {"configureDB_faculty": "active"}
    if requests.method == "GET":
        return render(requests, "Administrator/uploadcsv.html", response)

    bootstrapFile = {}
    try:
        file = requests.FILES.get("file", False)
        action = requests.POST.get("action")
        start_date = requests.POST.get("start_date")
        end_date = requests.POST.get("end_date")

        if not utilities.validateDate(
                start_date) or not utilities.validateDate(end_date):
            raise Exception("Incorrect date format, should be YYYY-MM-DD")

        bootstrapFile['start_date'] = start_date
        bootstrapFile['end_date'] = end_date

        if action is not None:
            bootstrap.clear_Database()

        if file.name.endswith('.zip'):
            unzipped = ZipFile(file)
            unzipped.extractall(os.path.abspath('bootstrap_files'))
            bootstrapFile['file_type'] = 'zip'

            for fileName in unzipped.namelist():
                if fileName.lower() == 'faculty_information.xlsx':
                    bootstrapFile['faculty'] = os.path.abspath(
                        'bootstrap_files/' + fileName)
                elif fileName.lower() == 'course_information.xlsx':
                    bootstrapFile['course'] = os.path.abspath(
                        'bootstrap_files/' + fileName)

            if 'faculty' not in bootstrapFile.keys(
            ) or 'course' not in bootstrapFile.keys():
                raise Exception(
                    "Invalid file information within .zip file. Please upload faculty or course information only."
                )

        elif file.name.endswith('.xlsx'):
            if file.name.lower() == 'faculty_information.xlsx':
                bootstrapFile['file_path'] = file.temporary_file_path()
                bootstrapFile['file_type'] = 'excel'
                bootstrapFile['file_information'] = 'faculty'

            elif file.name.lower() == 'course_information.xlsx':
                bootstrapFile['file_path'] = file.temporary_file_path()
                bootstrapFile['file_type'] = 'excel'
                bootstrapFile['file_information'] = 'course'

            else:
                raise Exception(
                    "Invalid file information. Please upload faculty or course information only."
                )

        else:
            raise Exception(
                "Invalid file type. Please upload .xlsx or .zip only")

        # If file is .xlsx or .zip then proceed with processing
        response['results'] = bootstrap.bootstrap_Faculty(bootstrapFile)

    except Exception as e:
        # Uncomment for debugging - to print stack trace without halting the process
        traceback.print_exc()
        response['error_message'] = e.args[0]
        return render(requests, "Administrator/uploadcsv.html", response)

    response['message'] = 'Successful Upload'
    return render(requests, "Administrator/uploadcsv.html", response)
Beispiel #42
0
    def on_message(self, message):
        
                ms =  json.loads(message)
                if list(ms.keys())[0] == "Start":
                   """Начало загрузки"""

                   self.name = ms["Start"]["Name"]
                   try:
                           self.myfile = open("archiv/"+ms["Start"]["Name"], "wb")
                           self.write_message(json.dumps({"Process": "MoreData"}))
                   except IOError:
                           os.mkdir("archiv")
                           self.myfile = open("archiv/"+ms["Start"]["Name"], "wb")
                           self.write_message(json.dumps({"Process": "MoreData"}))
                           
                if list(ms.keys())[0] == "Upload":
                   """Процесс загрузки"""

                   da = ms["Upload"]["Data"]
                   da = da.split(",")[1]
                   file_bytes = io.BytesIO(base64.b64decode(da)).read()
                   self.myfile.write(file_bytes)
                   self.write_message(json.dumps({"Process": "MoreData"}))

                if list(ms.keys())[0] == "Done":
                   """Конец загрузки"""

                   self.myfile.close()
                   self.write_message(json.dumps({"Process": "Process"}))
                      
                if list(ms.keys())[0] == "Process":
                           HD = DATA()
                           lss = ["gz", "tar", "xz"]
                           if self.name.split(".")[-1] in lss:
                                   tar = tarfile.open("archiv/"+self.name, "r")
                                   tar.extractall("data/"+self.name, members=self.track_progress(tar))
                                   tar.close()
                                   HD.parseIMG(self.name)
                           if self.name.split(".")[-1] == "zip":
                                   zip = ZipFile("archiv/"+self.name)
                                   zip.extractall("data/"+self.name)
                           
                                   HD.parseIMG(self.name)
                           #print (HD.file, ms["type"] == 1)
                           if int(ms["type"]) == 4:
                                 for ixx, fx in enumerate(list(HD.file.keys())[:]):
		                           
                                           file_n = HD.file[fx][0]
                                           try:
                                              answ_H = HD.file[fx][1]
                                           except IndexError:
                                              answ_H = ""

                                           iop = cv2.imread(file_n)
                                           #print ("POST V")
                                           answ_V = ViPost(file_n, str(ixx), ms["task"], int(ms["type"]))#""#gg(iop, ms["task"], int(ms["type"]))                   
                                           #print (answ_V["text"],"POST L")
                                           #answ_L = cutimg(iop, str(ixx), ms["task"], int(ms["type"]))
		               
                                           #print (answ_L["text"]) #answ_V[0]
                                           #print (file_n, ms["task"], ms["type"]) #int(ms["type"]))
                                           post = {"file": file_n, 
                                                   "answ": "", "answ_H": answ_H, "_type":ms["type"],
                                                   "answ_L" : "", "answ_V": answ_V["text"], "_task":ms["task"]}
		                                   #"answ": "", "answ_V": "", "task":ms["task"], "type":ms["type"]}
		                                   #"answ_L" : answ_L["text"], "answ_V": "1", "task":ms["task"]}
                                           I = new.posts.insert_one(post).inserted_id

                                 self.ss = new.posts.find()
                                 self.write_message(json.dumps({"Process": "loaddone"}))

                           if int(ms["type"]) == 3:
                                 for ixx, fx in enumerate(list(HD.file.keys())[:]):
                                           file_n = HD.file[fx][0]
                                           try:
                                              answ_H = HD.file[fx][1]
                                           except IndexError:
                                              answ_H = ""

                                           iop = cv2.imread(file_n)
                                           #print ("POST V")
                                           answ_V = ViPost(file_n, str(ixx), ms["task"], int(ms["type"])) 
                                           PP = cutimg(file_n, iop, ixx, [int(x) for x in answ_V["text"]], ms["task"], 3)
                                           I = new.posts.insert_one(PP).inserted_id  #print (PP)
                                 self.ss = new.posts.find()
                                 self.write_message(json.dumps({"Process": "loaddone"}))

                           else:
                                 im_w = len(list(HD.file.keys()))
                                 w = 9
                                 w_num = int(im_w/w)
                                 num = 0
                                 #print ("ErrorS", im_w, w_num, w) 
                                 for ix in range(0, w_num): 
                                     #print (len(list(HD.file.keys())[ix*w:(ix+1)*w]))
                                     t_dict = {}
                                     t_dict["_type"] = ms["type"]
                                     t_dict["_task"] = ms["task"]
                                     for kix in list(HD.file.keys())[ix*w:(ix+1)*w]:
                                           #print (HD.file[kix],ix)
                                           file_n = HD.file[kix][0]
                                           post = {"file": file_n, 
                                                   "answ": "", "answ_H": "",
                                                   "answ_L" : "", "answ_V": "1"}
                                           t_dict[str(ix)+"_"+str(kix)] = post
                                     #print (len(list(t_dict.keys())))
                                     I = new.posts.insert_one(t_dict).inserted_id  
                                 self.ss = new.posts.find()
                                 self.write_message(json.dumps({"Process": "loaddone"}))
                                     
                                          


                if list(ms.keys())[0] == "SeeAllData":
                      if ms["SeeAllData"] == "ProcessNext":
                                      try:
                                          posttoCH = new.posts.find_one(ObjectId(ms["idxx"]))
                                          if posttoCH["_type"] == 1:
                                             for ik, k in enumerate(list(posttoCH.keys())[3:]):
                                                 posttoCH[k]["answ_V"] = ms["answ_v"][ik]
                                             new.posts.update_one({"_id" : ObjectId(ms["idxx"])},
                                                                  {"$set": posttoCH}, upsert=True)
                                          else:

                                             self.tempID = ObjectId(ms["idxx"])
                                             posttoCH = new.posts.find_one(ObjectId(ms["idxx"]))
                                             posttoCH["answ_V"] = deansw(ms["answ_v"])
                                             new.posts.update_one({"_id" : ObjectId(ms["idxx"])},
                                                                  {"$set": posttoCH}, upsert=True)
                                             #posttoCH = new.posts.find_one(ObjectId(ms["idxx"]))
       
                                      except KeyError:
                                          pass

                                      if new.posts.count() != 0:

                                              try:
                                                 obj = self.ss.next()
                                              except StopIteration:
                                                 self.ss.rewind()
                                                 obj = self.ss.next() 
                                              #print (obj["task"])
                                              if obj["_type"] == 1:
                                                 k_answ = []
                                                 l_data = []
                                                 ixxx = 0
                                                 #print (obj["_type"])
                                                 for k in list(obj.keys())[3:]:
                                                    n_file = obj[k]['file']
                                                    ixxx += 1
                                                    if int(obj[k]['answ_V']) == 1:
                                                       k_answ.append(ixxx)
                                                    iop = cv2.imread(n_file)
                                                    print (n_file)#, iop.shape)
                                                    l_data.append(iop)
                                                 iop = newimg(np.array(l_data),3)   
                                                 idxx = obj["_id"]
                                                 answ_v = k_answ 
                                                 answ = k_answ 
                                                 task = obj["_task"]
                                                 #LeoPost(fname, str(idxx))
                                                 s = base64.b64encode(imgbite(iop))
                                                 obj = {"image":s.decode('ascii'), 
                                                        "answ_V": answ_v, 
                                                        "answ": answ,
                                                        "task": task,
                                                        "type":3,
                                                        "_id": str(idxx),
                                                        "name": ""}
                                                 #print (obj["type"])
                                                 self.write_message(json.dumps(obj))
                                              else:
                                                 try:
                                                   obj = self.ss.next()
                                                 except StopIteration:
                                                   self.ss.rewind()
                                                   obj = self.ss.next()    
                                                 idxx = obj["_id"]
                                                 fname = obj["file"]
                                                 answ_v = obj["answ_V"]
                                                 answ = obj["answ"] 
                                                 task = obj["_task"] 
                                                 tp = obj["_type"]      
                                                 #print (fname, answ_v, answ, idxx, task, tp)                
                                                 iop = cv2.imread(fname)
                                                 #LeoPost(fname, str(idxx))
                                 
                                                 s = base64.b64encode(imgbite(iop))
                                                 #print (s) 
                                                 obj = {"image":s.decode('ascii'), 
                                                        "answ_V": answ_v, 
                                                        "answ": answ,
                                                        "task": task,
                                                        "type":tp,
                                                        "_id": str(idxx),
                                                        "name": fname}
                                                 #print (obj["type"])
                                                 self.write_message(json.dumps(obj))
Beispiel #43
0
import urllib.request as urllib
import time
from zipfile import ZipFile
import os
import webbrowser
# _thread is the Python 3 name of the low-level thread module used below
import _thread as thread

tagname = "1.3.1"
zipname = "markdown-presenter-" + tagname + ".zip"
zippath = "./" + zipname
url = "https://github.com/jsakamoto/MarkdownPresenter/releases/download/v." + tagname + "/" + zipname

# download zip archive of Markdown Presenter.
urllib.urlretrieve(url, zippath)

# extract the zip.
zfile = ZipFile(zippath)
zfile.extractall(".")
zfile.close()

# clean up zip.
os.remove(zippath)


# launch default web browser to open Markdown Presenter
# after one shot timer to wait for warming up HTTP daemon.
def launch():
    time.sleep(1)
    webbrowser.open("http://localhost:8000/Presenter.html")


thread.start_new_thread(launch, ())
Beispiel #44
0
def unzip_files():
    for zipf in zipfiles:
        zip_ref = ZipFile(zipf, 'r')
        # rstrip('.zip.ZIP') strips any trailing '.', 'z', 'i', 'p', 'Z', 'I', 'P'
        # characters rather than the suffix, which can mangle the target name;
        # split off the extension instead
        zip_ref.extractall(os.path.splitext(zipf)[0])
        zip_ref.close()
Beispiel #45
0
def unzip(fileName):
    print "Extracting: " + fileName,
    zip = ZipFile(fileName)
    zip.extractall()
    zip.close()
    print " [DONE]"
Beispiel #46
0
    def POST_AUTH(self, courseid, taskid):  # pylint: disable=arguments-differ
        """ Edit a task """
        if not id_checker(taskid) or not id_checker(courseid):
            raise Exception("Invalid course/task id")

        course, __ = self.get_course_and_check_rights(courseid,
                                                      allow_all_staff=False)
        data = web.input(task_file={})

        # Delete task ?
        if "delete" in data:
            self.task_factory.delete_task(courseid, taskid)
            if data.get("wipe", False):
                self.wipe_task(courseid, taskid)
            raise web.seeother(self.app.get_homepath() + "/admin/" + courseid +
                               "/tasks")

        # Else, parse content
        try:
            try:
                task_zip = data.get("task_file").file
            except:
                task_zip = None
            del data["task_file"]

            problems = self.dict_from_prefix("problem", data)
            limits = self.dict_from_prefix("limits", data)

            #Tags
            tags = self.dict_from_prefix("tags", data)
            if tags is None:
                tags = {}
            tags = OrderedDict(sorted(tags.items(),
                                      key=lambda item: item[0]))  # Sort by key

            # Repair tags
            for k in tags:
                tags[k]["visible"] = (
                    "visible" in tags[k]
                )  # Since unchecked checkboxes are not present here, we manually add them to avoid later errors
                tags[k]["type"] = int(tags[k]["type"])
                if not "id" in tags[k]:
                    tags[k][
                        "id"] = ""  # Since textinput is disabled when the tag is organisational, the id field is missing. We add it to avoid Keys Errors
                if tags[k]["type"] == 2:
                    tags[k]["id"] = ""  # Force no id if organisational tag

            # Remove uncompleted tags (tags with no name or no id)
            for k in list(tags.keys()):
                if (tags[k]["id"] == ""
                        and tags[k]["type"] != 2) or tags[k]["name"] == "":
                    del tags[k]

            # Find duplicate ids. Return an error if some tags use the same id.
            for k in tags:
                if tags[k][
                        "type"] != 2:  # Ignore organisational tags since they have no id.
                    count = 0
                    id = str(tags[k]["id"])
                    if (" " in id):
                        return json.dumps({
                            "status":
                            "error",
                            "message":
                            _("You can not use spaces in the tag id field.")
                        })
                    if not id_checker(id):
                        return json.dumps({
                            "status":
                            "error",
                            "message":
                            _("Invalid tag id: {}").format(id)
                        })
                    for k2 in tags:
                        if tags[k2]["type"] != 2 and tags[k2]["id"] == id:
                            count = count + 1
                    if count > 1:
                        return json.dumps({
                            "status":
                            "error",
                            "message":
                            _("Some tags have the same id! The id of a tag must be unique."
                              )
                        })

            data = {
                key: val
                for key, val in data.items() if not key.startswith("problem")
                and not key.startswith("limits") and not key.startswith("tags")
                and not key.startswith("/")
            }
            del data["@action"]

            # Determines the task filetype
            if data["@filetype"] not in self.task_factory.get_available_task_file_extensions(
            ):
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    _("Invalid file type: {}").format(str(data["@filetype"]))
                })
            file_ext = data["@filetype"]
            del data["@filetype"]

            # Parse and order the problems (also deletes @order from the result)
            if problems is None:
                data["problems"] = OrderedDict([])
            else:
                data["problems"] = OrderedDict([
                    (key, self.parse_problem(val))
                    for key, val in sorted(iter(problems.items()),
                                           key=lambda x: int(x[1]['@order']))
                ])

            # Task limits
            data["limits"] = limits
            data["tags"] = OrderedDict(
                sorted(tags.items(), key=lambda x: x[1]['type']))
            if "hard_time" in data["limits"] and data["limits"][
                    "hard_time"] == "":
                del data["limits"]["hard_time"]

            # Weight
            try:
                data["weight"] = float(data["weight"])
            except:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    _("Grade weight must be a floating-point number")
                })

            # Groups
            if "groups" in data:
                data["groups"] = True if data["groups"] == "true" else False

            # Submission storage
            if "store_all" in data:
                try:
                    stored_submissions = data["stored_submissions"]
                    data["stored_submissions"] = 0 if data[
                        "store_all"] == "true" else int(stored_submissions)
                except:
                    return json.dumps({
                        "status":
                        "error",
                        "message":
                        _("The number of stored submission must be positive!")
                    })

                if data["store_all"] == "false" and data[
                        "stored_submissions"] <= 0:
                    return json.dumps({
                        "status":
                        "error",
                        "message":
                        _("The number of stored submission must be positive!")
                    })
                del data['store_all']

            # Submission limits
            if "submission_limit" in data:
                if data["submission_limit"] == "none":
                    result = {"amount": -1, "period": -1}
                elif data["submission_limit"] == "hard":
                    try:
                        result = {
                            "amount": int(data["submission_limit_hard"]),
                            "period": -1
                        }
                    except:
                        return json.dumps({
                            "status":
                            "error",
                            "message":
                            _("Invalid submission limit!")
                        })

                else:
                    try:
                        result = {
                            "amount": int(data["submission_limit_soft_0"]),
                            "period": int(data["submission_limit_soft_1"])
                        }
                    except:
                        return json.dumps({
                            "status":
                            "error",
                            "message":
                            _("Invalid submission limit!")
                        })

                del data["submission_limit_hard"]
                del data["submission_limit_soft_0"]
                del data["submission_limit_soft_1"]
                data["submission_limit"] = result

            # Accessible
            if data["accessible"] == "custom":
                data["accessible"] = "{}/{}/{}".format(
                    data["accessible_start"], data["accessible_soft_end"],
                    data["accessible_end"])
            elif data["accessible"] == "true":
                data["accessible"] = True
            else:
                data["accessible"] = False
            del data["accessible_start"]
            del data["accessible_end"]
            del data["accessible_soft_end"]

            # Checkboxes
            if data.get("responseIsHTML"):
                data["responseIsHTML"] = True

            # Network grading
            data["network_grading"] = "network_grading" in data
        except Exception as message:
            return json.dumps({
                "status":
                "error",
                "message":
                _("Your browser returned an invalid form ({})").format(message)
            })

        # Get the course
        try:
            course = self.course_factory.get_course(courseid)
        except:
            return json.dumps({
                "status":
                "error",
                "message":
                _("Error while reading course's informations")
            })

        # Get original data
        try:
            orig_data = self.task_factory.get_task_descriptor_content(
                courseid, taskid)
            data["order"] = orig_data["order"]
        except:
            pass

        task_fs = self.task_factory.get_task_fs(courseid, taskid)
        task_fs.ensure_exists()

        # Call plugins and return the first error
        plugin_results = self.plugin_manager.call_hook('task_editor_submit',
                                                       course=course,
                                                       taskid=taskid,
                                                       task_data=data,
                                                       task_fs=task_fs)

        # Retrieve the first non-null element
        error = next(filter(None, plugin_results), None)
        if error is not None:
            return error

        try:
            WebAppTask(course, taskid, data, task_fs, None,
                       self.plugin_manager,
                       self.task_factory.get_problem_types())
        except Exception as message:
            return json.dumps({
                "status":
                "error",
                "message":
                _("Invalid data: {}").format(str(message))
            })

        if task_zip:
            try:
                zipfile = ZipFile(task_zip)
            except Exception:
                return json.dumps({
                    "status":
                    "error",
                    "message":
                    _("Cannot read zip file. Files were not modified")
                })

            with tempfile.TemporaryDirectory() as tmpdirname:
                try:
                    zipfile.extractall(tmpdirname)
                except Exception:
                    return json.dumps({
                        "status":
                        "error",
                        "message":
                        _("There was a problem while extracting the zip archive. Some files may have been modified"
                          )
                    })
                task_fs.copy_to(tmpdirname)

        self.task_factory.delete_all_possible_task_files(courseid, taskid)
        self.task_factory.update_task_descriptor_content(
            courseid, taskid, data, force_extension=file_ext)
        course.update_all_tags_cache()

        return json.dumps({"status": "ok"})
Beispiel #47
0
    def test_001_build_android(self):
        Tns.build_android(attributes={"--path": self.app_name})
        assert File.pattern_exists(self.platforms_android, "*.aar")
        assert not File.pattern_exists(self.platforms_android, "*.plist")
        assert not File.pattern_exists(self.platforms_android, "*.android.js")
        assert not File.pattern_exists(self.platforms_android, "*.ios.js")

        # Configs are respected
        assert 'debug' in File.read(
            os.path.join(self.app_name, TnsAsserts.PLATFORM_ANDROID_APP_PATH,
                         'config.json'))

        # And new platform specific file and verify next build is ok (test for issue #2697)
        src = os.path.join(self.app_name, 'app', 'app.js')
        dest_1 = os.path.join(self.app_name, 'app', 'new.android.js')
        dest_2 = os.path.join(self.app_name, 'app', 'new.ios.js')
        File.copy(src=src, dest=dest_1)
        File.copy(src=src, dest=dest_2)

        # Verify incremental native build
        before_build = datetime.datetime.now()
        output = Tns.build_android(attributes={"--path": self.app_name})
        after_build = datetime.datetime.now()
        assert "Gradle build..." in output, "Gradle build not called."
        assert output.count(
            "Gradle build...") == 1, "Only one gradle build is triggered."
        assert (after_build - before_build).total_seconds(
        ) < 20, "Incremental build takes more than 20 sec."

        # Verify platform specific files
        assert File.pattern_exists(self.platforms_android, "*.aar")
        assert not File.pattern_exists(self.platforms_android, "*.plist")
        assert not File.pattern_exists(self.platforms_android, "*.android.js")
        assert not File.pattern_exists(self.platforms_android, "*.ios.js")

        # Verify apk does not contain aar files
        archive = ZipFile(
            os.path.join(self.app_name,
                         TnsAsserts.PLATFORM_ANDROID_APK_DEBUG_PATH,
                         self.debug_apk))
        archive.extractall(self.app_name + "/temp")
        archive.close()
        # Clean up the META-INF folder. It contains com.android.support.... files which are expected to be there due to
        # https://github.com/NativeScript/nativescript-cli/pull/3923
        Folder.cleanup(os.path.join(self.app_name, "temp", "META-INF"))
        assert not File.pattern_exists(self.app_name + "/temp", "*.aar")
        assert not File.pattern_exists(self.app_name + "/temp", "*.plist")
        assert not File.pattern_exists(self.app_name + "/temp", "*.android.*")
        assert not File.pattern_exists(self.app_name + "/temp", "*.ios.*")
        Folder.cleanup(self.app_name + "/temp")

        # Verify incremental native build
        before_build = datetime.datetime.now()
        output = Tns.build_android(attributes={"--path": self.app_name})
        after_build = datetime.datetime.now()
        assert "Gradle build..." in output, "Gradle build not called."
        assert output.count(
            "Gradle build...") == 1, "Only one gradle build is triggered."
        assert (after_build - before_build).total_seconds(
        ) < 20, "Incremental build takes more than 20 sec."

        # Verify clean build force native project rebuild
        before_build = datetime.datetime.now()
        output = Tns.build_android(attributes={
            "--path": self.app_name,
            "--clean": ""
        })
        after_build = datetime.datetime.now()
        build_time = (after_build - before_build).total_seconds()
        assert "Gradle clean..." in output, "Gradle clean is not called."
        assert "Gradle build..." in output, "Gradle build is not called."
        assert output.count(
            "Gradle build...") == 1, "More than 1 gradle build is triggered."
        assert build_time > 10, "Clean build takes less than 10 sec."
        assert build_time < 90, "Clean build takes more than 90 sec."
Beispiel #48
0
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
'''
# insert the url used to download the data from the source website
url = 'https://gain.nd.edu/assets/323406/resources_2019_19_01_21h59_1_1_.zip'  #check

# download the data from the source
raw_data_file = os.path.join(data_dir, os.path.basename(url))
urllib.request.urlretrieve(url, raw_data_file)

#unzip source data
raw_data_file_unzipped = raw_data_file.split('.')[0]
zip_ref = ZipFile(raw_data_file, 'r')
zip_ref.extractall(raw_data_file_unzipped)
zip_ref.close()
'''
Process data
'''
#read in climate change vulnerability data to pandas dataframe
filename = os.path.join(raw_data_file_unzipped, 'resources', 'vulnerability',
                        'vulnerability.csv')
vulnerability_df = pd.read_csv(filename)

#read in climate change readiness data to pandas dataframe
filename = os.path.join(raw_data_file_unzipped, 'resources', 'readiness',
                        'readiness.csv')
readiness_df = pd.read_csv(filename)

#read in nd-gain score data to pandas dataframe
Beispiel #49
0
class Installer:
    def __init__(self):

        self.window = Tk()
        self.window.iconbitmap('assets/install.ico')

        self.window.title('Installer')

        self.window.resizable(False, False)

        self.window.geometry('550x300')

        self.window.configure(background='white')

        load = Image.open("assets/ins.png")

        space = Label(self.window, bg='white')
        space.pack(side=BOTTOM, pady=2)

        load = load.resize((180, 180), Image.ANTIALIAS)

        img = ImageTk.PhotoImage(load)

        image = Label(self.window, text='kalfoi', image=img, bg='white')
        image.image = img
        image.pack(side=LEFT, padx=20)

        space = Label(self.window, bg='white')
        space.pack(pady=20)

        tit = Label(self.window,
                    text='Instalador                   ',
                    bg='white',
                    font="sans 25 ",
                    fg='grey')
        tit.pack()

        self.op = Label(self.window,
                        text='Selecione as opções desejadas\t     \n',
                        bg='white',
                        font="sans 13 ",
                        fg='grey')
        self.op.pack()

        self.v = IntVar()
        c = Checkbutton(self.window,
                        text="Criar ícone na área de trabalho\t\t\t\t",
                        bg='white',
                        onvalue=1,
                        offvalue=0,
                        variable=self.v)
        c.var = self.v
        c.pack()

        self.w = IntVar()
        d = Checkbutton(self.window,
                        text="Adicionar ao menu Iniciar \t\t\t\t\t",
                        bg='white',
                        onvalue=1,
                        offvalue=0,
                        variable=self.w)
        d.var = self.w
        d.pack()

        self.bt = Button(self.window,
                         text='Instalar',
                         height=2,
                         width=10,
                         cursor='hand2',
                         command=self.load)
        self.bt.pack(side=RIGHT, padx=30)

        self.widgets = [c, d]

    def load(self):
        self.bt['cursor'] = 'watch'
        self.window['cursor'] = 'watch'
        self.window.after(10, self.start)

    def start(self):

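        # Derive the user's home directory (e.g. C:\Users\<name>\) from the
        # first three path components of the current working directory.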
        user_path = '\\'.join([i for i in os.getcwd().split('\\')[:3]]) + '\\'

        self.zip = ZipFile('Tubs.zip')

        self.zip.extractall(user_path)

        self.path = user_path + 'Tubs\\'

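        # Pair each checkbox value with the folder that should receive a shortcut
        # (the desktop and/or the Start Menu programs folder).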
        lista = [
            (self.v.get(), winshell.desktop()),
            (self.w.get(),
             os.path.abspath(
                 user_path +
                 "\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs"
             ))
        ]

        for i in lista:

            if i[0]:

                link_filepath = os.path.join(i[1], "Gerador.lnk")
                with winshell.shortcut(link_filepath) as link:
                    link.path = self.path + 'interface.exe'
                    link.description = "Gerador de Certificados"
                    link.working_directory = self.path

        self.window['cursor'] = 'arrow'
        self.bt['cursor'] = 'hand2'

        for i in self.widgets:
            i.destroy()

        self.op['text'] = 'Instalação Finalizada\t\t    '

        self.bt['text'] = 'Encerrar'
        self.bt['command'] = self.window.destroy
logging.info("Downloading data...")
response = requests.get(
    'http://fme.discomap.eea.europa.eu/fmedatadownload/MarineLitter/MLWPivotExport.fmw'
    '?CommunityCode=&FromDate=2010-01-01&ToDate=2022-12-31'
    '&opt_showresult=false&opt_servicemode=sync')

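# The FME endpoint responds with an HTML page; pull out the first href value,
# which points at the generated zip export of the data.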
downloadlink = re.search(r"<a\s+(?:[^>]*?\s+)?href=([\"'])(.*?)\1>",
                         response.content.decode()).group(2)

logging.info("Saving data...")
zipfile = requests.get(downloadlink)
with open(f'{here}/{ZIP_FILE}', 'wb') as f:
    f.write(zipfile.content)

logging.info("Uzipping data...")
with ZipFile(f'{here}/{ZIP_FILE}', 'r') as zipObject:
    zipObject.extractall(path=here)

logging.info("Loading data...")
# Data to initialize database with
data = pd.read_csv(f'{here}/CSV_1/MLW_PivotExport/MLW_Data.csv',
                   encoding="ISO-8859-1")

# Delete database file if it exists currently
if os.path.exists(f'{here}/{DB_FILE}'):
    os.remove(f'{here}/{DB_FILE}')

# Create the database
db.create_all()

# populate the database
conn = sqlite3.connect(f'{here}/{DB_FILE}')
Beispiel #51
0
from keras.preprocessing.image import ImageDataGenerator
from zipfile import ZipFile
import os

!wget --no-check-certificate \
    https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip \
    -O /tmp/horse-or-human.zip

!wget --no-check-certificate \
    https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip \
    -O /tmp/validation-horse-or-human.zip

# Extracting the files (This was written in Google Colab)
file_name = '/tmp/horse-or-human.zip'
with ZipFile(file_name, 'r') as zip_ref:
    zip_ref.extractall('/tmp/horse-or-human')

file_name = '/tmp/validation-horse-or-human.zip'
with ZipFile(file_name, 'r') as zip_ref:
    zip_ref.extractall('/tmp/validation-horse-or-human')

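# Each extracted dataset contains 'horses' and 'humans' subdirectories, which
# flow_from_directory later uses as the two class labels.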
train_horse_dir = os.path.join('/tmp/horse-or-human/horses')
train_human_dir = os.path.join('/tmp/horse-or-human/humans')
validation_horse_dir = os.path.join('/tmp/validation-horse-or-human/horses')
validation_human_dir = os.path.join('/tmp/validation-horse-or-human/humans')

# Setting up ImageDataGenerators. You can implement Augmentation here, to reduce overfitting.
train_datagen = ImageDataGenerator(rescale=1/255)
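# Augmentation sketch (not part of the original example): extra parameters like
# these apply random rotations, shifts, zooms and flips during training.
augmented_datagen = ImageDataGenerator(rescale=1/255,
                                       rotation_range=40,
                                       width_shift_range=0.2,
                                       height_shift_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)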
train_generator = train_datagen.flow_from_directory(
                  directory='/tmp/horse-or-human',
                  target_size=(300,300),
Beispiel #52
0
def extract_zip(what, where):
    print("extracting {}...".format(what))
    zip_file = ZipFile(what)
    zip_file.extractall(path=where)
    zip_file.close()
Beispiel #53
0
import tarfile
import os
import platform
# the following imports are assumed for this fragment (not shown in the snippet)
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile

target = platform.system() + "-" + platform.architecture()[0]

arduino_url = "https://downloads.arduino.cc/arduino-cli"
images = {
    "Windows-64bit": "arduino-cli_latest_Windows_64bit.zip",
    "Windows-32bit": "arduino-cli_latest_Windows_32bit.zip",
    "Linux-64bit": "arduino-cli_latest_Linux_64bit.tar.gz"
}

destdir = "tools/bin"

try:
    os.mkdir("tools")
except:
    pass

filename = images[target]

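# Download the archive into memory, then unpack it with tarfile or ZipFile
# depending on the file extension.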
r = urlopen(arduino_url + "/" + images[target])
data = BytesIO(r.read())
if (filename.endswith(".tar.gz") or filename.endswith(".tgz")):
    tar = tarfile.open(fileobj=data, mode="r")
    tar.extractall(path=destdir)
elif (filename.endswith(".zip")):
    zipfile = ZipFile(data)
    zipfile.extractall(path=destdir)
Beispiel #54
0
def extract(zipFilename, dm_extraction_dir):
    zipTest = ZipFile(zipFilename)
    zipTest.extractall(dm_extraction_dir)
Beispiel #55
0
    def scan(self, codeURL, runtime):
        if not self.config:
            return  # invalid config
        zippath = self.downloads.joinpath('lambda.zip')
        zippath.write_bytes(requests.get(codeURL).content)
        if not is_zipfile(zippath):
            return  # invalid zip
        zf = ZipFile(zippath)

        # Unzip Lambda source code
        for _ in zf.namelist():
            zf.extractall(self.downloads, members=[_])

        # Configure sonar-project.properties
        if runtime.startswith('python'):
            language = 'py'
        elif runtime.startswith('node'):
            language = 'js'
        elif runtime.startswith('java'):
            language = 'java'
        else:
            return  # unsupported language
        Path(self.downloads, 'sonar-project.properties').write_text(
            SONAR_PROJECT_PROPERTIES.format(self.config['url'],
                                            self.config['login'],
                                            self.config['password'], language))

        # Run sonar-scanner
        cwd = Path('.').resolve()
        cd(self.downloads)
        sh(shsplit(self.config['command']), stdout=DEVNULL, stderr=DEVNULL)
        cd(cwd)
        rmtree(self.downloads, ignore_errors=True)
        self.downloads.mkdir(parents=True, exist_ok=True)

        # Get results
        curl = requests.Session()
        curl.auth = (self.config['login'], self.config['password'])

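        # Poll the SonarQube activity endpoint until the most recent analysis
        # task reaches a final status.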
        while True:
            sleep(3)
            task = json.loads(
                curl.get(
                    f'{self.config["url"]}/api/ce/activity').text)['tasks'][0]
            if task['status'] in ['SUCCESS', 'FAIL']:
                break

        issues = json.loads(
            curl.get(
                f'{self.config["url"]}/api/issues/search?project=lambdaguard').
            text)['issues']
        curl.post(f'{self.config["url"]}/api/projects/delete',
                  data={'project': 'lambdaguard'})

        for issue in issues:
            if issue['status'] != 'OPEN':
                continue
            where = issue['component'].split(':', 1)[1]
            yield {
                'level':
                'high',
                'text':
                f'{issue["message"]}\n{where} on line {issue["textRange"]["startLine"]}.'
            }
Beispiel #56
0
test_list_url = "data/multi-woz/testListFile.json"
data_url = "data/multi-woz/data.json"

domains = ["restaurant", "taxi", "train", "attraction", "hotel"]

data_train = {}
data_test = {}
data_val = {}

if not os.path.isfile(word_vectors_url):
    if not os.path.exists("data"):
        os.makedirs("word-vectors")
    print("Downloading and unzipping the pre-trained word embeddings")
    resp = urlopen(vectors_url)
    zip_ref = ZipFile(BytesIO(resp.read()))
    zip_ref.extractall("word-vectors")
    zip_ref.close()

if os.path.isfile("data/train.json"):
    exit()

print("Preprocessing the data and creating the ontology")

if not os.path.exists("data"):
    os.makedirs("data")
    os.makedirs("data/multi-woz")

if not os.path.exists(data_url):
    print("Downloading and unzipping the MultiWoz dataset")
    answer = int(
        input(
Beispiel #57
0
    def POST(self, courseid, taskid):
        """ Edit a task """
        if not id_checker(taskid) or not id_checker(courseid):
            raise Exception("Invalid course/task id")

        course, _ = self.get_course_and_check_rights(courseid, allow_all_staff=False)

        # Parse content
        try:
            data = web.input(task_file={})

            try:
                task_zip = data.get("task_file").file
            except:
                task_zip = None
            del data["task_file"]

            problems = self.dict_from_prefix("problem", data)
            limits = self.dict_from_prefix("limits", data)

            data = {key: val for key, val in data.iteritems() if not key.startswith("problem") and not key.startswith("limits")}
            del data["@action"]

            if data["@filetype"] not in self.task_factory.get_available_task_file_extensions():
                return json.dumps({"status": "error", "message": "Invalid file type: {}".format(str(data["@filetype"]))})
            file_ext = data["@filetype"]
            del data["@filetype"]

            if problems is None:
                return json.dumps({"status": "error", "message": "You cannot create a task without subproblems"})

            # Order the problems (this line also deletes @order from the result)
            data["problems"] = OrderedDict([(key, self.parse_problem(val))
                                            for key, val in sorted(problems.iteritems(), key=lambda x: int(x[1]['@order']))])
            data["limits"] = limits
            if "hard_time" in data["limits"] and data["limits"]["hard_time"] == "":
                del data["limits"]["hard_time"]

            # Weight
            try:
                data["weight"] = float(data["weight"])
            except:
                return json.dumps({"status": "error", "message": "Grade weight must be a floating-point number"})

            # Groups
            if "groups" in data:
                data["groups"] = True if data["groups"] == "true" else False

            # Accessible
            if data["accessible"] == "custom":
                data["accessible"] = "{}/{}".format(data["accessible_start"], data["accessible_end"])
            elif data["accessible"] == "true":
                data["accessible"] = True
            else:
                data["accessible"] = False
            del data["accessible_start"]
            del data["accessible_end"]

            # Checkboxes
            if data.get("responseIsHTML"):
                data["responseIsHTML"] = True

            # Network grading
            data["network_grading"] = "network_grading" in data
        except Exception as message:
            return json.dumps({"status": "error", "message": "Your browser returned an invalid form ({})".format(str(message))})

        # Get the course
        try:
            course = self.course_factory.get_course(courseid)
        except:
            return json.dumps({"status": "error", "message": "Error while reading course's informations"})

        # Get original data
        try:
            orig_data = self.task_factory.get_task_descriptor_content(courseid, taskid)
            data["order"] = orig_data["order"]
        except:
            pass

        directory_path = self.task_factory.get_directory_path(courseid, taskid)
        try:
            WebAppTask(course, taskid, data, directory_path)
        except Exception as message:
            return json.dumps({"status": "error", "message": "Invalid data: {}".format(str(message))})

        if not os.path.exists(directory_path):
            os.mkdir(directory_path)

        if task_zip:
            try:
                zipfile = ZipFile(task_zip)
            except Exception as message:
                return json.dumps({"status": "error", "message": "Cannot read zip file. Files were not modified"})

            try:
                zipfile.extractall(directory_path)
            except Exception as message:
                return json.dumps(
                    {"status": "error", "message": "There was a problem while extracting the zip archive. Some files may have been modified"})

        self.task_factory.delete_all_possible_task_files(courseid, taskid)
        self.task_factory.update_task_descriptor_content(courseid, taskid, data, force_extension=file_ext)

        return json.dumps({"status": "ok"})
Beispiel #58
0
    def unzip(self):

        percent = 0.0

        self.task = Task.objects.get(pk=self.task.id)
        self.task.status = 'PROGRESS: UNZIP'
        meta = {
            'action': 'unzip',
            'file': 'Unzip %s' % self.source,
            'percent': percent,
            'items_unzipped': 0
        }
        self.task.meta.update(meta)
        self.task.save()

        zip_path = settings.DOWNLOAD_PATH
        unzip_path = settings.SOURCE_PATH

        final_path = check_create_folder('%s/%s' % (unzip_path, self.source))
        tmp_path = check_create_folder('%s/%s/tmp' % (unzip_path, self.source))
        tmp_path2 = check_create_folder('%s/%s/tmp2' %
                                        (unzip_path, self.source))
        total_weight = len(self.files)

        for zipped in self.files:
            # Initial unzip round
            # operation weight 30%
            weight = 0.3

            zip = ZipFile('%s/%s/%s' % (zip_path, self.source, zipped), 'r')
            zip.extractall(path=tmp_path)
            zip.close()
            percent += (weight / total_weight) * 100
            self.task.meta['percent'] = percent
            self.task.save()

            # Second round of unzipping of files inside the unzip file
            # operation weight 70%
            weight = 0.7
            new_zip_files = glob.glob(tmp_path + '/*/*.zip')
            total_files = len(new_zip_files)
            file_counter = 0
            for zipped in new_zip_files:
                file_counter += 1

                zip = ZipFile(zipped, 'r')
                zip.extractall(path=tmp_path2)
                zip.close()
                percent += (file_counter / total_files) * (
                    (weight / total_weight) * 100)
                if file_counter % 20 == 0:
                    self.task.meta['items_unzipped'] = file_counter
                    self.task.meta['percent'] = percent
                    self.task.save()

        # copy xml files to the correct place
        unzipped_files = glob.glob(tmp_path2 + '/*.xml')
        for item in unzipped_files:
            shutil.copy(item, final_path)

        # delete tmp files
        try:
            shutil.rmtree(tmp_path)
            # shutil.rmtree(tmp_path2)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise

        self.task.meta['percent'] = 100
        self.task.save()
        return True
Beispiel #59
0
    if not os.path.exists(APPENGINE_TARGET_DIR):
        print('Downloading the AppEngine SDK...')

        #First try and get it from the 'featured' folder
        sdk_file = urlopen(FEATURED_SDK_REPO + APPENGINE_SDK_FILENAME)
        if sdk_file.getcode() == 404:
            #Failing that, 'deprecated'
            sdk_file = urlopen(DEPRECATED_SDK_REPO + APPENGINE_SDK_FILENAME)

        #Handle other errors
        if sdk_file.getcode() >= 299:
            raise Exception('App Engine SDK could not be found. {} returned code {}.'.format(sdk_file.geturl(), sdk_file.getcode()))

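        # Read the downloaded SDK archive into memory and unpack it into TARGET_DIR.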
        zipfile = ZipFile(StringIO(sdk_file.read()))
        zipfile.extractall(TARGET_DIR)

        #Make sure the dev_appserver and appcfg are executable
        for module in ("dev_appserver.py", "appcfg.py"):
            app = os.path.join(APPENGINE_TARGET_DIR, module)
            st = os.stat(app)
            os.chmod(app, st.st_mode | stat.S_IEXEC)
    else:
        print('Not updating SDK as it exists. Remove {} and re-run to get the latest SDK'.format(APPENGINE_TARGET_DIR))

    print("Running pip...")
    args = ["pip", "install", "--no-deps", "-r", REQUIREMENTS_FILE, "-t", TARGET_DIR, "-I"]
    p = subprocess.Popen(args)
    p.wait()

    print("Installing Django {}".format(DJANGO_VERSION))
Beispiel #60
-1
def extractAll(zip):
    """
    Extracts every file from a Word archive into a temporary directory.
    """

    myzip = ZipFile(zip, 'r')  # read mode is sufficient for extraction
    myzip.extractall('./../tmp/')