Example #1
def get_uploaded_cropped_image(original_image, crop_data):
    """ Takes an Image object and a cropping tuple (left, upper, right, lower), and returns a new Image object"""

    tmpdir = tempfile.mkdtemp()
    original_filename = original_image.filepath.split('/')[-1]
    safe_local_path0 = os.path.join(tmpdir, original_filename)
    # get the original image and save it locally
    urlretrieve(original_image.filepath, safe_local_path0)
    # import pdb; pdb.set_trace()
    pimage = Pimage.open(safe_local_path0)
    SIZE = 300, 300
    cropped_image = pimage.crop(crop_data)
    cropped_image.thumbnail(SIZE)

    tmp_filename = '{}{}'.format(datetime.datetime.now(), original_filename)
    safe_local_path = os.path.join(tmpdir, tmp_filename)

    def rm_dirs():
        os.remove(safe_local_path0)
        os.remove(safe_local_path)
        os.rmdir(tmpdir)

    # TODO: For faster implementation,
    # avoid writing tempfile by passing a BytesIO object to cropped_image.save()
    cropped_image.save(fp=safe_local_path)
    file = open(safe_local_path, 'rb')

    # See if there is a matching photo already in the db
    hash_img = compute_hash(file.read())
    hash_found = Image.query.filter_by(hash_img=hash_img).first()
    if hash_found:
        rm_dirs()
        return hash_found

    # Generate new filename
    file_extension = original_filename.split('.')[-1]
    new_filename = '{}.{}'.format(hash_img, file_extension)

    # Upload file from local filesystem to S3 bucket and delete locally
    try:
        url = upload_file(safe_local_path, original_filename,
                          new_filename)
        rm_dirs()
        # Update the database to add the image
        new_image = Image(filepath=url, hash_img=hash_img, is_tagged=True,
                          date_image_inserted=datetime.datetime.now(),
                          department_id=original_image.department_id,
                          # TODO: Get the following field from exif data
                          date_image_taken=original_image.date_image_taken)
        db.session.add(new_image)
        db.session.commit()
        return new_image
    except:  # noqa
        exception_type, value, full_tback = sys.exc_info()
        current_app.logger.error('Error uploading to S3: {}'.format(
            ' '.join([str(exception_type), str(value),
                      format_exc()])
        ))
        rm_dirs()
        return None
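As the TODO above notes, the temp-file round trip can be avoided by letting Pillow write into an in-memory buffer. A minimal sketch of that variant, assuming the same compute_hash helper as in the example (the function name and the JPEG default are illustrative, not part of the original):

import io

def cropped_image_digest(cropped_image, image_format='JPEG'):
    # Pillow can save to any file-like object when an explicit format is given
    buffer = io.BytesIO()
    cropped_image.save(buffer, format=image_format)
    data = buffer.getvalue()
    return data, compute_hash(data)  # bytes to upload plus the dedup hash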
Example #2
def test_face_properties():
    local_path = "/tmp/best-epoch47-0.9314.onnx"

    if not os.path.exists(local_path):
        http_path = "https://github.com/Nebula4869/PyTorch-gender-age-estimation/raw/" \
                    "038331d26fc1fbf24d00365d0eb9d0e5e828dda6/models-2020-11-20-14-37/best-epoch47-0.9314.onnx"
        urlretrieve(http_path, local_path)
        print("Downloaded weights to {}".format(local_path))

    def age_is_female_from_asset_name(asset_name):
        age_str, gender_str = re.search(r"age_(\d+)_gender_(\w+)",
                                        asset_name).groups()
        return int(age_str), gender_str == "female"

    assets_path = os.path.join(
        rospkg.RosPack().get_path("image_recognition_age_gender"),
        'test/assets')
    images_gt = [(cv2.imread(os.path.join(assets_path, asset)),
                  age_is_female_from_asset_name(asset))
                 for asset in os.listdir(assets_path)]

    estimations = AgeGenderEstimator(local_path, 64, 16, 8).estimate(
        [image for image, _ in images_gt])
    for (_, (age_gt, is_female_gt)), (age,
                                      gender) in zip(images_gt, estimations):
        age = int(age)
        is_female = gender[0] > 0.5
        assert abs(age - age_gt) < 5, f"{age=}, {age_gt=}"
        assert is_female == is_female_gt, f"{is_female=}, {is_female_gt=}"
Example #3
    def download(self, url, savefile, is_display=False):
        '''Download a file or web page.'''
        header_gzip = None

        for header in self.__opener.addheaders:  # remove the header that advertises gzip support
            if 'Accept-Encoding' in header:
                header_gzip = header
                self.__opener.addheaders.remove(header)

        self.__perLen = 0

        def reporthook(a, b, c):  # a: blocks transferred so far; b: block size; c: total remote file size
            if c > 1000000:
                #nonlocal __perLen
                #global __perLen
                per = (100.0 * a * b) / c
                if per > 100: per = 100
                per = '{:.2f}%'.format(per)
                print('\b' * self.__perLen, per, end='')  # print the download progress percentage
                sys.stdout.flush()
                self.__perLen = len(per) + 1

        try:
            if is_display:
                print('--> {}\t'.format(url), end='')
                request.urlretrieve(url, savefile,
                                    reporthook)  # reporthook is a callback used to display download progress
            else:
                request.urlretrieve(url, savefile)
        except urllib.error.HTTPError as e:
            self.__error(e)
        finally:
            if header_gzip is not None:
                self.__opener.addheaders.append(header_gzip)
            print()
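For reference, the reporthook passed to urlretrieve above follows the standard urllib signature: it receives the number of blocks transferred so far, the block size, and the total file size (which may be -1 when the size is unknown). A minimal standalone sketch with a placeholder URL:

from urllib.request import urlretrieve

def progress(block_num, block_size, total_size):
    # total_size may be -1 if the server does not report a length
    if total_size > 0:
        percent = min(100.0, block_num * block_size * 100.0 / total_size)
        print('\r{:.2f}%'.format(percent), end='', flush=True)

# urlretrieve('https://example.com/some/file.bin', '/tmp/file.bin', reporthook=progress)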
Example #4
 def test_run_conformance(self, batchSystem=None):
     rootDir = self._projectRootPath()
     cwlSpec = os.path.join(rootDir, 'src/toil/test/cwl/spec')
     testhash = "91f108df4d4ca567e567fc65f61feb0674467a84"
     url = "https://github.com/common-workflow-language/common-workflow-language/archive/%s.zip" % testhash
     if not os.path.exists(cwlSpec):
         urlretrieve(url, "spec.zip")
         with zipfile.ZipFile('spec.zip', "r") as z:
             z.extractall()
         shutil.move("common-workflow-language-%s" % testhash, cwlSpec)
         os.remove("spec.zip")
     try:
         cmd = [
             "bash", "run_test.sh", "RUNNER=toil-cwl-runner", "DRAFT=v1.0",
             "-j4"
         ]
         if batchSystem:
             cmd.extend(["--batchSystem", batchSystem])
         subprocess.check_output(cmd, cwd=cwlSpec, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         only_unsupported = False
         # check output -- if we failed but only have unsupported features, we're okay
         p = re.compile(
             r"(?P<failures>\d+) failures, (?P<unsupported>\d+) unsupported features"
         )
         for line in e.output.split("\n"):
             m = p.search(line)
             if m:
                 if int(m.group("failures")) == 0 and int(
                         m.group("unsupported")) > 0:
                     only_unsupported = True
                     break
         if not only_unsupported:
             print(e.output)
             raise e
Example #5
 def test_run_conformance(self, batchSystem=None):
     rootDir = self._projectRootPath()
     cwlSpec = os.path.join(rootDir, 'src/toil/test/cwl/spec')
     workDir = os.path.join(cwlSpec, 'v1.0')
     # The latest cwl git hash. Update it to get the latest tests.
     testhash = "22490926651174c6cbe01c76c2ded3c9e8d0ee6f"
     url = "https://github.com/common-workflow-language/common-workflow-language/archive/%s.zip" % testhash
     if not os.path.exists(cwlSpec):
         urlretrieve(url, "spec.zip")
         with zipfile.ZipFile('spec.zip', "r") as z:
             z.extractall()
         shutil.move("common-workflow-language-%s" % testhash, cwlSpec)
         os.remove("spec.zip")
     try:
         cmd = ['cwltest', '--tool', 'toil-cwl-runner', '--test=conformance_test_v1.0.yaml',
                '--timeout=1800', '--basedir=' + workDir]
         if batchSystem:
             cmd.extend(["--batchSystem", batchSystem])
         subprocess.check_output(cmd, cwd=workDir, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         only_unsupported = False
         # check output -- if we failed but only have unsupported features, we're okay
         p = re.compile(r"(?P<failures>\d+) failures, (?P<unsupported>\d+) unsupported features")
         for line in e.output.split("\n"):
             m = p.search(line)
             if m:
                 if int(m.group("failures")) == 0 and int(m.group("unsupported")) > 0:
                     only_unsupported = True
                     break
         if not only_unsupported:
             print(e.output)
             raise e
Example #6
    def get_file(self, url='', path=''):
        '''It is possible to mimic FTP bulk data downloads using the \
                HTTP-based data distribution server at https://oceandata.sci.gsfc.nasa.gov.

        :param url: a single file name which can be obtained by calling #file_search() \
                an example would be \
                https://oceandata.sci.gsfc.nasa.gov/cgi/getfile/O1997001.L3b_DAY_CHL.nc
        :type url: :mod:`string`

        :param path: Destination directory into which the granule \
                needs to be downloaded.
        :type path: :mod:`string`

        :returns: a file object downloaded from the \
                HTTP-based data distribution server at https://oceandata.sci.gsfc.nasa.gov.

        '''
        try:
            if not url:
                raise ValueError("'url' parameter is required!")
            file = os.path.basename(urlparse(url).path)
            if path == '':
                path = os.path.join(os.path.dirname(__file__), file)
            else:
                path = os.path.join(path, file)
            urlretrieve(url, path)
            print("Downloaded '%s' to '%s'" % (file, path))
            return file

        except Exception:
            raise
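A hedged usage sketch for the method above, reusing the sample URL from its docstring; the client name is an assumption since the enclosing class is not shown:

# client = ...  # instance of the class that defines get_file()
# client.get_file(
#     url='https://oceandata.sci.gsfc.nasa.gov/cgi/getfile/O1997001.L3b_DAY_CHL.nc',
#     path='/tmp')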
Example #7
File: cwlTest.py Project: chapmanb/toil
 def test_run_conformance(self):
     rootDir = self._projectRootPath()
     cwlSpec = os.path.join(rootDir, 'src/toil/test/cwl/spec')
     testhash = "7f510ec768b424601beb8c86700343afe722ac76"
     url = "https://github.com/common-workflow-language/common-workflow-language/archive/%s.zip" % testhash
     if not os.path.exists(cwlSpec):
         urlretrieve(url, "spec.zip")
         with zipfile.ZipFile('spec.zip', "r") as z:
             z.extractall()
         shutil.move("common-workflow-language-%s" % testhash, cwlSpec)
         os.remove("spec.zip")
     try:
         subprocess.check_output(["bash", "run_test.sh", "RUNNER=toil-cwl-runner", "DRAFT=v1.0"], cwd=cwlSpec,
                                 stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         only_unsupported = False
         # check output -- if we failed but only have unsupported features, we're okay
         p = re.compile(r"(?P<failures>\d+) failures, (?P<unsupported>\d+) unsupported features")
         for line in e.output.split("\n"):
             m = p.search(line)
             if m:
                 if int(m.group("failures")) == 0 and int(m.group("unsupported")) > 0:
                     only_unsupported = True
                     break
         if not only_unsupported:
             print(e.output)
             raise e
Example #8
def download(url, filename):
    """Download a file by its URL.
    Args:
      url:
        The web location we want to retrieve.

      filename:
        The filename within the path to download the file.
    """

    urlretrieve(url, filename)
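For completeness, a one-line usage of the thin wrapper above; the URL and target path are placeholders:

download('https://example.com/archive.zip', '/tmp/archive.zip')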
Example #10
	def setUpClass(cls):
		cls.def_file = os.path.join(os.path.dirname(__file__), '../proto', 'alexnet.prototxt')
		cls.npz_file = os.path.join(os.path.dirname(__file__), '../data', 'tiny_imagenet.npz')
		cls.resolver = CaffeResolver()

		if 'PYDAAL_CAFFE_MODEL_DIR' in env:
			cls.data_file = os.path.join(env['PYDAAL_CAFFE_MODEL_DIR'], 'alexnet.caffemodel')
		elif cls.resolver.has_pycaffe():
			cls.data_path = os.path.join(tmp.gettempdir(), str(uuid.uuid4()))
			os.makedirs(cls.data_path)
			
			cls.data_file = os.path.join(cls.data_path, 'alexnet.caffemodel')
			print('Downloading AlexNet model from http://dl.caffe.berkeleyvision.org...')
			urlretrieve('http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel', cls.data_file, report_hook)
			print('\n')
Example #11
def my_urlretrieve(url, *args, **kwargs):
    """
    Call urlretrieve and give friendly error messages depending
    on the result. If successful, return exactly what urlretrieve
    would. Arguments are exactly the same as urlretrieve, except that
    you can also specify a ``backlinks`` keyword used in the error
    message.

    Raises RetrieveError when an error occurs for which we can figure
    out a sensible error message.
    """
    try:
        backlinks = kwargs.pop('backlinks')
    except KeyError:
        backlinks = ''
    try:
        return urlretrieve(url, *args, **kwargs)
    except IOError as err:
        if err.strerror == 'unknown url type' and err.filename == 'https':
            raise RetrieveError(
                _(
                    "This Sage notebook is not configured to load worksheets "
                    "from 'https' URLs. Try a different URL or download the "
                    "worksheet and upload it directly from your "
                    "computer.\n%(backlinks)s",
                    backlinks=backlinks))
        else:
            raise
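A hedged sketch of how the wrapper above might be called; the URL and destination are placeholders, and RetrieveError comes from the surrounding notebook module:

try:
    local_path, headers = my_urlretrieve('https://example.org/worksheet.sws',
                                         '/tmp/worksheet.sws',
                                         backlinks='<a href="/">Back to worksheets</a>')
except RetrieveError as err:
    print(err)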
Example #12
def download_file_from_url(source, destination):
    def basic_progress(blocknum, bs, size):
        if blocknum % 10 == 0:
            log.printer('.', end='', color=False)

    log.printer(f"Downloading '{source}' to '{destination}'",
                end='',
                color=False)
    try:
        urlretrieve(source, str(destination), basic_progress)
    except Exception as e:
        log.error(
            f"Error downloading '{source}' to '{destination}'\n{type(e)} {e}")
        return None
    log.printer('Done', color=False)
    return Path(destination)
Example #13
File: utils.py Project: weng-lab/SnoPlowPy
 def get_file_if_size_diff(url, d):
     fn = url.split('/')[-1]
     out_fnp = os.path.join(d, fn)
     net_file_size = int(urlopen(url).info()['Content-Length'])
     if os.path.exists(out_fnp):
         fn_size = os.path.getsize(out_fnp)
         if fn_size == net_file_size:
             print("skipping download of", fn)
             return out_fnp
         else:
             print("files sizes differed:")
             print("\t", "on disk:", fn_size)
             print("\t", "from net:", net_file_size)
     print("retrieving", fn)
     urlretrieve(url, out_fnp)
     return out_fnp
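A short usage note for the helper above: it only re-downloads when the on-disk size differs from the server's Content-Length. The URL and directory here are placeholders:

out_fnp = get_file_if_size_diff('https://example.org/data/annotations.bed.gz', '/tmp')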
Example #14
def main():
    # parse options
    options = docopt(__doc__, version=__version__)
    if options['--add-url']:
        url_flag = True
        url_prefix = 'https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc='
    else:
        url_flag = False
    # get GSE NUMBER
    gse_num = options['<GSE_NUM>']
    # check GSE NUMBER
    gse_pattern = re.compile(r'GSE\d{3,}')
    if not gse_pattern.match(gse_num):
        sys.exit('Error: <GSE_NUM> should be in correct format!')
    # set up MINiML url
    info_xml_name = gse_num + '_family.xml'
    info_xml_url = 'ftp.ncbi.nlm.nih.gov/geo/series/'
    info_xml_url += gse_num[:-3] + 'nnn/' + gse_num + '/miniml/'
    info_xml_url += info_xml_name + '.tgz'
    # store xml file
    try:  # try using ftp
        zipped_xml, _ = urlretrieve('ftp://' + info_xml_url)
    except Exception:  # ftp connection failed, using http
        zipped_xml, _ = urlretrieve('http://' + info_xml_url)
    # parse xml
    xml_file = tarfile.open(zipped_xml).extractfile(info_xml_name)
    info_xml = BeautifulSoup(xml_file, 'xml')
    # write information
    with smart_write(options['-o']) as f:
        # write GSE information
        f.write(gse_num + '\t')
        if url_flag:
            f.write(url_prefix + gse_num + '\t')
        f.write(info_xml.Series.Title.string + '\n')
        # write GSM information
        for sample in info_xml.find_all('Sample'):
            gsm_num = sample['iid']
            f.write(gsm_num + '\t')
            if url_flag:
                f.write(url_prefix + gsm_num)
            f.write(sample.Title.string)
            try:
                sra_page = sample.find('Relation', type='SRA')['target']
                f.write('\t' + fetch_sra(sra_page, url_flag=url_flag))
            except Exception:
                continue
            f.write('\n')
Example #15
File: l2ss.py Project: surajitdb/podaacpy
    def granule_download(self, query_string, path=''):
        ''' Granule Download service submits a job to subset and download. Upon a successful request,\
            token will be returned which can be used to check status.

            :param query_string: data collection query json as a string.
            :type query_string: :mod:`string`

            :param path: path to a directory where you want the subsetted \
                dataset to be stored.
            :type path: :mod:`string`

            :returns: a zip file downloaded and extracted in the destination\
                directory path provided.
        '''
        params = urlencode({'query': query_string})
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept": "*"
        }
        connection = HTTPSConnection("podaac-tools.jpl.nasa.gov")
        connection.request("POST", "/l2ss-services/l2ss/subset/submit", params,
                           headers)
        response = connection.getresponse()
        data = response.read().decode('utf-8')
        result = json.loads(data)
        token = result['token']
        connection.close()

        flag = 0
        while flag == 0:
            url = self.URL + "subset/status?token=" + token
            subset_response = requests.get(url).text
            subset_response_json = json.loads(subset_response)
            status = subset_response_json['status']
            if status == "done":
                flag = 1
            if status == "error":
                raise Exception(
                    "Unexpected error occured for the subset job you have requested"
                )
            if status == "partial error":
                raise Exception(
                    "The job was done but with some errors, please submit the job again"
                )
            time.sleep(1)

        print("Done! downloading the dataset zip .....")
        download_url = subset_response_json['resultURLs'][0]
        split = download_url.split('/')
        length = len(split)
        zip_file_name = split[length - 1]
        if path == '':
            path = os.path.join(os.path.dirname(__file__), zip_file_name)
        else:
            path = os.path.join(path, zip_file_name)
        response = urlretrieve(download_url, path)
        zip_content = zipfile.ZipFile(path)
        zip_content.extractall()
        os.remove(path)
Example #16
    def run(self):
        # abort if a global Malmo installation already exists and just use that
        if "MALMO_XSD_PATH" in os.environ:
            build.run(self)
            return

        # otherwise, install a new local Malmo
        from future.moves.urllib.request import urlretrieve

        malmo_ver = '0.18.0'

        if os.path.exists('minecraft_py/Malmo'):
            print("Removing existing Malmo folder...")
            shutil.rmtree('minecraft_py/Malmo')

        system = platform.system()
        bits, linkage = platform.architecture()
        if system == 'Linux':
            dist, version, vername = platform.linux_distribution()
            folder = 'Malmo-{}-{}-{}-{}-{}'.format(malmo_ver, system, dist,
                                                   version, bits)
        elif system == 'Darwin':
            folder = 'Malmo-{}-Mac-{}'.format(malmo_ver, bits)
        else:
            folder = 'Malmo-{}-{}-{}'.format(malmo_ver, system, bits)
        url = 'https://github.com/Microsoft/malmo/releases/download/{}/{}.zip'.format(
            malmo_ver, folder)

        print("Downloading Malmo...")
        urlretrieve(url, 'Malmo.zip')

        print("Unzipping Malmo...")
        zip = zipfile.ZipFile('Malmo.zip')
        zip.extractall('minecraft_py')
        zip.close()

        print("Removing zip...")
        os.remove('Malmo.zip')
        print("Renaming folder...")
        os.rename(os.path.join('minecraft_py', folder), 'minecraft_py/Malmo')

        print("Changing permissions...")
        make_executable('minecraft_py/Malmo/Minecraft/gradlew')
        make_executable('minecraft_py/Malmo/Minecraft/launchClient.sh')

        build.run(self)
Example #17
 def urlretrieve(self, *args, **kwargs):
     resource = args[0]
     with support.transient_internet(resource):
         file_location, info = urllib_request.urlretrieve(*args, **kwargs)
         try:
             yield file_location, info
         finally:
             support.unlink(file_location)
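The method above is presumably decorated with contextlib.contextmanager in the enclosing test class, so the downloaded temp file is unlinked once the with-block exits. A hedged usage sketch from inside such a test:

# hypothetical call inside the test class that defines the method above
with self.urlretrieve('http://www.pythontest.net/index.html') as (file_location, info):
    print(file_location, info.get('Content-Type'))
# file_location has already been removed at this point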
Example #19
File: cwlTest.py Project: psafont/toil
 def setUp(self):
     """Runs anew before each test to create farm fresh temp dirs."""
     from builtins import str as normal_str
     self.outDir = os.path.join('/tmp/', 'toil-cwl-test-' + normal_str(uuid.uuid4()))
     os.makedirs(self.outDir)
     self.rootDir = self._projectRootPath()
     self.cwlSpec = os.path.join(self.rootDir, 'src/toil/test/cwl/spec')
     self.workDir = os.path.join(self.cwlSpec, 'v1.0')
     # The latest cwl git hash. Update it to get the latest tests.
     testhash = "22490926651174c6cbe01c76c2ded3c9e8d0ee6f"
     url = "https://github.com/common-workflow-language/common-workflow-language/archive/%s.zip" % testhash
     if not os.path.exists(self.cwlSpec):
         urlretrieve(url, "spec.zip")
         with zipfile.ZipFile('spec.zip', "r") as z:
             z.extractall()
         shutil.move("common-workflow-language-%s" % testhash, self.cwlSpec)
         os.remove("spec.zip")
Example #20
 def setUp(self):
     """Runs anew before each test to create farm fresh temp dirs."""
     from builtins import str as normal_str
     self.outDir = os.path.join('/tmp/', 'toil-cwl-test-' + normal_str(uuid.uuid4()))
     os.makedirs(self.outDir)
     self.rootDir = self._projectRootPath()
     self.cwlSpec = os.path.join(self.rootDir, 'src/toil/test/cwl/spec')
     self.workDir = os.path.join(self.cwlSpec, 'v1.0')
     # The latest cwl git commit hash from https://github.com/common-workflow-language/common-workflow-language.
     # Update it to get the latest tests.
     testhash = 'a062055fddcc7d7d9dbc53d28288e3ccb9a800d8'
     url = 'https://github.com/common-workflow-language/common-workflow-language/archive/%s.zip' % testhash
     if not os.path.exists(self.cwlSpec):
         urlretrieve(url, 'spec.zip')
         with zipfile.ZipFile('spec.zip', 'r') as z:
             z.extractall()
         shutil.move('common-workflow-language-%s' % testhash, self.cwlSpec)
         os.remove('spec.zip')
Example #21
File: podaac.py Project: lewismc/podaacpy
    def extract_l4_granule(self, dataset_id='', path=''):
        '''This is an additional function that we have provided apart \
        from the available webservices. The extract_l4_granule helps \
        retrieve the level 4 datasets from OPeNDAP server directly, \
        accompanied by the search granule for retrieving granule name \
        related to the specific dataset_id.

        :param dataset_id: dataset persistent ID. dataset_id or \
                short_name is required for a granule search. Example: \
                PODAAC-ASOP2-25X01
        :type dataset_id: :mod:`string`

        :param path: Destination directory into which the granule \
                needs to be downloaded.
        :type path: :mod:`string`

        :returns: string representation of granule name.
        '''
        try:
            start_index = '1'
            search_data = self.granule_search(dataset_id=dataset_id,
                                              start_index=start_index)
            root = ET.fromstring(search_data.encode('utf-8'))
            url = root[12][7].attrib['href']
            compressed_granule = ntpath.basename(url)
            granule_name = root[12][0].text
            if path == '':
                compressed_path = os.path.join(os.path.dirname(__file__),
                                               compressed_granule)
            else:
                compressed_path = path + '/' + compressed_granule
            urlretrieve(url, compressed_path)
            if compressed_granule.endswith('.gz'):
                target_dir = path if path else os.path.dirname(__file__)
                with gzip.open(compressed_path, 'rb') as gz_granule, \
                        open(os.path.join(target_dir, granule_name),
                             'wb') as uncompressed_granule:
                    uncompressed_granule.write(gz_granule.read())
        except Exception as e:
            print(e)
            raise

        return granule_name
Example #22
File: utils.py Project: weng-lab/SnoPlowPy
    def download(url,
                 fnp,
                 auth=None,
                 force=None,
                 file_size_bytes=0,
                 skipSizeCheck=None,
                 quiet=False,
                 umask=FileUmask):
        Utils.ensureDir(fnp)
        if not skipSizeCheck:
            if 0 == file_size_bytes:
                fsb = Utils.getHttpFileSizeBytes(url, auth)
                if fsb:
                    file_size_bytes = fsb
            Utils.deleteFileIfSizeNotMatch(fnp, file_size_bytes)

        if os.path.exists(fnp):
            if force:
                os.remove(fnp)
            else:
                return True

        Utils.quietPrint(quiet, "downloading", url, "...")

        if url.startswith("ftp://"):
            fnpTmp = urlretrieve(url)[0]
            shutil.move(fnpTmp, fnp)
            # chmod g+w
            st = os.stat(fnp)
            os.chmod(fnp, st.st_mode | umask)
            return True

        if not auth:
            r = requests.get(url)
        if auth or 403 == r.status_code:
            keyFnp = os.path.expanduser('~/.encode.txt')
            if os.path.exists(keyFnp):
                with open(keyFnp) as f:
                    toks = f.read().strip().split('\n')
                r = requests.get(url, auth=HTTPBasicAuth(toks[0], toks[1]))
            else:
                raise Exception("no ENCODE password file found at: " + keyFnp)
        if 200 != r.status_code:
            Utils.quietPrint(quiet, "could not download", url)
            Utils.quietPrint(quiet, "status_code:", r.status_code)
            return False

        # with open(fnpTmp, "wb") as f:
        with tempfile.NamedTemporaryFile("wb", delete=False) as f:
            f.write(r.content)
            fnpTmp = f.name
        shutil.move(fnpTmp, fnp)
        # chmod g+w
        st = os.stat(fnp)
        os.chmod(fnp, st.st_mode | umask)
        return True
Example #23
    def extract_l4_granule(self, dataset_id='', path=''):
        '''This is an additional function that we have provided apart \
        from the available webservices. The extract_l4_granule helps \
        retrieve the level 4 datasets from the OPeNDAP server directly, \
        accompanied by the search granule for retrieving the granule name \
        related to the specific dataset_id and short_name.

        :param dataset_id: dataset persistent ID. dataset_id or \
                short_name is required for a granule search. Example: \
                PODAAC-ASOP2-25X01
        :type dataset_id: :mod:`string`

        :param short_name: the shorter name for a dataset. \
                Either short_name or dataset_id is required for a \
                granule search. Example: ASCATA-L2-25km
        :type short_name: :mod:`string`

        :param path: Destination directory into which the granule \
                needs to be downloaded.
        :type path: :mod:`string`
        '''
        try:
            start_index = '1'
            search_data = self.granule_search(
                dataset_id=dataset_id, start_index=start_index)
            root = ET.fromstring(search_data.encode('utf-8'))
            url = root[12][7].attrib['href']
            granule_name = root[12][0].text
            granule_name = granule_name.split('\t')[3][:-1]
            if path == '':
                path = os.path.join(os.path.dirname(__file__), granule_name)
            else:
                path = path + '/' + granule_name
            urlretrieve(url, path)

        except Exception:
            raise

        return granule_name
Example #24
    def setUpClass(cls):
        cls.npz_file = os.path.join(os.path.dirname(__file__), '../data',
                                    'tiny_imagenet.npz')
        cls.def_file = os.path.join(os.path.dirname(__file__), '../proto',
                                    'resnet.prototxt')
        cls.resolver = CaffeResolver()

        if 'PYDAAL_CAFFE_MODEL_DIR' in env:
            cls.data_file = os.path.join(env['PYDAAL_CAFFE_MODEL_DIR'],
                                         'ResNet-50-model.caffemodel')
        elif cls.resolver.has_pycaffe():
            cls.data_path = os.path.join(tmp.gettempdir(), str(uuid.uuid4()))
            os.makedirs(cls.data_path)

            cls.data_file = os.path.join(cls.data_path,
                                         'ResNet-50-model.caffemodel')
            print(
                'Downloading ResNet-50 model from the cloud storage: DeepDetect...'
            )
            urlretrieve(
                'https://deepdetect.com/models/resnet/ResNet-50-model.caffemodel',
                cls.data_file, report_hook)
            print('\n')
Example #25
def saved_products(tmpdir_factory):

    _base = str(tmpdir_factory.mktemp("saved_products"))

    prodfile = os.path.join(_basedir, "drift_testproducts.tar.gz")

    # Download the test products if they don't exist locally
    if not os.path.exists(prodfile):
        print("Downloading test verification data.")
        url = "http://bao.chimenet.ca/testcache/drift_testproducts.tar.gz"
        urlretrieve(url, prodfile)

    with tarfile.open(prodfile, "r:gz") as tf:
        tf.extractall(path=_base)

    def _load(fname):
        path = os.path.join(_base, fname)

        if not os.path.exists(path):
            raise ValueError("Saved product %s does not exist" % path)

        return h5py.File(path, "r")

    return _load
Example #26
    def run(self):
        from future.moves.urllib.request import urlretrieve

        if os.path.exists('minecraft_py/Malmo'):
            print("Removing existing Malmo folder...")
            shutil.rmtree('minecraft_py/Malmo')

        system = platform.system()
        bits, linkage = platform.architecture()
        if system == 'Linux':
            dist, version, vername = platform.linux_distribution()
            folder = 'Malmo-0.18.0-{}-{}-{}-{}'.format(system, dist, version,
                                                       bits)
        else:
            folder = 'Malmo-0.18.0-{}-{}'.format(system, bits)
        url = 'https://github.com/Microsoft/malmo/releases/download/0.18.0/{}.zip'.format(
            folder)

        print("Downloading Malmo...")
        urlretrieve(url, 'Malmo.zip')

        print("Unzipping Malmo...")
        zip = zipfile.ZipFile('Malmo.zip')
        zip.extractall('minecraft_py')
        zip.close()

        print("Removing zip...")
        os.remove('Malmo.zip')
        print("Renaming folder...")
        os.rename(os.path.join('minecraft_py', folder), 'minecraft_py/Malmo')

        print("Changing permissions...")
        make_executable('minecraft_py/Malmo/Minecraft/gradlew')
        make_executable('minecraft_py/Malmo/Minecraft/launchClient.sh')

        build.run(self)
Example #27
File: podaac.py Project: rugby110/podaacpy
    def granule_subset(self, input_file_path, path=''):
        '''Subset Granule service allows users to Submit subset jobs. \
        Use of this service should be preceded by a Granule Search in \
        order to identify and generate a list of granules to be subsetted.

        :param input_file_path: path to a json file which contains \
        the request that you want to send to PO.DAAC
        :type input_file_path: :mod:`string`

        :param path: path to a directory where you want the subsetted \
        dataset to be stored.
        :type path: :mod:`string`

        :returns: a zip file downloaded and extracted in the destination\
        directory path provided.

        '''
        data = open(input_file_path, 'r+')
        input_data = json.load(data)
        input_string = json.dumps(input_data)

        # submit subset request
        params = urlencode({'query': input_string})
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept": "*"
        }
        conn = HTTPConnection("podaac.jpl.nasa.gov")
        conn.request("POST", "/ws/subset/granule?request=submit", params,
                     headers)
        response = conn.getresponse()

        data = response.read().decode('utf-8')
        result = json.loads(data)
        token = result['token']
        conn.close()

        flag = 0
        while (flag == 0):
            url = self.URL + "subset/status?token=" + token
            subset_response = requests.get(url).text
            subset_response_json = json.loads(subset_response)
            status = subset_response_json['status']
            if (status == "done"):
                flag = 1
            if (status == "error"):
                raise Exception(
                    "Unexpected error occured for the subset job you have requested"
                )
            time.sleep(1)

        print("Done! downloading the dataset zip .....")
        download_url = subset_response_json['resultURLs'][0]
        split = download_url.split('/')
        length = len(split)
        zip_file_name = split[length - 1]
        if path == '':
            path = os.path.join(os.path.dirname(__file__), zip_file_name)
        else:
            path = os.path.join(path, zip_file_name)
        response = urlretrieve(download_url, path)
        zip_content = zipfile.ZipFile(path)
        zip_content.extractall()
        os.remove(path)
Example #28
    def granule_subset(self, input_file_path, path=''):
        '''Subset Granule service allows users to Submit subset jobs. \
        Use of this service should be preceded by a Granule Search in \
        order to identify and generate a list of granules to be subsetted. \
        NOTE: At present PO.DAAC's granule subsetting service is \
        restricted to Level 2 granules.

        :param input_file_path: path to a json file which contains \
        the request that you want to send to PO.DAAC
        :type input_file_path: :mod:`string`

        :param path: path to a directory where you want the subsetted \
        dataset to be stored.
        :type path: :mod:`string`

        :returns: a string token which can be used to determine the status\
        of a subset task.

        '''
        data = open(input_file_path, 'r+')
        input_data = json.load(data)
        input_string = json.dumps(input_data)

        # submit subset request
        params = urlencode({'query': input_string})
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept": "*"
        }
        conn = HTTPSConnection("podaac.jpl.nasa.gov")
        conn.request("POST", "/ws/subset/granule?request=submit", params,
                     headers)
        response = conn.getresponse()

        data = response.read().decode('utf-8')
        result = json.loads(data)
        token = result['token']
        conn.close()

        flag = 0
        while flag == 0:
            url = self.URL + "subset/status?token=" + token
            subset_response = requests.get(url, headers=HEADERS).text
            subset_response_json = json.loads(subset_response)
            status = subset_response_json['status']
            if status == "done":
                flag = 1
            if status == "error":
                raise Exception(
                    "Unexpected error during subset job, post your issue to the PO.DAAC forum https://podaac.jpl.nasa.gov/forum/"
                )
            time.sleep(1)

        download_url = subset_response_json['resultURLs'][0]
        split = download_url.split('/')
        length = len(split)
        zip_file_name = split[length - 1]
        if path == '':
            zip_path = os.path.join(os.path.dirname(__file__), zip_file_name)
        else:
            zip_path = path + '/' + zip_file_name
        response = urlretrieve(download_url, zip_path)
        zip_content = zipfile.ZipFile(zip_path)
        for i in zip_content.infolist():
            granule_name = i.filename
        zip_content.extractall(path=path)
        print(
            "Podaacpy completed granule subset task for granule '%s'. Granule available at '%s'."
            % (granule_name, path))
        os.remove(zip_path)
        return granule_name
Example #29
def main():
    print("Loading SitoolsClient for", sitools_url)
    ds1 = Dataset(sitools_url + "/webs_SOHO_dataset")
    ds1.display()

    # date__obs
    # Format must be something like 2015-11-01T00:00:00.000 in Sitools2 version 3.0; that will change
    param_query1 = [[ds1.fields_dict['date_obs']], ['2015-01-01T00:00:00.000', '2015-01-01T01:00:00.000'],
                    'DATE_BETWEEN']

    q1 = Query(param_query1)
    # Q2 = Query(param_query2)
    # Q3 = Query(param_query3)

    # Ask recnum, sunum,series_name,date__obs, ias_location,ias_path

    o1 = [ds1.fields_dict['filename'], ds1.fields_dict['filesize'], ds1.fields_dict['instrument'],
          ds1.fields_dict['date_obs'], ds1.fields_dict['secchisata'], ds1.fields_dict['secchisatb'],
          ds1.fields_dict['wavemin'], ds1.fields_dict['wavemax'], ds1.fields_dict['datatype'],
          ds1.fields_dict['download_path'], ds1.fields_dict['path'], ds1.fields_dict['id_sitools_view']]

    # Sort date__obs ASC
    s1 = [[ds1.fields_dict['date_obs'], 'ASC']]

    #       for field in ds1.fields_dict :
    #               field.display()

    #        print "\nPrint Query  ..."
    q1.display()
    # Q2.display()
    # Q3.display()

    result = ds1.search([q1], o1, s1)
    print(len(result), " item(s) found\n")
    # #        result=ds1.search([q1,Q2],o1,S1,limit_to_nb_res_max=10)
    if len(result) != 0:
        print("Results :\n")
        for i, data in enumerate(result):
            print("%d) %s" % (i + 1, data))

    print("Download just one SOHO data\nIn progress please wait ...")
    print("item : \n%s" % result[1])
    dataset_pk = ds1.primary_key.name
    try:
        ds1.execute_plugin(
            plugin_name='pluginSOHOtar',
            pkey_values_list=[result[1][dataset_pk]],
            filename='first_download_SOHO.tar'
        )
    except ValueError as e:
        print("Issue downloading id_sitools_view : %s " % result[1][dataset_pk])
        print("args is: %s" % e.args)
        print("Repr : %s" % e.__repr__())
    except Exception as e:
        print("Issue downloading id_sitools_view : %s " % result[1][dataset_pk])
        print("Repr : %s" % e.__repr__())

    else:
        print("Download id_sitools_view : %s ,file %s completed" % (result[1][dataset_pk],
                                                                    'first_download_SOHO.tar'))

    print("Try to download with urlretrieve")
    print("item : \n%s" % result[2])
    filename_item = (result[2]['filename'].split("/"))[-1]
    try:
        urlretrieve(result[2]['download_path'], filename_item)
    except Exception as e:
        print("Issue downloading id_sitools_view : %s " % result[2][dataset_pk])
        print("args is: %s" % e.args)
        print("repr : %s" % e.__repr__())
    else:
        print("Download id_sitools_view : %s , file %s completed" % (result[2][dataset_pk], filename_item))
Example #30
    def run(self):
        # abort if a global Malmo installation already exists and just use that
        if "MALMO_XSD_PATH" in os.environ:
            build.run(self)
            return

        # otherwise, install a new local Malmo
        from future.moves.urllib.request import urlretrieve

        malmo_ver = '0.37.0'

        if os.path.exists('minecraft_py/Malmo'):
            print("Removing existing Malmo folder...")
            shutil.rmtree('minecraft_py/Malmo')

        if os.path.exists('betterfps'):
            print("Removing existing betterfps folder...")
            shutil.rmtree('betterfps')

        system = platform.system()
        bits, linkage = platform.architecture()
        if system == 'Linux':
            dist, version, vername = platform.linux_distribution()
            folder = 'Malmo-{}-{}-{}-{}-{}'.format(malmo_ver, system, dist,
                                                   version, bits)
        elif system == 'Darwin':
            folder = 'Malmo-{}-Mac-{}'.format(malmo_ver, bits)
        else:
            folder = 'Malmo-{}-{}-{}'.format(malmo_ver, system, bits)

        if malmo_ver in ['0.21.0', '0.22.0', '0.30.0']:
            folder += '_withBoost'
        elif int(malmo_ver.split('.')[1]) >= 31:
            # since 0.31.0
            folder += '_withBoost_Python{}.{}'.format(sys.version_info[0],
                                                      sys.version_info[1])

        url = 'https://github.com/Microsoft/malmo/releases/download/{}/{}.zip'.format(
            malmo_ver, folder)

        print("Downloading Malmo...")
        urlretrieve(url, 'Malmo.zip')

        print("Unzipping Malmo...")
        zip = zipfile.ZipFile('Malmo.zip')
        zip.extractall('minecraft_py')
        zip.close()

        print("Removing zip...")
        os.remove('Malmo.zip')
        print("Renaming folder...")
        os.rename(os.path.join('minecraft_py', folder), 'minecraft_py/Malmo')

        print("Changing permissions...")
        make_executable('minecraft_py/Malmo/Minecraft/gradlew')
        make_executable('minecraft_py/Malmo/Minecraft/launchClient.sh')

        # get betterfps
        betterfps_version = '1.4.5'
        betterfps_branch = '1.11'
        print('Downloading BetterFps for MC {}...'.format(betterfps_branch))
        urlretrieve(
            'https://codeload.github.com/Guichaguri/BetterFps/zip/{}'.format(
                betterfps_branch), 'BetterFps.zip')

        print("Unzipping BetterFps...")
        zip = zipfile.ZipFile('BetterFps.zip')
        zip.extractall('.')
        zip.close()
        os.remove('BetterFps.zip')
        os.rename('BetterFps-{}'.format(betterfps_branch), 'betterfps')

        print("Copying gradlew for BetterFps...")
        shutil.copy('minecraft_py/Malmo/Minecraft/gradlew', 'betterfps')
        shutil.copy('minecraft_py/Malmo/Minecraft/gradlew.bat', 'betterfps')
        shutil.copytree('minecraft_py/Malmo/Minecraft/gradle',
                        'betterfps/gradle')

        if platform.system() == 'Windows':
            gradle = 'gradlew'
        else:
            gradle = './gradlew'

        print('Building deobfuscated betterfps jar...')
        os.chdir('betterfps')
        os.system(gradle + ' jar')
        os.chdir('..')

        print('Copying betterfps jar to mods folder...')
        os.makedirs('minecraft_py/Malmo/Minecraft/run/mods')
        shutil.copy(
            'betterfps/build/libs/BetterFps-{}.jar'.format(betterfps_version),
            'minecraft_py/Malmo/Minecraft/run/mods')

        print("Copying betterfps config...")
        os.makedirs('minecraft_py/Malmo/Minecraft/run/config')
        shutil.copy('betterfps.json',
                    'minecraft_py/Malmo/Minecraft/run/config')

        print('Cleaning up build directory...')
        shutil.rmtree('betterfps')

        shutil.copy('options.txt', 'minecraft_py/Malmo/Minecraft/run')

        # Prevent race condition
        time.sleep(0.2)

        build.run(self)
Example #31
    def execute_plugin(self, plugin_name=None, pkey_values_list=None, filename=None, **kwargs):
        """Donwload a selection of data

        parameter
        ---------
        plugin_name
            name of the plugin within sitools2
        pkey_values_list
            list of primary_key values for the current dataset
        filename
            name of the file donwloaded

        raise
        -----
        ValueError
            No plugin name provided
            plugin name does not exist
            No filename provided

        Return
        ------
            result execution of the plugin
            can be a tar zip etc...
        """

        # Determine if pk is a couple
        pk_item = self.fields_dict[self.primary_key.name]
        pk_item_component = pk_item.component.split("||','||")

        # primary key is like : (pk_item1, pk_item2)
        if len(pk_item_component) == 2:

            operation = 'LISTBOXMULTIPLE'
            pk_item1 = pk_item_component[0]
            pk_item2 = pk_item_component[1]
            recnum_list = [
                elmnt for idx, elmnt in enumerate(pkey_values_list)
                if idx % 2 == 0
            ]
            series_name_list = [
                elmnt for idx, elmnt in enumerate(pkey_values_list)
                if idx % 2 != 0
            ]

            kwargs.update({
                'p[0]': operation + "|" + pk_item1 + "|" + "|".join(str(recnum) for recnum in recnum_list),
                'p[1]': operation + "|" + pk_item2 + "|" + "|".join(str(series) for series in series_name_list)
            })

        # primary_key is like : recnum
        elif len(pk_item_component) == 1:
            resources_list = []
            if plugin_name is None:
                err_mess = "Error execute_plugin():\nNo plugin_name provided\n"
                raise ValueError(err_mess)
            for resource in self.resources_target:
                resources_list.append(resource.split("/")[-1])
            if plugin_name not in resources_list:
                err_mess = (
                    "Error execute_plugin():\nThis plugin_name %s does not "
                    "exist in %s dataset\n" % (plugin_name, self.name)
                )
                raise ValueError(err_mess)
            if len(pkey_values_list) == 0:
                err_mess = (
                    "Error execute_plugin():\nNo identifiers pkey provided\n"
                )
                raise ValueError(err_mess)
            if filename is None:
                err_mess = (
                    "Error execute_plugin():\nNo filename provided\n"
                )
                raise ValueError(err_mess)
            operation = 'LISTBOXMULTIPLE'
            kwargs.update({
                'p[0]': operation + "|" + self.primary_key.name + "|" + "|".join(
                    str(pkey_value) for pkey_value in pkey_values_list)
            })

        url = self.url + "/" + plugin_name + "?" + urlencode(kwargs)
        # print("url exec_plugin : %s\n" % url)
        try:
            urlopen(url)
        except HTTPError as e:
            print("code error :%s" % e.code)
            print("Reason : %s " % e.reason)
            raise
        except Exception as e:
            print(e.args)
            raise
        else:
            return urlretrieve('%s' % url, filename)
Example #32
def main():
    print("Loading SitoolsClient for", sitools_url)
    sitools_instance = Sitools2Instance(sitools_url)
    print("sitools_instance : %s " % sitools_instance.list_project())

    ds1 = Dataset(sitools_url + "/webs_STEREO_dataset")
    ds1.display()

    # date__obs
    # Format must be something like 2015-11-01T00:00:00.000 in Sitools2 version 3.0; that will change
    param_query1 = [
        [ds1.fields_dict['date_obs']],
        ['2010-06-01T00:00:00.000', '2010-06-02T00:00:00.000'],
        'DATE_BETWEEN']

    q1 = Query(param_query1)
    # Q2 = Query(param_query2)
    # Q3 = Query(param_query3)

    # Ask recnum, sunum,series_name,date__obs, ias_location,ias_path

    o1 = [
        ds1.fields_dict['filename'], ds1.fields_dict['filesize'],
        ds1.fields_dict['instrument'], ds1.fields_dict['date_obs'],
        ds1.fields_dict['secchisata'], ds1.fields_dict['secchisatb'],
        ds1.fields_dict['wavemin'], ds1.fields_dict['wavemax'],
        ds1.fields_dict['datatype'], ds1.fields_dict['download_path'],
        ds1.fields_dict['path'], ds1.fields_dict['id_sitools_view']
    ]

    # Sort date__obs ASC
    s1 = [[ds1.fields_dict['date_obs'], 'ASC']]

    #       for field in ds1.fields_dict :
    #               field.display()

    #        print "\nPrint Query  ..."
    q1.display()
    # Q2.display()
    # Q3.display()

    # result = ds1.search([q1], o1, s1)
    #    print(len(result), " item(s) found\n")
    result = ds1.search([q1], o1, s1, limit_to_nb_res_max=10)
    print(len(result), " item(s) found\n")
    if len(result) != 0:
        print("Results :\n")
        for i, data in enumerate(result):
            print("%d) %s" % (i + 1, data))

    print("Download just one STEREO data\nIn progress please wait ...")
    print("item : \n%s" % result[1])
    print("id : %s" % result[1]['id_sitools_view'])
    try:
        ds1.execute_plugin(
            plugin_name='pluginSTEREOtar',
            pkey_values_list=[result[1]['id_sitools_view']],
            filename='first_download_STEREO.tar'
        )
    except ValueError as e:
        print("Issue downloading id_sitools_view : %s " % result[1]['id_sitools_view'])
        print("args is: %s" % e.args)
        print("repr : %s" % e.__repr__())
    except Exception as e:
        print("Issue downloading id_sitools_view : %s " % result[1]['id_sitools_view'])
        print("Message : %s" % e.__repr__())

    else:
        print("Download id_sitools_view : %s ,file %s completed" % (result[1]['id_sitools_view'],
                                                                    'first_download_STEREO.tar'))

    print("Try to download with urlretrieve")
    print("item : \n%s" % result[2])
    filename_item = (result[2]['filename'].split("/"))[-1]
    try:
        urlretrieve(result[2]['download_path'], filename_item)
    except Exception as e:
        print("Issue downloading id_sitools_view : %s " % result[2]['id_sitools_view'])
        print("type is: %s" % e.args)
        print("Message : %s" % e.__repr__())
    else:
        print("Download id_sitools_view : %s , file %s completed" % (result[2]['id_sitools_view'], filename_item))
Example #33
            warn('Failed to remove all object files')
        finally:
            _clean.run(self)


# make dependency directory
if not os.path.exists('deps'):
    os.mkdir('deps')

# download Eigen if we don't have it in deps
eigenurl = 'http://bitbucket.org/eigen/eigen/get/3.2.6.tar.gz'
eigentarpath = os.path.join('deps', 'Eigen.tar.gz')
eigenpath = os.path.join('deps', 'Eigen')
if not os.path.exists(eigenpath):
    print('Downloading Eigen...')
    urlretrieve(eigenurl, eigentarpath)
    with tarfile.open(eigentarpath, 'r') as tar:
        tar.extractall('deps')
    thedir = glob(os.path.join('deps', 'eigen-eigen-*'))[0]
    shutil.move(os.path.join(thedir, 'Eigen'), eigenpath)
    print('...done!')

# make a list of extension modules
extension_pathspec = os.path.join('pyhsmm', '**',
                                  '*.pyx')  # not recursive before Python 3.5
paths = [os.path.splitext(fp)[0] for fp in glob(extension_pathspec)]
names = ['.'.join(os.path.split(p)) for p in paths]
ext_modules = [
    Extension(name,
              sources=[path + '.cpp'],
              include_dirs=['deps'],
Example #34
    def get_file(self, filename=None, target_dir=None, quiet=False, filetype=None, **kwds):
        """Use the get_file() to retrieve data from MEDOC server  

        Parameters
        ----------
        filename : str 
            Name of the file , rename the default name using that parameter 
        target_dir : str
            The directory in which you will retreive the data
            If it does not exist it will be created
        quiet : boolean
            Do not print info concerning the data downloaded if that is set to 
            'True'
        filetype : str
        Can be 'temp', 'em', 'width' or 'chi2'
        Type of file downloaded

        Returns 
        -------
        MEDOC files on your current or targer_dir directory  
        """
        url_dict = {'temp': self.temp_fits_rice_uri, 'em': self.em_fits_rice_uri, 'width': self.width_fits_rice_uri,
                    'chi2': self.chi2_fits_rice_uri}
        filename_dict = {}
        # Allow upper case entries
        for k, v in iteritems(kwds):
            if k not in ['FILENAME', 'TARGET_DIR', 'QUIET', 'TYPE']:
                raise ValueError(
                    "Error get_file():\n'%s' entry for the search function "
                    "is not allowed\n"
                    % k)
            elif k == 'FILENAME':
                filename = v
            elif k == 'TARGET_DIR':
                target_dir = v
            elif k == 'QUIET':
                quiet = v
            elif k == 'TYPE':
                filetype = v
        if filetype is not None and type(filetype).__name__ != 'list':
            raise ValueError("Error get_file():\nfiletype should be a list\n")
        if filename is None and filetype is None:
            #           if not specified this is the default name
            for value in url_dict.values():
                key = value.split("/")[-1]
                value = sitools2_url + value
                filename_dict[key] = value
        elif filename is None and filetype is not None:
            for type_spec in filetype:
                if type_spec not in url_dict.keys() and type_spec != 'all':
                    raise ValueError("Error get_file():\nfilename = %s entry for the get function is not allowed\n"
                                     "filetype value should be in list 'temp','em','width','chi2', 'all'\n" % filetype)
                elif type_spec == 'all':
                    for value in url_dict.values():
                        key = value.split("/")[-1]
                        value = sitools2_url + value
                        filename_dict[key] = value
                else:
                    key = url_dict[type_spec].split("/")[-1]
                    value = sitools2_url + url_dict[type_spec]
                    filename_dict[key] = value

        elif filename is not None and filetype is not None:
            raise ValueError(
                "Warning get_file():\nfilename :%s\nfiletype : %s \nfilename"
                " and filetype are both specified at the same time\nNot"
                " allowed please remove one\n" % (filename, filetype))

        elif filename is not None and type(filename).__name__ != 'dict':
            raise ValueError(
                "Error get_file():\nfilename should be a dictionary\n")
        else:
            for k, v in iteritems(filename):
                if k not in url_dict.keys():
                    raise ValueError(
                        "Error get_file():\nfiletype = %s entry for the get"
                        " function is not allowed\nfiletype value should be "
                        " in list 'temp','em','width','chi2'\n" % k)
                else:
                    key = filename[k]
                    value = sitools2_url + url_dict[k]
                    filename_dict[key] = value

        if target_dir is not None:
            if not path.isdir(target_dir):
                stdout.write(
                    "Error get_file():\n'%s' directory did not exist.\n"
                    "Creation directory in progress ..." % target_dir)
                mkdir(target_dir)
            if target_dir[-1].isalnum():
                target_dir = target_dir + '/'
            else:
                stdout.write(
                    "Error get_file():\nCheck the param target_dir, special "
                    "char %s at the end of target_dir is not allowed."
                    % target_dir[-1])
        else:
            target_dir = ""
        for (item, url) in iteritems(filename_dict):
            try:
                urlretrieve(url, "%s%s" % (target_dir, item))
            except HTTPError:
                stdout.write("Error downloading %s%s \n" % (target_dir, item))
            else:
                if not quiet:
                    stdout.write("Download file %s%s completed\n" % (target_dir, item))
                    stdout.flush()
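A hedged usage sketch following the docstring above; the client instance name and target directory are assumptions, since the class that defines get_file() is not shown here:

# client = ...  # instance of the class that defines get_file()
# client.get_file(filetype=['temp', 'em'], target_dir='results')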
Example #35
File: setup.py Project: chiaolun/pyhsmm
        except:
            warn('Failed to remove all object files')
        finally:
            _clean.run(self)

# make dependency directory
if not os.path.exists('deps'):
    os.mkdir('deps')

# download Eigen if we don't have it in deps
eigenurl = 'http://bitbucket.org/eigen/eigen/get/3.2.6.tar.gz'
eigentarpath = os.path.join('deps', 'Eigen.tar.gz')
eigenpath = os.path.join('deps', 'Eigen')
if not os.path.exists(eigenpath):
    print('Downloading Eigen...')
    urlretrieve(eigenurl, eigentarpath)
    with tarfile.open(eigentarpath, 'r') as tar:
        tar.extractall('deps')
    thedir = glob(os.path.join('deps', 'eigen-eigen-*'))[0]
    shutil.move(os.path.join(thedir, 'Eigen'), eigenpath)
    print('...done!')

# make a list of extension modules
extension_pathspec = os.path.join('pyhsmm','**','*.pyx')  # not recursive before Python 3.5
paths = [os.path.splitext(fp)[0] for fp in glob(extension_pathspec)]
names = ['.'.join(os.path.split(p)) for p in paths]
ext_modules = [
    Extension(
        name, sources=[path + '.cpp'],
        include_dirs=['deps'],
        extra_compile_args=['-O3','-std=c++11','-DNDEBUG','-w','-DHMM_TEMPS_ON_HEAP'])