Example #1
File: update.py  Project: 4shadoww/usploit
def check_for_updates():
	try:
		print(colors.green+"checking for updates..."+colors.end)
		r = requests.get("https://api.github.com/repos/4shadoww/hakkuframework/releases/latest")
		if(r.ok):
			items = json.loads(r.text or r.content)
			rver = items['tag_name']

			if "beta" in rver and "alpha" in info.version:
				print(colors.green+"update found"+colors.end)
				return True 

			elif "beta" not in rver and "alpha" not in rver:
				if "beta" in info.version or "alpha" in info.version:
					print(colors.green+"update found"+colors.end)
					return True

			elif version.parse(rver) > version.parse(info.version):
				print(colors.green+"update found"+colors.end)
				return True

			else:
				print(colors.yellow+"updates not found"+colors.end)
				return False
		else:
			print("error")
	except Exception as error:
		print(colors.red+"error: "+str(error)+colors.end)
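A minimal illustrative sketch (not part of the project above): packaging.version already orders pre-releases below final releases per PEP 440, which is what the alpha/beta special-casing here interacts with.

from packaging import version

# dev < alpha < beta < release candidate < final
assert version.parse("1.0.dev1") < version.parse("1.0a1")
assert version.parse("1.0a1") < version.parse("1.0b1") < version.parse("1.0rc1") < version.parse("1.0")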
Example #2
def check_manifest(module_path):
    """
        Check the GeoNature version against the manifest.
        Return the module code in upper case.
    """
    log.info("checking manifest")
    configs_py = utilstoml.load_and_validate_toml(
        str(Path(module_path) / "manifest.toml"), ManifestSchemaConf
    )

    gn_v = version.parse(GEONATURE_VERSION)
    if gn_v < version.parse(
        configs_py["min_geonature_version"]
    ) or gn_v > version.parse(configs_py["max_geonature_version"]):
        raise GeoNatureError(
            "Geonature version {} is imcompatible with module".format(GEONATURE_VERSION)
        )
    for e_gn_v in configs_py["exclude_geonature_versions"]:
        if gn_v == version.parse(e_gn_v):
            raise GeoNatureError(
                "Geonature version {} is imcompatible with module".format(
                    GEONATURE_VERSION
                )
            )
    log.info("...%s\n", MSG_OK)
    return configs_py["module_code"].upper()
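A small sketch with made-up version bounds showing why the range check above uses 'or': a version below the minimum or above the maximum is incompatible either way.

from packaging import version

gn_v = version.parse("1.9.0")
min_v, max_v = version.parse("2.0.0"), version.parse("2.4.0")
assert gn_v < min_v or gn_v > max_v   # out of range, so the module must be rejected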
Example #3
File: config.py  Project: chenyilun95/PANet
def assert_and_infer_cfg(make_immutable=True):
    """Call this function in your script after you have finished setting all cfg
    values that are necessary (e.g., merging a config from a file, merging
    command line config options, etc.). By default, this function will also
    mark the global cfg as immutable to prevent changing the global cfg settings
    during script execution (which can lead to hard to debug errors or code
    that's harder to understand than is necessary).
    """
    if __C.MODEL.RPN_ONLY or __C.MODEL.FASTER_RCNN:
        __C.RPN.RPN_ON = True
    if __C.RPN.RPN_ON or __C.RETINANET.RETINANET_ON:
        __C.TEST.PRECOMPUTED_PROPOSALS = False
    if __C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS:
        assert __C.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, \
            "Path to the weight file must not be empty to load imagenet pertrained resnets."
    if set([__C.MRCNN.ROI_MASK_HEAD, __C.KRCNN.ROI_KEYPOINTS_HEAD]) & _SHARE_RES5_HEADS:
        __C.MODEL.SHARE_RES5 = True
    if version.parse(torch.__version__) < version.parse('0.4.0'):
        __C.PYTORCH_VERSION_LESS_THAN_040 = True
        # create alias for PyTorch version less than 0.4.0
        init.uniform_ = init.uniform
        init.normal_ = init.normal
        init.constant_ = init.constant
        nn.GroupNorm = mynn.GroupNorm
    if make_immutable:
        cfg.immutable(True)
Example #4
    def updateCheck(self):
        ''' Check for update at pypi'''


        try:
            from packaging.version import parse

        except ImportError:
            print("Error importing packaging")
            from pip._vendor.packaging.version import parse


        req = requests.get(self.repo.format(package=self.pkg))

        avail_version = parse('0')
        if req.status_code == requests.codes.ok:
            j = json.loads(req.text.encode(req.encoding))
            if 'releases' in j:
                releases = j['releases']
                for release in releases:
                    ver = parse(release)
                    if not ver.is_prerelease:
                        avail_version = max(avail_version, ver)

        return str(avail_version)
Example #5
File: modules.py  Project: annndrey/npui
	def __init__(self, old, new):
		if isinstance(old, str):
			old = parse(old)
		if isinstance(new, str):
			new = parse(new)
		self.old = old
		self.new = new
Example #6
    def test_empty_specifier(self, version):
        spec = SpecifierSet(prereleases=True)

        assert version in spec
        assert spec.contains(version)
        assert parse(version) in spec
        assert spec.contains(parse(version))
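For context, a hedged sketch of why the test above passes prereleases=True: an empty SpecifierSet matches any version, but by default it excludes pre-releases from containment checks.

from packaging.specifiers import SpecifierSet
from packaging.version import parse

assert parse("1.0") in SpecifierSet()                      # finals always match the empty set
assert parse("1.0a1") not in SpecifierSet()                # pre-releases excluded by default
assert parse("1.0a1") in SpecifierSet(prereleases=True)    # admitted when explicitly allowed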
Example #7
File: status.py  Project: DMOJ/site
def version_matrix(request):
    matrix = defaultdict(partial(defaultdict, LatestList))
    latest = defaultdict(list)
    groups = defaultdict(list)

    judges = {judge.id: judge.name for judge in Judge.objects.filter(online=True)}
    languages = Language.objects.all()

    for runtime in RuntimeVersion.objects.filter(judge__online=True).order_by('priority'):
        if runtime.version:
            matrix[runtime.judge_id][runtime.language_id].append(runtime)

    for judge, data in six.iteritems(matrix):
        name_tuple = judges[judge].rpartition('.')
        groups[name_tuple[0] or name_tuple[-1]].append((judges[judge], data))

    matrix = {}
    for group, data in six.iteritems(groups):
        if len(data) == 1:
            judge, data = data[0]
            matrix[judge] = data
            continue

        ds = list(range(len(data)))
        size = [1] * len(data)
        for i, (p, x) in enumerate(data):
            if ds[i] != i:
                continue
            for j, (q, y) in enumerate(data):
                if i != j and compare_version_list(x, y):
                    ds[j] = i
                    size[i] += 1
                    size[j] = 0

        rep = max(range(len(data)), key=size.__getitem__)
        matrix[group] = data[rep][1]
        for i, (j, x) in enumerate(data):
            if ds[i] != rep:
                matrix[j] = x

    for data in six.itervalues(matrix):
        for language, versions in six.iteritems(data):
            versions.versions = [version.parse(runtime.version) for runtime in versions]
            if versions.versions > latest[language]:
                latest[language] = versions.versions

    for data in six.itervalues(matrix):
        for language, versions in six.iteritems(data):
            versions.is_latest = versions.versions == latest[language]

    languages = sorted(languages, key=lambda lang: version.parse(lang.name))
    return render(request, 'status/versions.html', {
        'title': _('Version matrix'),
        'judges': sorted(matrix.keys()),
        'languages': languages,
        'matrix': matrix,
    })
Example #8
def find_rpm_packaging_pkg_version(pkg_project_spec):
    """get a spec.j2 template and get the version"""
    if os.path.exists(pkg_project_spec):
        with open(pkg_project_spec) as f:
            for l in f:
                m = re.search(r'^Version:\s*(?P<version>.*)\s*$', l)
                if m:
                    return version.parse(m.group('version'))
        # no version in spec found
        print('ERROR: no version in %s found' % pkg_project_spec)
        return version.parse('0')
    return version.parse('0')
Example #9
def test_demo(preconfigured_testing_api, android_version):

    # Load necessary resources
    settings_ico = tm_engine.load_template("resources/settings_ico.png")

    # Create a dictionary with filters to find specific device
    device_spec = {"android_version": android_version}

    with preconfigured_testing_api(device_spec, device_manager_url) as api:
        api.adb("shell pm clear com.kms.free")
        # Launch an app without specifying Activity
        api.adb("shell monkey -p com.kms.free 1")

        # Use packaging.version to compare different versions
        if version.parse(android_version) >= version.parse("5.0"):
            api.tap(
                "next",
                config=dict(crop_x_range=(0.4, 0.6), crop_y_range=(0.7, 0.9)))

            # We need to allow permissions
            for i in range(0, 2):
                time.sleep(5)

                # Some special cases need special configuration
                api.tap("allow",
                        config=dict(
                            ocr_postprocessing=filter_postprocessing,
                            api_tap_index=-1))

        # But most cases are simple
        api.tap("accept and continue")
        api.wait_for("activation code")

        # Scroll down (move your "finger" from the lowest point to the highest one)
        # until you find the text (maximum number of screens is adjustable)
        api.scroll_for("use free version", (0.5, 0.6), (0.5, 0.2))
        api.tap("use free version")
        api.tap("run the scan")

        api.tap(
            "OK",
            config=dict(
                api_tap_timeout=60 * 5,
                crop_x_range=(0.1, 0.9),
                crop_y_range=(0.4, 0.8)))
        api.tap("rate later")
        api.tap(settings_ico)
        api.tap("additional")

        # Some checkboxes should change their state
        with api.assert_screen_change():
            api.tap("get notifications about")
            api.tap("get sound")
Example #10
    def ensure_model_compatibility(metadata):
        from packaging import version

        model_version = metadata.get("rasa_core", "0.0.0")
        if version.parse(model_version) < version.parse("0.10.0a3"):
            raise UnsupportedDialogueModelError(
                "The model version is to old to be "
                "loaded by this Rasa Core instance. "
                "Either retrain the model, or run with"
                "an older version. "
                "Model version: {} Instance version: {}"
                "".format(model_version, rasa_core.__version__))
Example #11
def supported_resampling(method):
    if method == Resampling.gauss:
        return False
    gdal110plus_only = (
        Resampling.mode, Resampling.average)
    gdal2plus_only = (
        Resampling.max, Resampling.min, Resampling.med,
        Resampling.q1, Resampling.q3)
    version = parse(rasterio.__gdal_version__)
    if version < parse('1.10'):
        return method not in gdal2plus_only and method not in gdal110plus_only
    if version < parse('2.0'):
        return method not in gdal2plus_only
    return True
Example #12
File: trace.py  Project: avcopan/meinsum
def einsum(*args, **kwargs):
    """Call optimized einsum if possible
    
    Args:
        *args: Arguments for numpy.einsum.
        **kwargs: Keyword arguments for numpy.einsum.

    Returns:
        The output of numpy.einsum(*args, **kwargs).
    """
    if version.parse(numpy.__version__) >= version.parse('1.12'):
        kwargs['optimize'] = True
        return numpy.einsum(*args, **kwargs)
    else:
        return numpy.einsum(*args, **kwargs)
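A hypothetical call to the wrapper above; both branches return the same result, the first one just enables numpy's optimized contraction path.

import numpy

a = numpy.random.rand(4, 5)
b = numpy.random.rand(5, 6)
c = einsum('ij,jk->ik', a, b)          # matrix product, shape (4, 6)
assert numpy.allclose(c, a @ b)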
Example #13
File: model.py  Project: shiva16/rasa_nlu
    def ensure_model_compatibility(metadata, version_to_check=None):
        from packaging import version

        if version_to_check is None:
            version_to_check = MINIMUM_COMPATIBLE_VERSION

        model_version = metadata.get("rasa_nlu_version", "0.0.0")
        if version.parse(model_version) < version.parse(version_to_check):
            raise UnsupportedModelError(
                "The model version is to old to be "
                "loaded by this Rasa NLU instance. "
                "Either retrain the model, or run with"
                "an older version. "
                "Model version: {} Instance version: {}"
                "".format(model_version, rasa_nlu.__version__))
Example #14
def latest_version(pkg_name, prerelease=False, silent=False):
    try:
        info = get_pkg_info(pkg_name, silent=silent)
    except ValueError:
        if silent:
            return None, None
        else:
            raise
    if not info:
        return None, None

    try:
        versions = [
            v for v in sorted(
                list(info['releases']),
                key=packaging_version.parse
            )
        ]
        if not prerelease:
            versions = [v for v in versions
                        if not packaging_version.parse(v).is_prerelease]
        version = versions[-1]
    except IndexError:
        return None, None

    return parse_version(version), version
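An aside on the sort key used above: plain string sorting misorders version numbers, which is exactly what sorting by packaging's parse avoids.

releases = ["1.10.0", "1.2.0", "1.9.1"]
sorted(releases)                                  # ['1.10.0', '1.2.0', '1.9.1'] - lexicographic
sorted(releases, key=packaging_version.parse)     # ['1.2.0', '1.9.1', '1.10.0'] - semantic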
Example #15
    def do_update_own_boot_file(self, arg):
        """
        Update 'bootstrap_env/boot_bootstrap_env.py' via cookiecutter
        """
        # https://packaging.pypa.io/en/latest/version/
        parsed_bootstrap_env_version = parse(bootstrap_env_version)

        if parsed_bootstrap_env_version.is_prerelease:
            use_pre_release = "y"
        else:
            use_pre_release = "n"

        repro_path = Path(self.path_helper.base, "boot_source")

        # https://cookiecutter.readthedocs.io
        result = verbose_cookiecutter(
            template=str(repro_path),
            no_input=True,
            overwrite_if_exists=True,
            output_dir=str(self.path_helper.base.parent),
            extra_context={
                "_version": bootstrap_env_version,
                "use_pre_release": use_pre_release,
            },
        )
        print("\nbootstrap file created here: %s" % result)
Example #16
def _context_py2rpmversion(context):
    """get a python PEP0440 compatible version and translate it to an RPM
    version"""
    # the context needs a variable set via {% set upstream_version = 'ver' %}
    _context_check_variable(context, CONTEXT_VAR_UPSTREAM_VERSION, "py2rpmversion")
    version = context.vars[CONTEXT_VAR_UPSTREAM_VERSION]
    v_python = parse(version)
    # fedora does not allow '~' in versions but uses a combination of Version
    # and Release
    # https://fedoraproject.org/wiki/Packaging:Versioning#Pre-Release_packages
    if context["spec_style"] == "fedora":
        if len(v_python._version.release) >= 4:
            return "%d.%d.%d" % (v_python._version.release[0:3])
        else:
            return v_python.base_version
    else:
        v_rpm = v_python.public
        if v_python.is_prerelease:
            # we need to add the 'x' in front of alpha/beta releases because
            # in the python world, "1.1a10" > "1.1.dev10"
            # but in the rpm world, "1.1~a10" < "1.1~dev10"
            v_rpm = v_rpm.replace("a", "~xalpha")
            v_rpm = v_rpm.replace("b", "~xbeta")
            v_rpm = v_rpm.replace("rc", "~rc")
            v_rpm = v_rpm.replace(".dev", "~dev")
        return v_rpm
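A rough illustration (made-up input) of the pre-release mapping above: the extra 'x' keeps alpha/beta releases sorting above dev snapshots once '~' is in play on the RPM side.

from packaging.version import parse

v = parse("1.1a10")
v_rpm = v.public.replace("a", "~xalpha")   # "1.1a10" -> "1.1~xalpha10"
# rpm sorts "1.1~dev10" < "1.1~xalpha10", matching PEP 440's "1.1.dev10" < "1.1a10"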
Example #17
File: simple.py  Project: dstufft/warehouse
def simple_detail(project, request):
    # TODO: Handle files which are not hosted on PyPI

    # Make sure that we're using the normalized version of the URL.
    if project.normalized_name != request.matchdict.get(
        "name", project.normalized_name
    ):
        return HTTPMovedPermanently(
            request.current_route_path(name=project.normalized_name)
        )

    # Get the latest serial number for this project.
    request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)

    # Get all of the files for this project.
    files = sorted(
        request.db.query(File)
        .options(joinedload(File.release))
        .join(Release)
        .filter(Release.project == project)
        .all(),
        key=lambda f: (parse(f.release.version), f.filename),
    )

    return {"project": project, "files": files}
Example #18
def _pretty_table(release, projects, include_obs):
    from prettytable import PrettyTable
    tb = PrettyTable()
    fn = ['name',
          'release (%s)' % release,
          'u-c (%s)' % release,
          'rpm packaging (%s)' % release,
          'reviews']
    if include_obs:
        fn += ['obs']
    fn += ['comment']
    tb.field_names = fn

    for p_name, x in projects.items():
        if x.rpm_packaging_pkg == version.parse('0'):
            comment = 'needs packaging'
        elif x.rpm_packaging_pkg < x.release:
            comment = 'needs upgrade'
        elif x.rpm_packaging_pkg == x.release:
            if x.release > x.upper_constraints:
                comment = 'needs downgrade (u-c)'
            else:
                comment = 'perfect'
        elif x.rpm_packaging_pkg > x.release:
            comment = 'needs downgrade'
        else:
            comment = ''
        row = [p_name, x.release, x.upper_constraints, x.rpm_packaging_pkg,
               x.reviews]
        if include_obs:
            row += [x.obs_published]
        row += [comment]

        tb.add_row(row)

    return tb
Example #19
def _parse_version(version_str):
    # not sure how consistently these are installed, just try all of them
    try:
        from packaging import version
        return version.parse(version_str)
    except ImportError:
        pass

    try:
        from distutils.version import LooseVersion, StrictVersion
        try:
            return StrictVersion(version_str)
        except ValueError:
            return LooseVersion(version_str)
    except ImportError:
        pass

    try:
        from setuptools import parse_version
        return parse_version(version_str)
    except ImportError:
        pass

    try:
        from pkg_resources import parse_version
        return parse_version(version_str)
    except ImportError:
        pass

    raise NotImplementedError(version_str)
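A hypothetical use of the fallback helper above; whichever backend ends up available, the parsed objects compare numerically rather than lexicographically.

assert _parse_version("1.2.3") < _parse_version("1.10.0")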
Example #20
def check_dependencies():
    """Verify dependant libraries/binaries are present with correct versions."""
    print('Checking library/binary dependencies')
    for (binary, binary_get_version, binary_present_re,
         binary_version_re, binary_minversion) in EXTERNAL_DEPENDENCIES:
        binary_args = [binary] + binary_get_version
        required_binary = 'required binary/library %s' % (
            ' '.join(binary_args))
        try:
            proc = subprocess.Popen(
                binary_args,
                stdin=mininet_test_util.DEVNULL,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                close_fds=True)
            proc_out, proc_err = proc.communicate()
            binary_output = proc_out.decode()
            if proc_err is not None:
                binary_output += proc_err.decode()
        except subprocess.CalledProcessError:
            # Might have run successfully, need to parse output
            pass
        except OSError:
            print('could not run %s' % required_binary)
            return False
        present_match = re.search(binary_present_re, binary_output)
        if not present_match:
            print('%s not present or did not return expected string %s (%s)' % (
                required_binary, binary_present_re, binary_output))
            return False
        if binary_version_re:
            version_match = re.search(binary_version_re, binary_output)
            if version_match is None:
                print('could not get version from %s (%s)' % (
                    required_binary, binary_output))
                return False
            try:
                binary_version = version_match.group(1)
            except ValueError:
                print('cannot parse version %s for %s' % (
                    version_match, required_binary))
                return False
            if version.parse(binary_version) < version.parse(binary_minversion):
                print('%s version %s is less than required version %s' % (
                    required_binary, binary_version, binary_minversion))
                return False
    return True
Example #21
def _mac_version():
    from packaging.version import parse
    from platform import mac_ver
    ver = parse(mac_ver()[0])

    if parse("10.15") > ver >= parse("10.14"):
        return {"mac_version": "mojave"}
    if parse("10.14") > ver >= parse("10.13"):
        return {"mac_version": "highsierra"}
    if parse("10.13") > ver >= parse("10.12"):
        return {"mac_version": "sierra"}
    if parse("10.12") > ver >= parse("10.11"):
        return {"mac_version": "elcapitan"}
Example #22
    def test_runner_uses_source_from_filename(self):
        doc = Document()
        source = nbformat.v4.new_notebook()
        result = {}
        def load(filename):
            handler = bahn.NotebookHandler(filename=filename)
            handler.modify_document(doc)
            result['handler'] = handler
            result['filename'] = filename
        with_script_contents(source, load)

        assert result['handler']._runner.path == result['filename']
        if version.parse(nbconvert.__version__) < version.parse("5.4"):
            assert result['handler']._runner.source == "\n# coding: utf-8\n"
        else:
            assert result['handler']._runner.source == "#!/usr/bin/env python\n# coding: utf-8\n"
        assert not doc.roots
Example #23
    def test_storage_celery_save(self):
        """
        Make sure it actually works when using Celery as a task queue
        """
        storage = QueuedStorage(
            local='django.core.files.storage.FileSystemStorage',
            remote='django.core.files.storage.FileSystemStorage',
            local_options=dict(location=self.local_dir),
            remote_options=dict(location=self.remote_dir))

        field = models.TestModel._meta.get_field('testfile')
        field.storage = storage

        obj = models.TestModel()
        obj.testfile.save(self.test_file_name, File(self.test_file))
        obj.save()


        self.assertTrue(obj.testfile.storage.result.get())
        self.assertTrue(path.isfile(path.join(self.local_dir, obj.testfile.name)))
        self.assertTrue(
            path.isfile(path.join(self.remote_dir, obj.testfile.name)),
            "Remote file is not available.")
        self.assertFalse(storage.using_local(obj.testfile.name))
        self.assertTrue(storage.using_remote(obj.testfile.name))

        self.assertEqual(self.test_file_name,
                         storage.get_valid_name(self.test_file_name))
        self.assertEqual(self.test_file_name,
                         storage.get_available_name(self.test_file_name))

        subdir_path = os.path.join('test', self.test_file_name)
        self.assertTrue(storage.exists(subdir_path))
        self.assertEqual(storage.path(self.test_file_name),
                         path.join(self.local_dir, self.test_file_name))
        self.assertEqual(storage.listdir('test')[1], [self.test_file_name])
        self.assertEqual(storage.size(subdir_path),
                         os.stat(self.test_file_path).st_size)
        self.assertEqual(storage.url(self.test_file_name), self.test_file_name)

        if version.parse(DJANGO_VERSION) in SpecifierSet("<=2.0"):
            self.assertIsInstance(storage.accessed_time(subdir_path), datetime)
            self.assertIsInstance(storage.created_time(subdir_path), datetime)
            self.assertIsInstance(storage.modified_time(subdir_path), datetime)
        else:
            self.assertIsInstance(storage.get_accessed_time(subdir_path), datetime)
            self.assertIsInstance(storage.get_created_time(subdir_path), datetime)
            self.assertIsInstance(storage.get_modified_time(subdir_path), datetime)

        subdir_name = 'queued_storage_2.txt'
        testfile = storage.open(subdir_name, 'w')
        try:
            testfile.write('test')
        finally:
            testfile.close()
        self.assertTrue(storage.exists(subdir_name))
        storage.delete(subdir_name)
        self.assertFalse(storage.exists(subdir_name))
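For reference, a minimal sketch of the SpecifierSet check used above to pick the old or new file-time API:

from packaging.specifiers import SpecifierSet
from packaging.version import parse

assert parse("1.11.5") in SpecifierSet("<=2.0")       # old accessed_time()/created_time() branch
assert parse("2.2") not in SpecifierSet("<=2.0")      # get_accessed_time() branch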
Example #24
 def check(version):
     try:
         url_pattern = 'https://pypi.python.org/pypi/deepctr/json'
         req = requests.get(url_pattern)
         latest_version = parse('0')
         version = parse(version)
         if req.status_code == requests.codes.ok:
             j = json.loads(req.text.encode('utf-8'))
             releases = j.get('releases', [])
             for release in releases:
                 ver = parse(release)
                 if not ver.is_prerelease:
                     latest_version = max(latest_version, ver)
             if latest_version > version:
                 logging.warning('\nDeepCTR version {0} detected. Your version is {1}.\nUse `pip install -U deepctr` to upgrade.Changelog: https://github.com/shenweichen/DeepCTR/releases/tag/v{0}'.format(
                     latest_version, version))
     except Exception:
         return
Example #25
File: utils.py  Project: roderik/bigchaindb
def tendermint_version_is_compatible(running_tm_ver):
    """
    Check Tendermint compatibility with BigchainDB server

    :param running_tm_ver: Version number of the connected Tendermint instance
    :type running_tm_ver: str
    :return: True/False depending on the compatibility with BigchainDB server
    :rtype: bool
    """

    # Splitting because version can look like this e.g. 0.22.8-40d6dc2e
    tm_ver = running_tm_ver.split('-')
    if not tm_ver:
        return False
    for ver in __tm_supported_versions__:
        if version.parse(ver) == version.parse(tm_ver[0]):
            return True
    return False
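A short sketch of the normalisation above for a build-suffixed version string (example value only):

from packaging import version

running_tm_ver = "0.22.8-40d6dc2e"
base = running_tm_ver.split('-')[0]                    # "0.22.8"
assert version.parse(base) == version.parse("0.22.8")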
Example #26
File: suggestions.py  Project: kiibohd/kll
 def show(self):
     '''
     Show relevant suggestions based on the requested versions
     '''
     sorted_keys = sorted([version.parse(suggestion) for suggestion in suggestions.keys() if version.parse(suggestion) > self.file_version])
     for key in sorted_keys:
         print("\033[1m === {} === \033[0m".format(key))
         for line in suggestions[str(key)]:
             print(line)
         print("")
Example #27
 def _acceptable_interpreter_constraints(self):
   default_constraints = PythonSetup.global_instance().interpreter_constraints
   whitelisted_constraints = self.get_options().interpreter_constraints_whitelist
   # The user wants to lint everything.
   if whitelisted_constraints == []:
     return []
   # The user did not pass a whitelist option.
   elif whitelisted_constraints is None:
     whitelisted_constraints = ()
   return [version.parse(v) for v in default_constraints + whitelisted_constraints]
Example #28
 def _constraints_are_whitelisted(self, constraint_tuple):
   """
   Detect whether a tuple of compatibility constraints
   matches constraints imposed by the merged list of the global
   constraints from PythonSetup and a user-supplied whitelist.
   """
   if self._acceptable_interpreter_constraints == []:
     # The user wants to lint everything.
     return True
   return all(version.parse(constraint) in self._acceptable_interpreter_constraints
          for constraint in constraint_tuple)
Example #29
def find_openbuildservice_pkg_version(published_xml, pkg_name):
    """find the version in the openbuildservice published xml for the given
    pkg name"""
    import pymod2pkg
    import xml.etree.ElementTree as ET

    if published_xml and os.path.exists(published_xml):
        with open(published_xml) as f:
            tree = ET.fromstring(f.read())

        distro_pkg_name = pymod2pkg.module2package(pkg_name, 'suse')
        for child in tree:
            if not child.attrib['name'].startswith('_') and \
               child.attrib['name'].endswith('.rpm') and not \
               child.attrib['name'].endswith('.src.rpm'):
                (name, ver, release, epoch, arch) = _rpm_split_filename(
                    child.attrib['name'])
                if name == distro_pkg_name:
                    return version.parse(ver)
    return version.parse('0')
Example #30
def validate_community(specs, instance):
  validation_result = {}
  status_text = ''
  status = ''
  text_result = ''
  try:
    validator = jsonschema.validators.validator_for(specs[instance['api']]['schema']) 
    validator.check_schema(specs[instance['api']]['schema'])
    v = validator(specs[instance['api']]['schema'])
    result = v.iter_errors(instance)
    has_error = False
    for error in sorted(result,key=str):
      if not has_error:
        text_result = '<ul>'
      has_error = True
      text_result = '%s<li>Error in %s: %s</li>' % (text_result, '->'.join(str(path) for path in error.path), error.message)

    if has_error:
      text_result = '%s</ul>' % (text_result)
      status = 'invalid'
      status_text = 'Invalid'
    elif version.parse(instance['api']) < version.parse('0.4.0'):
      status = 'warning'
      status_text = 'Warning'
      text_result = 'API version too old! You should upgrade your file'
    # TODO: Check lastchange date
    #elif instance['lastchange']:
    #  status = 'warning'
    #  status_text = 'Warning'
    #  text_result = 'No Update on API file for more than 2 month!'
    else:
      status = 'valid'
      status_text = 'Valid'

    validation_result['status_text'] = status_text
    validation_result['status'] = status
    validation_result['result'] = text_result
    return validation_result

  except KeyError as e:
    print('Invalid or unknown API version %s: %s' % (instance['api'], url))
Example #31
 def __init__(self, dvc_dir):
     self.dvc_dir = dvc_dir
     self.updater_file = os.path.join(dvc_dir, self.UPDATER_FILE)
     self.lock = Lock(self.updater_file + ".lock",
                      tmp_dir=os.path.join(dvc_dir, "tmp"))
     self.current = version.parse(__version__).base_version
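For context, base_version keeps only the epoch and release segment, dropping dev/pre/post/local parts (illustrative value, not dvc's actual version):

from packaging import version

assert version.parse("0.91.1.dev0+gabc123").base_version == "0.91.1"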
Example #32
File: dashboard.py  Project: wjjmjh/optuna
from optuna.study import StudyDirection
import optuna.trial


with try_import() as _imports:
    from bokeh import __version__ as bokeh_version
    import bokeh.command.bootstrap
    import bokeh.document  # NOQA
    import bokeh.layouts
    import bokeh.models
    import bokeh.models.widgets
    import bokeh.plotting
    import bokeh.themes
    import tornado.gen

    if version.parse(bokeh_version) >= version.parse("2.0.0"):
        raise ImportError(
            "Your version of bokeh is " + bokeh_version + " . "
            "Please install bokeh version earlier than 2.0.0. "
            "Bokeh can be installed by executing `$ pip install 'bokeh<2.0.0'`. "
            "For further information, please refer to the installation guide of bokeh. ",
            name="bokeh",
        )


_mode = None  # type: Optional[str]
_study = None  # type: Optional[optuna.study.Study]

_HEADER_FORMAT = """
<style>
body {{
Example #33
#!/usr/bin/env python
import colored_traceback.always

import os
import sys
import cv2
import scipy.io
import numpy as np
import itertools
import shutil
import threading
import time
import schedule
import tensorflow as tf
from packaging import version
assert version.parse(tf.__version__) > version.parse("1.0.0"), \
    "Tensorflow version >= 1.0.0 required"

from drl.config import parse_flags

FLAGS = parse_flags()

import gym
import gym_offroad_nav.envs

from drl.monitor import Monitor
# Show how each agent behaves in a separate monitor thread
monitor = Monitor()
monitor.start()

from drl.ac.estimators import get_estimator
Example #34
def main():
    """Create the model and start the evaluation process."""

    args = get_arguments()

    gpu0 = args.gpu

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    if args.model == 'DeeplabMulti':
        model = DeeplabMulti(num_classes=args.num_classes)
    elif args.model == 'Oracle':
        model = Res_Deeplab(num_classes=args.num_classes)
        if args.restore_from == RESTORE_FROM:
            args.restore_from = RESTORE_FROM_ORC
    elif args.model == 'DeeplabVGG':
        model = DeeplabVGG(num_classes=args.num_classes)
        if args.restore_from == RESTORE_FROM:
            args.restore_from = RESTORE_FROM_VGG

    if args.restore_from[:4] == 'http':
        saved_state_dict = model_zoo.load_url(args.restore_from)
    else:
        saved_state_dict = torch.load(args.restore_from)
    ### for running different versions of pytorch
    model_dict = model.state_dict()
    saved_state_dict = {
        k: v
        for k, v in saved_state_dict.items() if k in model_dict
    }
    model_dict.update(saved_state_dict)
    ###
    model.load_state_dict(saved_state_dict)

    model.eval()
    model.cuda(gpu0)

    testloader = data.DataLoader(cityscapesDataSet(args.data_dir,
                                                   args.data_list,
                                                   crop_size=(1024, 512),
                                                   mean=IMG_MEAN,
                                                   scale=False,
                                                   mirror=False,
                                                   set=args.set),
                                 batch_size=1,
                                 shuffle=False,
                                 pin_memory=True)

    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(1024, 2048),
                             mode='bilinear',
                             align_corners=True)
    else:
        interp = nn.Upsample(size=(1024, 2048), mode='bilinear')

    for index, batch in enumerate(testloader):
        if index % 100 == 0:
            print('%d processed' % index)
        image, _, name = batch
        if args.model == 'DeeplabMulti':
            output1, output2 = model(Variable(image, volatile=True).cuda(gpu0))
            output = interp(output2).cpu().data[0].numpy()
        elif args.model == 'DeeplabVGG' or args.model == 'Oracle':
            output = model(Variable(image, volatile=True).cuda(gpu0))
            output = interp(output).cpu().data[0].numpy()

        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)

        output_col = colorize_mask(output)
        output = Image.fromarray(output)

        name = name[0].split('/')[-1]
        output.save('%s/%s' % (args.save, name))
        output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
Example #35
File: __init__.py  Project: wzpy/plotoptix
logging.basicConfig(level=logging.WARN, format='[%(levelname)s] (%(threadName)-10s) %(message)s')

# import PlotOptiX modules ###############################################
from plotoptix.enums import *
from plotoptix.npoptix import NpOptiX
from plotoptix.tkoptix import TkOptiX

# check PlotOptiX updates ################################################
import json
import urllib.request
from packaging import version

try:
    url = "https://pypi.python.org/pypi/plotoptix/json"
    webURL = urllib.request.urlopen(url, timeout=3)
    data = webURL.read()
    encoding = webURL.info().get_content_charset('utf-8')
    data_dict = json.loads(data.decode(encoding))
    versions = list(data_dict["releases"].keys())
    versions.sort(key=version.parse)

    if version.parse(__version__) < version.parse(versions[-1]):
        print(80 * "*")
        print("PlotOptiX newer version is available:", versions[-1])
        print("to update your release use:")
        print("      pip install plotoptix --upgrade")
        print(80 * "*")

except: pass
Example #36
#   See the License for the specific language governing permissions and
#   limitations under the License.

from cil.framework import ImageGeometry
from cil.optimisation.functions import L2NormSquared, L1Norm
import numpy as np
import matplotlib.pyplot as plt
from cil.utilities import dataexample
from cil.utilities import noise
import os
import sys
import unittest
import warnings
from cil.utilities.quality_measures import mse, mae, psnr
from packaging import version
if version.parse(np.version.version) >= version.parse("1.13"):
    try:
        from skimage import data, io, filters
        from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
        has_skimage = True
    except ImportError as ie:
        has_skimage = False
else:
    has_skimage = False

class CCPiTestClass(unittest.TestCase):
        
    def assertNumpyArrayEqual(self, first, second):
        res = True
        try:
            np.testing.assert_array_equal(first, second)
Example #37
    'scotland']

# Parametrized
WINDOW_COUNT = 3
WINDOW_LENGTH = 10
SAMPLE_LENGTH = WINDOW_LENGTH * S_RATE

######################################################

os.environ['WANDB_PROJECT'] = 'w2v_did'
os.environ['WANDB_LOG_MODEL'] = 'true'

if is_apex_available():
    pass

if version.parse(torch.__version__) >= version.parse("1.6"):
    _is_native_amp_available = True

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
Example #38
from __future__ import print_function
import os
import sys
import io
import datetime
from packaging import version
import numpy as np
import matplotlib.pyplot as plt
from math import pi
import tensorflow as tf

print(__doc__)

# Display tensorflow version
print("TensorFlow version: ", tf.version.VERSION)
assert version.parse(tf.version.VERSION).release[0] >= 2, \
    "This notebook requires TensorFlow 2.0 or above."


def train_step(x_input, y_input, a, ratio_learn):
    with tf.GradientTape() as tape:
        loss = tf.square(tf.math.multiply(x_input, a) - y_input)

    # compute the gradient from the tape recorded above (TF2 eager style)
    dloss_da = tape.gradient(loss, [a])
    optimizer = tf.keras.optimizers.SGD(learning_rate=ratio_learn)
    optimizer.apply_gradients(zip(dloss_da, [a]))


# Create data
x_vals = np.random.normal(1, 0.1, 100)
y_vals = np.repeat(10., 100)
Example #39
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'contents'

# General information about the project.
project = 'scikit-learn'
copyright = '2007 - 2019, scikit-learn developers (BSD License)'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
parsed_version = parse(sklearn.__version__)
version = ".".join(parsed_version.base_version.split(".")[:2])
# The full version, including alpha/beta/rc tags.
# Removes post from release name
if parsed_version.is_postrelease:
    release = parsed_version.base_version
else:
    release = sklearn.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
Example #40
    def __generate_custom_profile_devices_configuration(self, instance):
        '''
        template = '[ {% for net in networks %}' \
                '{"eth{{loop.index -1 }}": ' \
                '{"name": "eth{{loop.index -1}}",' \
                '"type" : "nic",'  \
                '"parent": "{{ net.intf_name }}",' \
                '"nictype": "bridged" ,' \
                '"hwaddr" : {{ net.mac }} }"
                '{% endfor %} ]'
        '''
        devices = {}
        template_value_bridge = '{"name":"%s","type":"nic","parent":"%s","nictype":"bridged"}'
        template_value_bridge_mac = '{"name":"%s","type":"nic","parent":"%s","nictype":"bridged","hwaddr":"%s"}'
        template_value_phy = '{"name":"%s","type":"nic","parent":"%s","nictype":"physical"}'
        template_value_macvlan = '{"name":"%s","type":"nic","parent":"%s","nictype":"macvlan"}'

        '''
        # Create tenant's storage pool
        # TODO: allow more storage backends
        lxc storage create $TENANT dir

        # Add a root disk to the tenant's profile
        lxc profile device add $TENANT root disk path=/ pool=$TENANT
        
        '''

        template_disk = '{"path":"%s","type":"disk","pool":"%s"}'

        template_key = '%s'
        template_key2 = "eth%d"
        for i, n in enumerate(instance.networks):
            if n.get('network_uuid') is not None:
                nws = self.agent.get_network_plugin(None).get(list(self.agent.get_network_plugin(None).keys())[0])
                # print(nws.getNetworkInfo(n.get('network_uuid')))
                br_name = nws.get_network_info(n.get('network_uuid')).get('virtual_device')
                # print(br_name)
                n.update({'br_name': br_name})
                if n.get('intf_name') is None:
                    n.update({'intf_name': "eth"+str(i)})
                #nw_k = str(template_key % n.get('intf_name'))
                nw_k = str(template_key2 % i)
                if n.get('mac') is not None:
                    nw_v = json.loads(str(template_value_bridge_mac % (n.get('intf_name'), n.get('br_name'), n.get('mac'))))
                else:
                    nw_v = json.loads(str(template_value_bridge % (n.get('intf_name'), n.get('br_name'))))

            elif self.agent.get_os_plugin().get_intf_type(n.get('br_name')) in ['ethernet']:
                #if n.get('')
                #cmd = "sudo ip link add name %s link %s type macvlan"
                #veth_name = str('veth-%s' % entity.uuid[:5])
                #cmd = str(cmd % (veth_name, n.get('br_name')))
                #self.agent.getOSPlugin().executeCommand(cmd, True)
                #nw_v = json.loads(str(template_value_phy % (n.get('intf_name'), veth_name)))
                nw_v = json.loads(str(template_value_macvlan % (n.get('intf_name'), n.get('br_name'))))
                #nw_k = str(template_key % n.get('intf_name'))
                nw_k = str(template_key2 % i)
                #self.agent.getOSPlugin().set_interface_unaviable(n.get('br_name'))
            elif self.agent.get_os_plugin().get_intf_type(n.get('br_name')) in ['wireless']:
                nw_v = json.loads(str(template_value_phy % (n.get('intf_name'), n.get('br_name'))))
                #nw_k = str(template_key % n.get('intf_name'))
                nw_k = str(template_key2 % i)
                self.agent.get_os_plugin().set_interface_unaviable(n.get('br_name'))
            else:
                if n.get('intf_name') is None:
                    n.update({'intf_name': "eth" + str(i)})
                #nw_k = str(template_key % n.get('intf_name'))
                nw_k = str(template_key2 % i)
                nw_v = json.loads(str(template_value_bridge % (n.get('intf_name'), n.get('br_name'))))

            devices.update({nw_k: nw_v})

        lxd_version = self.conn.host_info['environment']['server_version']
        if version.parse(lxd_version) >= version.parse("2.20"):
            if instance.storage is None or len(instance.storage) == 0:
                st_n = "root"
                st_v = json.loads(str(template_disk % ("/","default")))
                devices.update({st_n: st_v})
            else:
                for s in instance.storage:
                    st_n = s.get("name")
                    st_v = json.loads(str(template_disk % (s.get("path"), s.get("pool"))))
                    devices.update({st_n: st_v})

        #devices = Environment().from_string(template)
        #devices = devices.render(networks=entity.networks)
        return devices
Example #41
class TestCreateUserTask(TestCase):
    """
    Tests of UserTaskStatus creation for new UserTasks.
    """
    def tearDown(self):
        super(TestCreateUserTask, self).tearDown()
        SIGNAL_DATA.clear()

    @classmethod
    def setUpTestData(cls):
        super(TestCreateUserTask, cls).setUpTestData()
        cls.user = User.objects.create_user('test_user', '*****@*****.**',
                                            'password')

    def test_create_user_task(self):
        """The create_user_task signal handler should create a new UserTaskStatus record"""
        self._create_user_task(eager=False)

    @override_settings(CELERY_ALWAYS_EAGER=True)
    def test_create_user_task_eager(self):
        """Eager tasks should still have UserTaskStatus records created on execution."""
        self._create_user_task(eager=True)
        assert SIGNAL_DATA['received_status'].state == UserTaskStatus.SUCCEEDED

    def test_create_group(self):
        """The create_user_task signal handler should correctly handle celery groups"""
        self._create_group(eager=False)

    @pytest.mark.skipif(
        CELERY_VERSION < version.parse('4.0'),
        reason=
        "celery 3.1 doesn't provide accurate group metadata to eager tasks")
    @override_settings(CELERY_ALWAYS_EAGER=True)
    def test_create_group_eager(self):
        """Eager groups should still have UserTaskStatus records created on execution."""
        self._create_group(eager=True)

    def test_create_chain(self):
        """The create_user_task signal handler should correctly handle celery chains."""
        self._create_chain(eager=False)

    @pytest.mark.skipif(
        CELERY_VERSION < version.parse('4.0'),
        reason=
        "celery 3.1 doesn't provide accurate chain metadata to eager tasks")
    @override_settings(CELERY_ALWAYS_EAGER=True)
    def test_create_chain_eager(self):
        """Eager chains should still have UserTaskStatus records created on execution."""
        self._create_chain(eager=True)

    def test_create_chord(self):
        """The create_user_task signal handler should correctly handle celery chords"""
        self._create_chord(eager=False)

    @pytest.mark.skipif(
        CELERY_VERSION < version.parse('4.0'),
        reason=
        "celery 3.1 doesn't provide accurate chord metadata to eager tasks")
    @override_settings(CELERY_ALWAYS_EAGER=True)
    def test_create_chord_eager(self):
        """Eager chords should still have UserTaskStatus records created on execution."""
        self._create_chord(eager=True)

    def test_create_chord_exclude_body(self):
        """If the body task of a chord is not a UserTask, it should be cleanly omitted from the status."""
        chord([
            sample_task.s(self.user.id, '1', user_task_name='Chord: 1 & 2'),
            sample_task.s(self.user.id,
                          '2',
                          user_task_name='I should be ignored')
        ])(normal_task.s('3'))
        assert UserTaskStatus.objects.count() == 4
        chord_status = UserTaskStatus.objects.get(task_class='celery.chord')
        assert chord_status.task_id
        assert chord_status.parent is None
        assert chord_status.is_container
        assert chord_status.name == 'Chord: 1 & 2'
        assert chord_status.total_steps == 2
        verify_state(chord_status, False)

        group_status = UserTaskStatus.objects.get(task_class='celery.group')
        assert group_status.task_id
        assert group_status.parent_id == chord_status.id
        assert group_status.is_container
        assert group_status.name == 'Chord: 1 & 2'
        assert group_status.total_steps == 2
        verify_state(group_status, False)

        header_tasks = UserTaskStatus.objects.filter(parent=group_status)
        assert len(header_tasks) == 2
        for status in header_tasks:
            assert status.task_id
            assert status.parent_id == group_status.id
            assert not status.is_container
            assert status.name in ['SampleTask: 1', 'SampleTask: 2']
            assert status.total_steps == 1
            verify_state(status, False)

    def test_missing_user_id(self):
        """Queueing of the task should fail if the user ID is not provided."""
        with pytest.raises(TypeError) as exc_info:
            missing_user_id.delay()
        assert str(
            exc_info.value
        ) == 'Each invocation of a UserTaskMixin subclass must include the user_id'

    def test_invalid_user_id(self):
        """Queueing of the task should fail if an invalid user ID is given."""
        with pytest.raises(TypeError) as exc_info:
            sample_task.delay('arg1', 'arg2')
        assert str(exc_info.value) == 'Invalid user_id: arg1'

    def test_non_user_task_publish(self):
        """Non-UserTask tasks should still pass through the before_task_publish handler cleanly."""
        normal_task.delay('Argument')
        statuses = UserTaskStatus.objects.all()
        assert not statuses

    @override_settings(CELERY_ALWAYS_EAGER=True, CELERY_IGNORE_RESULT=False)
    def test_non_user_task_success(self):
        """Non-UserTask tasks should still pass through start and success handlers cleanly."""
        result = normal_task.delay('Argument')
        assert result.get() == 'placeholder'
        statuses = UserTaskStatus.objects.all()
        assert not statuses

    def _create_user_task(self, eager):
        """Create a task based on UserTaskMixin and verify some assertions about its corresponding status."""
        result = sample_task.delay(self.user.id, 'Argument')
        statuses = UserTaskStatus.objects.all()
        assert len(statuses) == 1
        status = statuses[0]
        assert status.task_id == result.id
        assert status.task_class == 'test_signals.sample_task'
        assert status.user_id == self.user.id
        assert status.parent is None
        assert not status.is_container
        assert status.name == 'SampleTask: Argument'
        assert status.total_steps == 1
        verify_state(status, eager)

    def _create_chain(self, eager):
        """Create a celery chain and verify some assertions about the corresponding status records"""
        chain(
            sample_task.si(self.user.id, '1'),
            sample_task.si(self.user.id, '2', user_task_name='Chain: 1, 2, 3'),
            sample_task.si(self.user.id, '3'),
            normal_task.si('Argument')).delay()
        assert UserTaskStatus.objects.count() == 4
        chain_status = UserTaskStatus.objects.get(task_class='celery.chain')
        assert chain_status.task_id
        assert chain_status.parent is None
        assert chain_status.is_container
        assert chain_status.name == 'Chain: 1, 2, 3'
        assert chain_status.total_steps == 3
        verify_state(chain_status, eager)

        children = UserTaskStatus.objects.filter(parent=chain_status)
        assert len(children) == 3
        for status in children:
            assert not status.is_container
            assert status.name in [
                'SampleTask: 1', 'SampleTask: 2', 'SampleTask: 3'
            ]
            assert status.total_steps == 1
            verify_state(status, eager)

    def _create_chord(self, eager):
        """Create a celery chord and verify some assertions about the corresponding status records"""
        chord([
            sample_task.s(self.user.id, '1'),
            sample_task.s(self.user.id,
                          '2',
                          user_task_name='Chord: 1 & 2, then 3')
        ])(sample_task.s(self.user.id, '3'))
        assert UserTaskStatus.objects.count() == 5
        chord_status = UserTaskStatus.objects.get(task_class='celery.chord')
        assert chord_status.task_id
        assert chord_status.parent is None
        assert chord_status.is_container
        assert chord_status.name == 'Chord: 1 & 2, then 3'
        assert chord_status.total_steps == 3
        verify_state(chord_status, eager)

        group_status = UserTaskStatus.objects.get(task_class='celery.group')
        assert group_status.task_id
        assert group_status.parent_id == chord_status.id
        assert group_status.is_container
        assert group_status.name == 'Chord: 1 & 2, then 3'
        assert group_status.total_steps == 2
        verify_state(group_status, eager)

        header_tasks = UserTaskStatus.objects.filter(parent=group_status)
        assert len(header_tasks) == 2
        for status in header_tasks:
            assert status.task_id
            assert status.parent_id == group_status.id
            assert not status.is_container
            assert status.name in ['SampleTask: 1', 'SampleTask: 2']
            assert status.total_steps == 1
            verify_state(status, eager)

        body_status = UserTaskStatus.objects.get(parent=chord_status,
                                                 is_container=False)
        assert body_status.task_id
        assert body_status.name == 'SampleTask: 3'
        assert body_status.total_steps == 1
        verify_state(body_status, eager)

    def _create_group(self, eager):
        """Create a celery group and verify some assertions about the corresponding status records"""
        result = group(
            sample_task.s(self.user.id, '1'),
            sample_task.s(self.user.id, '2',
                          user_task_name='Group: 1, 2')).delay()
        assert UserTaskStatus.objects.count() == 3
        group_status = UserTaskStatus.objects.get(task_class='celery.group')
        assert group_status.task_id == result.id
        assert group_status.parent is None
        assert group_status.is_container
        assert group_status.name == 'Group: 1, 2'
        assert group_status.total_steps == 2
        verify_state(group_status, eager)

        assert len(result.children) == 2
        for result in result.children:
            task_id = result.id
            status = UserTaskStatus.objects.get(task_id=task_id)
            assert status.parent_id == group_status.id
            assert not status.is_container
            assert status.name in ['SampleTask: 1', 'SampleTask: 2']
            assert status.total_steps == 1
            verify_state(status, eager)
Example #42
import logging

import mock
import pytest
from celery import __version__ as celery_version
from celery import chain, chord, group, shared_task
from packaging import version

from django.contrib.auth.models import User
from django.db import transaction
from django.test import TestCase, TransactionTestCase, override_settings

from user_tasks import user_task_stopped
from user_tasks.models import UserTaskStatus
from user_tasks.signals import start_user_task
from user_tasks.tasks import UserTask

CELERY_VERSION = version.parse(celery_version)
LOGGER = logging.getLogger(__name__)
USER_ID = 1


class SampleTask(UserTask):  # pylint: disable=abstract-method
    """
    Small UserTask subclass for use in test cases.
    """
    @classmethod
    def generate_name(cls, arguments_dict):
        return 'SampleTask: {}'.format(arguments_dict['argument'])

    @staticmethod
    def calculate_total_steps(arguments_dict):
        return arguments_dict['kwargs'].get(
Example #43
class TestQualityMeasures(CCPiTestClass):
    
    def setUp(self):
        print ("SETUP", np.version.version)
        if has_skimage:

            id_coins = dataexample.CAMERA.get()

            id_coins_noisy = noise.gaussian(id_coins, var=0.05, seed=10)
            
            ig = id_coins.geometry.copy()
            dc1 = ig.allocate('random')
            dc2 = ig.allocate('random')

            self.dc1 = dc1
            self.dc2 = dc2
            self.id_coins = id_coins
            self.id_coins_noisy = id_coins_noisy

    @unittest.skipIf(version.parse(np.version.version) < version.parse("1.13"), "Skip test with numpy < 1.13")
    def test_mse1(self):
        if has_skimage:
            #%%  Check Mean Squared error for random image and images
            res1 = mse(self.id_coins, self.id_coins_noisy)
            res2 = mean_squared_error(self.id_coins.as_array(), self.id_coins_noisy.as_array())
            print('Check MSE for CAMERA image gaussian noise')
            np.testing.assert_almost_equal(res1, res2, decimal=5)
        else:
            self.skipTest("scikit0-image not present ... skipping")
    @unittest.skipIf(version.parse(np.version.version) < version.parse("1.13"), "Skip test with numpy < 1.13")
    def test_mse2(self):
        if has_skimage:
            #%%  Check Mean Squared error for random image and images

            res1 = mse(self.dc1, self.dc2)
            res2 = mean_squared_error(self.dc1.as_array(), self.dc2.as_array())
            print('Check MSE for random ImageData')
            np.testing.assert_almost_equal(res1, res2, decimal=5)

        else:
            self.skipTest("scikit0-image not present ... skipping")
    @unittest.skipIf(version.parse(np.version.version) < version.parse("1.13"), "Skip test with numpy < 1.13")
    def test_psnr1(self):
        if has_skimage:

            res1 = psnr(self.id_coins, self.id_coins_noisy, data_range = self.dc1.max())
            res2 = peak_signal_noise_ratio(self.id_coins.as_array(), self.id_coins_noisy.as_array())
            print('Check PSNR for CAMERA image gaussian noise')
            np.testing.assert_almost_equal(res1, res2, decimal=3)
        else:
            self.skipTest("scikit0-image not present ... skipping")
    @unittest.skipIf(version.parse(np.version.version) < version.parse("1.13"), "Skip test with numpy < 1.13")
    def test_psnr2(self):
        if has_skimage:

            res1 = psnr(self.dc1, self.dc2, data_range = self.dc1.max())
            res2 = peak_signal_noise_ratio(self.dc1.as_array(), self.dc2.as_array())
            print('Check PSNR for random ImageData')
            np.testing.assert_almost_equal(res1, res2, decimal=3)

        else:
            self.skipTest("scikit0-image not present ... skipping")
Example #44
async def upgrade(settings: dict):
    """Perform upgradation steps."""
    context_builder = DefaultContextBuilder(settings)
    context = await context_builder.build_context()
    try:
        version_upgrade_config_inst = VersionUpgradeConfig(
            settings.get("upgrade.config_path"))
        upgrade_configs = version_upgrade_config_inst.upgrade_configs
        root_profile, public_did = await wallet_config(context)
        version_storage_record = None
        upgrade_to_version = f"v{__version__}"
        versions_found_in_config = upgrade_configs.keys()
        sorted_versions_found_in_config = sorted(
            versions_found_in_config, key=lambda x: package_version.parse(x))
        async with root_profile.session() as session:
            storage = session.inject(BaseStorage)
            try:
                version_storage_record = await storage.find_record(
                    type_filter=RECORD_TYPE_ACAPY_VERSION, tag_query={})
                upgrade_from_version = version_storage_record.value
                if "upgrade.from_version" in settings:
                    print((f"version {upgrade_from_version} found in storage"
                           ", --from-version will be ignored."))
            except StorageNotFoundError:
                if "upgrade.from_version" in settings:
                    upgrade_from_version = settings.get("upgrade.from_version")
                else:
                    upgrade_from_version = sorted_versions_found_in_config[-1]
                    print("No ACA-Py version found in wallet storage and "
                          "no --from-version specified. Selecting "
                          f"{upgrade_from_version} as --from-version from "
                          "the config.")
        if upgrade_from_version == upgrade_to_version:
            raise UpgradeError(
                f"Version {upgrade_from_version} to upgrade from and "
                f"current version to upgrade to {upgrade_to_version} "
                "are same.")
        if upgrade_from_version not in sorted_versions_found_in_config:
            raise UpgradeError(
                f"No upgrade configuration found for {upgrade_from_version}")
        upgrade_from_version_index = sorted_versions_found_in_config.index(
            upgrade_from_version)
        for config_from_version in sorted_versions_found_in_config[
                upgrade_from_version_index:]:
            print(f"Running upgrade process for {config_from_version}")
            upgrade_config = upgrade_configs.get(config_from_version)
            # Step 1 re-saving all BaseRecord and BaseExchangeRecord
            if "resave_records" in upgrade_config:
                resave_record_paths = upgrade_config.get("resave_records")
                for record_path in resave_record_paths:
                    try:
                        record_type = ClassLoader.load_class(record_path)
                    except ClassNotFoundError as err:
                        raise UpgradeError(
                            f"Unknown Record type {record_path}") from err
                    if not issubclass(record_type, BaseRecord):
                        raise UpgradeError(
                            f"Only BaseRecord can be resaved, found: {str(record_type)}"
                        )
                    async with root_profile.session() as session:
                        all_records = await record_type.query(session)
                        for record in all_records:
                            await record.save(
                                session,
                                reason="re-saving record during ACA-Py upgrade process",
                            )
                        if len(all_records) == 0:
                            print(f"No records of {str(record_type)} found")
                        else:
                            print(
                                f"All records of {str(record_type)} successfully re-saved"
                            )
            # Step 2 Update existing records, if required
            if ("update_existing_records" in upgrade_config
                    and upgrade_config.get("update_existing_records") is True):
                update_existing_recs_callable = (
                    version_upgrade_config_inst.get_update_existing_func(
                        config_from_version))
                if not update_existing_recs_callable:
                    raise UpgradeError("No update_existing_records function "
                                       f"specified for {config_from_version}")
                await update_existing_recs_callable(root_profile)
        # Update storage version
        async with root_profile.session() as session:
            storage = session.inject(BaseStorage)
            if not version_storage_record:
                await storage.add_record(
                    StorageRecord(
                        RECORD_TYPE_ACAPY_VERSION,
                        upgrade_to_version,
                    ))
            else:
                await storage.update_record(version_storage_record,
                                            upgrade_to_version, {})
        await root_profile.close()
    except BaseError as e:
        raise UpgradeError(f"Error during upgrade: {e}")
Example #45
0
    def __init__(
        self,
        device,
        *,
        shots=1000,
        wires=None,
        active_reset=True,
        load_qc=True,
        readout_error=None,
        symmetrize_readout="exhaustive",
        calibrate_readout="plus-eig",
        **kwargs,
    ):
        pl_version = pkg_resources.get_distribution("pennylane").version
        if version.parse(pl_version) >= version.parse("0.14.0.dev"):
            raise ValueError(
                "Using the QPU via PennyLane-Forest is being deprecated \
                    with PennyLane version 0.14.0 and higher.")

        if readout_error is not None and load_qc:
            raise ValueError("Readout error cannot be set on the physical QPU")

        self.readout_error = readout_error

        self._eigs = {}

        self._compiled_program = None
        """Union[None, pyquil.ExecutableDesignator]: the latest compiled program. If parametric
        compilation is turned on, this will be a parametric program."""

        if kwargs.get("parametric_compilation", False):
            # Raise a warning if parametric compilation was explicitly turned on by the user
            # about turning the operator estimation off

            # TODO: Remove the warning and toggling once a migration to the new operator estimation
            # API has been executed. This new API provides compatibility between parametric
            # compilation and operator estimation.
            warnings.warn(
                "Parametric compilation is currently not supported with operator"
                "estimation. Operator estimation is being turned off.")

        self.parametric_compilation = kwargs.get("parametric_compilation",
                                                 True)

        if self.parametric_compilation:
            self._compiled_program_dict = {}
            """dict[int, pyquil.ExecutableDesignator]: stores circuit hashes associated
                with the corresponding compiled programs."""

            self._parameter_map = {}
            """dict[str, float]: stores the string of symbolic parameters associated with
                their numeric values. This map will be used to bind parameters in a parametric
                program using PyQuil."""

            self._parameter_reference_map = {}
            """dict[str, pyquil.quilatom.MemoryReference]: stores the string of symbolic
                parameters associated with their PyQuil memory references."""

        timeout = kwargs.pop("timeout", None)

        if shots <= 0:
            raise ValueError("Number of shots must be a positive integer.")

        self.connection = super()._get_connection(**kwargs)

        if load_qc:
            self.qc = get_qc(device, as_qvm=False, connection=self.connection)
            if timeout is not None:
                self.qc.compiler.quilc_client.timeout = timeout
        else:
            self.qc = get_qc(device, as_qvm=True, connection=self.connection)
            if timeout is not None:
                self.qc.compiler.client.timeout = timeout

        self.num_wires = len(self.qc.qubits())

        if wires is None:
            # infer the number of modes from the device specs
            # and use consecutive integer wire labels
            wires = range(self.num_wires)

        if isinstance(wires, int):
            raise ValueError(
                "Device has a fixed number of {} qubits. The wires argument can only be used "
                "to specify an iterable of wire labels.".format(
                    self.num_wires))

        if self.num_wires != len(wires):
            raise ValueError("Device has a fixed number of {} qubits and "
                             "cannot be created with {} wires.".format(
                                 self.num_wires, len(wires)))

        super(QVMDevice, self).__init__(wires, shots, **kwargs)

        self.active_reset = active_reset
        self.symmetrize_readout = symmetrize_readout
        self.calibrate_readout = calibrate_readout
        self.wiring = {i: q for i, q in enumerate(self.qc.qubits())}
Example #46
0
    def _is_outdated(self):
        return version.parse(self.current) < version.parse(self.latest)
Example #47
0
def _parse_strict_version(ctx, tag):
    ver = parse(tag)
    if isinstance(ver, Version):
        ctx.state[Version] = ver
        return str(ver)
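The `isinstance(ver, Version)` check above matters because older releases of `packaging` return a `LegacyVersion` from `parse` for strings that are not PEP 440 compliant, while newer releases (22.0 and later) raise `InvalidVersion` instead. A small sketch of a variant that handles both behaviours (the function name is illustrative):

from packaging.version import InvalidVersion, Version, parse


def strict_version(tag):
    """Return a canonical version string, or None if the tag is not PEP 440 compliant."""
    try:
        ver = parse(tag)
    except InvalidVersion:  # packaging >= 22 raises instead of returning LegacyVersion
        return None
    return str(ver) if isinstance(ver, Version) else None


print(strict_version("1.2.3"))          # 1.2.3
print(strict_version("not-a-version"))  # None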
Example #48
0
File: test_io.py Project: vshulyak/pandera
"""Unit tests for io module"""

import platform
import tempfile
from pathlib import Path
from packaging import version

import pandas as pd
import pytest
import yaml
import pandera as pa
from pandera import io

PYYAML_VERSION = version.parse(yaml.__version__)  # type: ignore


def _create_schema(index="single"):

    if index == "multi":
        index = pa.MultiIndex([
            pa.Index(pa.Int, name="int_index0"),
            pa.Index(pa.Int, name="int_index1"),
            pa.Index(pa.Int, name="int_index2"),
        ])
    elif index == "single":
        # make sure io modules can handle case when index name is None
        index = pa.Index(pa.Int, name=None)
    else:
        index = None

    return pa.DataFrameSchema(columns={
Example #49
0
def torch_version() -> Tuple[int, ...]:
    result = version.parse(torch.__version__).release
    assert result
    return result
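`Version.release` exposes the numeric components as a tuple, which turns a minimum-version check into a plain tuple comparison. A minimal illustration (the threshold is arbitrary):

from packaging import version

# .release drops pre-release, dev and local suffixes: "1.13.1+cu117" -> (1, 13, 1)
release = version.parse("1.13.1+cu117").release
print(release >= (1, 8))  # True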
Example #50
0
# -*- Blueprint setup -*-
from packaging.version import parse
from sanic import Blueprint
from sanic.response import html, json
from sanic.exceptions import NotFound

from utils.SiteHelper import JailbreakMap as jMap
from utils.SiteHelper import DeviceMapPG as dMap
from utils.SiteHelper import MinVersionMap as minMap
from utils.SiteHelper import MaxVersionMap as maxMap

HomeBP = Blueprint("HomeBP")
_tools14 = [
    x for x in jMap
    if parse(x.get('minIOS')) <= parse("14.0") <= parse(x.get('maxIOS'))
]
_tools13 = [
    x for x in jMap
    if parse(x.get('minIOS')) <= parse("13.0") <= parse(x.get('maxIOS'))
]
_tools12 = [
    x for x in jMap
    if parse(x.get('minIOS')) <= parse("12.0") <= parse(x.get('maxIOS'))
]
_tools11 = [
    x for x in jMap
    if parse(x.get('minIOS')) <= parse("11.0") <= parse(x.get('maxIOS'))
]
_tools10 = [
    x for x in jMap
    if parse(x.get('minIOS')) <= parse("10.0") <= parse(x.get('maxIOS'))
Example #51
0
KCONFIG_OUTPUT = os.path.abspath(os.environ["KCONFIG_OUTPUT"])

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.

# recommonmark didn't have a setup function before 0.5.0, so source_parsers
# must be added manually
recommonmark_version = pkg_resources.get_distribution("recommonmark").version
if version.parse(recommonmark_version) < version.parse('0.5.0'):
    extensions = ['sphinx.ext.intersphinx', 'breathe', 'sphinx.ext.ifconfig']
else:
    extensions = [
        'sphinx.ext.intersphinx', 'breathe', 'sphinx.ext.ifconfig',
        'recommonmark'
    ]

# Add any paths that contain templates here, relative to this directory.
#templates_path = ['../_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']

if version.parse(recommonmark_version) < version.parse('0.5.0'):
Example #52
0
# third party
from jinja2 import Template
from packaging import version

# this forces the import priority to use site-packages first and current dir last
# this allows us to import torch when calling this file directly since there is a
# subdir here also called torch
del sys.path[0]
sys.path.append("")
# third party
import torch as th  # noqa: E402

# syft absolute
from syft.lib.torch import allowlist  # noqa: E402

TORCH_VERSION = version.parse(th.__version__.split("+")[0])
py_ver = sys.version_info
PYTHON_VERSION = version.parse(f"{py_ver.major}.{py_ver.minor}")
OS_NAME = platform.system().lower()

# we need a file to keep all the errors in that makes it easy to debug failures
TARGET_PLATFORM = f"{PYTHON_VERSION}_{OS_NAME}"
REPORT_FILE_PATH = os.path.abspath(
    Path(__file__) / "../../../.." /
    f"allowlist_report_{TARGET_PLATFORM}.html")

report_path = os.path.abspath((Path(__file__) / "../../../.."))
support_files = glob.glob(
    os.path.join(report_path, "allowlist_test_support_*.jsonl"))

if len(support_files) < 1:
Example #53
0
def run_example(topdir, generator, ci_environment, buildflags, recipe, example):

    # extract global menu
    menu_file = topdir / 'testing' / 'menu.yml'
    skip_global, expect_failure_global, env_global, definitions_global, targets_global = extract_menu_file(
        menu_file, generator, ci_environment)

    sys.stdout.write('\n  {}\n'.format(example))

    # extract local menu
    menu_file = recipe / example / 'menu.yml'
    skip_local, expect_failure_local, env_local, definitions_local, targets_local = extract_menu_file(
        menu_file, generator, ci_environment)

    skip = skip_global or skip_local
    expect_failure = expect_failure_global or expect_failure_local

    # local env vars override global ones
    env = env_global.copy()
    for entry in env_local:
        env[entry] = env_local[entry]

    # local definitions override global ones
    definitions = definitions_global.copy()
    for entry in definitions_local:
        definitions[entry] = definitions_local[entry]

    # Decide configuration from CMAKE_BUILD_TYPE, by default it's Debug
    configuration = definitions[
        'CMAKE_BUILD_TYPE'] if 'CMAKE_BUILD_TYPE' in definitions else 'Debug'

    # local targets extend global targets
    targets = targets_global + targets_local

    for entry in env:
        os.environ[entry] = env[entry]
    definitions_string = ' '.join(
        r'-D{0}="{1}"'.format(entry, os.path.expandvars(definitions[entry]))
        for entry in definitions)

    # we append a time stamp to the build directory
    # to avoid it being re-used when running tests multiple times
    # when debugging on a laptop
    time_stamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d-%H-%M-%S')
    build_directory = recipe / example / 'build-{0}'.format(time_stamp)
    cmakelists_path = recipe / example

    min_cmake_version = get_min_cmake_version(
        cmakelists_path / 'CMakeLists.txt')
    system_cmake_version = get_system_cmake_version()

    if version.parse(system_cmake_version) < version.parse(min_cmake_version):
        sys.stdout.write(
            '\nSKIPPING (system cmake version < min. cmake version for this recipe)\n'
        )
        return 0

    if skip:
        sys.stdout.write('\nSKIPPING recipe (based on menu.yml)\n')
        return 0

    return_code = 0

    custom_script = 'custom.sh'
    custom_script_path = cmakelists_path / custom_script
    if custom_script_path.exists():
        # if this directory contains a custom.sh script, we launch it
        step = custom_script
        command = 'bash "{0}" "{1}"'.format(custom_script_path, build_directory)
        return_code += run_command(
            step=step, command=command, expect_failure=expect_failure)
    else:
        # if there is no custom script, we run tests "normally"

        # configure step
        step = 'configuring'
        command = cmake_configuration_command(cmakelists_path, build_directory,
                                              generator, definitions_string)
        return_code += run_command(
            step=step, command=command, expect_failure=expect_failure)

        base_command = r'cmake --build "{0}"'.format(build_directory)

        # build step
        step = '{0} configuration {1}'.format('building', configuration)
        command = base_command + ' --config {0} -- {1}'.format(
            configuration, buildflags)
        return_code += run_command(
            step=step, command=command, expect_failure=expect_failure)

        # extra targets
        for target in targets:
            step = '{0} configuration {1}'.format(target, configuration)

            # on VS '--target test' fails but '--target RUN_TESTS' seems to work
            if generator.startswith('Visual Studio'):
                if target == 'test':
                    target = 'RUN_TESTS'

            command = base_command + ' --config {0} --target {1}'.format(
                configuration, target)
            return_code += run_command(
                step=step, command=command, expect_failure=expect_failure)

        # execute dashboard script, if it exists
        dashboard_script = 'dashboard.cmake'
        dashboard_script_path = cmakelists_path / dashboard_script
        if dashboard_script_path.exists():
            # if this directory contains a dashboard.cmake script, we launch it
            step = dashboard_script
            command = 'ctest -C {0} -S "{1}" -DCTEST_CMAKE_GENERATOR="{2}" {3}'.format(
                configuration, dashboard_script_path, generator,
                definitions_string)
            return_code += run_command(
                step=step, command=command, expect_failure=expect_failure)

    for entry in env:
        os.environ.pop(entry)

    return return_code
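The runner above skips a recipe when the system CMake is older than the minimum declared in its CMakeLists.txt. A minimal sketch of that comparison in isolation, with literal strings standing in for `get_min_cmake_version` and `get_system_cmake_version`:

from packaging import version

min_cmake_version = "3.14"       # stand-in for get_min_cmake_version(...)
system_cmake_version = "3.10.2"  # stand-in for get_system_cmake_version()

if version.parse(system_cmake_version) < version.parse(min_cmake_version):
    print("SKIPPING (system cmake version < min. cmake version for this recipe)")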
Example #54
0
def get_release(releases_data):
    releases = list(
        filter(lambda x: x["prerelease"] is False and x["draft"] is False,
               releases_data))
    return None if not releases else sorted(
        releases, key=lambda x: version.parse(x["tag_name"])).pop()
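The function above drops drafts and pre-releases and sorts the remainder by parsed tag, so the newest stable release is the last element. A minimal sketch with inline data (the payload mirrors the GitHub releases API fields used above; the tags are illustrative):

from packaging import version

releases_data = [
    {"tag_name": "v1.10.0", "prerelease": False, "draft": False},
    {"tag_name": "v1.9.2", "prerelease": False, "draft": False},
    {"tag_name": "v1.11.0rc1", "prerelease": True, "draft": False},
]

stable = [r for r in releases_data if not r["prerelease"] and not r["draft"]]
latest = max(stable, key=lambda r: version.parse(r["tag_name"]), default=None)
print(latest["tag_name"] if latest else None)  # v1.10.0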
Example #55
0
def main():
    from transformers import __version__ as transformers_version
    if version.parse(transformers_version) < version.parse(
            "3.1.0"):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    args = parse_arguments()
    setup_logger(args.verbose)

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(
        ".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    if args.use_external_data_format:
        assert not args.output.endswith(
            '.onnx'
        ), "output shall be a directory for --use_external_data_format"

    model_class = MODEL_CLASSES[args.model_class][0]
    model_type = "beam_search_step" if args.model_class == "GPT2LMHeadModel_BeamSearchStep" else "default"
    gpt2helper = Gpt2HelperFactory.create_helper(model_type)
    gpt2tester = Gpt2TesterFactory.create_tester(model_type)
    config = AutoConfig.from_pretrained(args.model_name_or_path,
                                        cache_dir=cache_dir)
    if model_type == 'beam_search_step':
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            batch_size=args.batch_size,
                                            beam_size=args.beam_size,
                                            cache_dir=cache_dir)
    else:
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    if (not args.use_external_data_format) and (config.n_layer > 24):
        logger.info(f"Try --use_external_data_format when model size > 2GB")

    onnx_model_paths = gpt2helper.get_onnx_paths(
        output_dir,
        args.model_name_or_path,
        args.model_class,
        new_folder=args.use_external_data_format)

    raw_onnx_model = onnx_model_paths["raw"]

    logger.info(f"Exporting ONNX model to {raw_onnx_model}")
    use_padding = MODEL_CLASSES[args.model_class][2]
    gpt2helper.export_onnx(model,
                           device,
                           raw_onnx_model,
                           args.verbose,
                           args.use_external_data_format,
                           has_position_ids=use_padding,
                           has_attention_mask=use_padding)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        output_path = onnx_model_paths[str(args.precision) if args.
                                       precision != Precision.INT8 else 'fp32']

        logger.info(f"Optimizing model to {output_path}")
        gpt2helper.optimize_onnx(raw_onnx_model, output_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size,
                                 args.use_external_data_format)
    else:
        output_path = raw_onnx_model

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path,
                                           onnx_model_paths['int8'],
                                           args.use_external_data_format)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")
        output_path = onnx_model_paths['int8']

    if args.output.endswith(
            '.onnx'
    ) and output_path != args.output and not args.use_external_data_format:
        import shutil
        shutil.move(output_path, args.output)
        output_path = args.output

    logger.info(f"Output path: {output_path}")

    session = create_onnxruntime_session(output_path,
                                         args.use_gpu,
                                         enable_all_optimization=True,
                                         verbose=args.verbose)
    if session is not None:
        gpt2helper.test_parity(session,
                               model,
                               device,
                               args.precision == Precision.FLOAT16,
                               rtol=args.tolerance,
                               atol=args.tolerance,
                               model_class=args.model_class,
                               has_position_ids=use_padding,
                               has_attention_mask=use_padding)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for _, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(
                    numpy.asarray(data["input_ids"],
                                  dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if args.precision == Precision.FLOAT16 else numpy.float32
                        attention_mask = torch.from_numpy(
                            numpy.asarray(data["attention_mask"],
                                          dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (
                            input_ids !=
                            padding).type(torch.float16 if args.precision ==
                                          Precision.FLOAT16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(
                            numpy.asarray(data["position_ids"],
                                          dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {
                        "input_ids": input_ids,
                        "position_ids": position_ids,
                        "attention_mask": attention_mask
                    }
                else:
                    inputs = {"input_ids": input_ids}

                if model_type == "beam_search_step":
                    beam_select_idx = torch.zeros([1,
                                                   input_ids.shape[0]]).long()

                    input_log_probs = torch.zeros([input_ids.shape[0], 1])
                    input_unfinished_sents = torch.ones(
                        [input_ids.shape[0], 1], dtype=torch.bool)
                    inputs.update({
                        "beam_select_idx":
                        beam_select_idx,
                        "input_log_probs":
                        input_log_probs,
                        "input_unfinished_sents":
                        input_unfinished_sents,
                    })

                test_inputs.append(inputs)

        gpt2tester.test_generation(session,
                                   model,
                                   device,
                                   test_inputs,
                                   precision=args.precision,
                                   model_class=args.model_class,
                                   top_k=20,
                                   top_k_no_order=True,
                                   max_steps=24,
                                   max_inputs=0,
                                   verbose=args.verbose,
                                   save_test_data=3,
                                   save_test_data_dir=Path(output_path).parent)

    logger.info(f"Done. Output model: {output_path}")
Example #56
0
#   See the License for the specific language governing permissions and
#   limitations under the License.

import matplotlib
import numpy as np
import pandas as pd
import pymc3 as pm
import theano.tensor as tt
import pytest
import theano
from pymc3.theanof import floatX
from packaging import version

from .helpers import SeededTest

if version.parse(matplotlib.__version__) < version.parse('3.3'):
    matplotlib.use('Agg', warn=False)
else:
    matplotlib.use('Agg')


def get_city_data():
    """Helper to get city data"""
    data = pd.read_csv(pm.get_data('srrs2.dat'))
    cty_data = pd.read_csv(pm.get_data('cty.dat'))

    data = data[data.state == 'MN']

    data['fips'] = data.stfips * 1000 + data.cntyfips
    cty_data['fips'] = cty_data.stfips * 1000 + cty_data.ctfips
    data['lradon'] = np.log(np.where(data.activity == 0, .1, data.activity))
Example #57
0
    def train(self, model_path: Optional[str] = None):
        """
        Main training entry point.

        Args:
            model_path:
                (Optional) Local path to model if model to train has been instantiated from a local path
                If present, we will try reloading the optimizer/scheduler states from there.
        """
        train_dataloader = self.get_train_dataloader()
        if self.args.max_steps > 0:
            t_total = self.args.max_steps
            num_train_epochs = (self.args.max_steps //
                                (len(train_dataloader) //
                                 self.args.gradient_accumulation_steps) + 1)
        else:
            t_total = int(
                len(train_dataloader) //
                self.args.gradient_accumulation_steps *
                self.args.num_train_epochs)
            num_train_epochs = self.args.num_train_epochs

        optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)

        # Check if saved optimizer or scheduler states exist
        if (model_path is not None
                and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
                and os.path.isfile(os.path.join(model_path, "scheduler.pt"))):
            # Load in optimizer and scheduler states
            optimizer.load_state_dict(
                torch.load(os.path.join(model_path, "optimizer.pt"),
                           map_location=self.args.device))
            scheduler.load_state_dict(
                torch.load(os.path.join(model_path, "scheduler.pt")))

        model = self.model
        if self.args.fp16:
            if not is_apex_available():
                raise ImportError(
                    "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
                )
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=self.args.fp16_opt_level)

        # multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)

        # Distributed training (should be after apex fp16 initialization)
        if self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=True,
            )

        if self.tb_writer is not None:
            self.tb_writer.add_text("args", self.args.to_json_string())
            self.tb_writer.add_hparams(self.args.to_sanitized_dict(),
                                       metric_dict={})

        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size(
            )
        else:
            total_train_batch_size = (self.args.train_batch_size *
                                      self.args.gradient_accumulation_steps *
                                      (torch.distributed.get_world_size()
                                       if self.args.local_rank != -1 else 1))
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", self.num_examples(train_dataloader))
        logger.info("  Num Epochs = %d", num_train_epochs)
        logger.info("  Instantaneous batch size per device = %d",
                    self.args.per_device_train_batch_size)
        logger.info(
            "  Total train batch size (w. parallel, distributed & accumulation) = %d",
            total_train_batch_size)
        logger.info("  Gradient Accumulation steps = %d",
                    self.args.gradient_accumulation_steps)
        logger.info("  Total optimization steps = %d", t_total)

        self.global_step = 0
        self.epoch = 0
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        # Check if continuing training from a checkpoint
        if model_path is not None:
            # set global_step to global_step of last saved checkpoint from model path
            try:
                self.global_step = int(model_path.split("-")[-1].split("/")[0])
                epochs_trained = self.global_step // (
                    len(train_dataloader) //
                    self.args.gradient_accumulation_steps)
                steps_trained_in_current_epoch = self.global_step % (
                    len(train_dataloader) //
                    self.args.gradient_accumulation_steps)

                logger.info(
                    "  Continuing training from checkpoint, will skip to saved global_step"
                )
                logger.info("  Continuing training from epoch %d",
                            epochs_trained)
                logger.info("  Continuing training from global step %d",
                            self.global_step)
                logger.info(
                    "  Will skip the first %d steps in the first epoch",
                    steps_trained_in_current_epoch)
            except ValueError:
                self.global_step = 0
                logger.info("  Starting fine-tuning.")

        tr_loss = 0.0
        logging_loss = 0.0
        model.zero_grad()
        train_iterator = trange(epochs_trained,
                                int(num_train_epochs),
                                desc="Epoch",
                                disable=not self.is_local_master())
        for epoch in train_iterator:
            if isinstance(train_dataloader, DataLoader) and isinstance(
                    train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)

            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(
                    train_dataloader,
                    [self.args.device]).per_device_loader(self.args.device)
                epoch_iterator = tqdm(parallel_loader,
                                      desc="Iteration",
                                      disable=not self.is_local_master())
            else:
                epoch_iterator = tqdm(train_dataloader,
                                      desc="Iteration",
                                      disable=not self.is_local_master())

            for step, inputs in enumerate(epoch_iterator):

                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue

                tr_loss += self._training_step(model, inputs, optimizer)

                if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                        # last step in epoch but step is always smaller than gradient_accumulation_steps
                        len(epoch_iterator) <=
                        self.args.gradient_accumulation_steps and
                    (step + 1) == len(epoch_iterator)):
                    if self.args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer),
                            self.args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                       self.args.max_grad_norm)

                    if is_torch_tpu_available():
                        xm.optimizer_step(optimizer)
                    else:
                        optimizer.step()

                    scheduler.step()
                    model.zero_grad()
                    self.global_step += 1
                    self.epoch = epoch + (step + 1) / len(epoch_iterator)

                    if (self.args.logging_steps > 0
                            and self.global_step % self.args.logging_steps
                            == 0) or (self.global_step == 1
                                      and self.args.logging_first_step):
                        logs: Dict[str, float] = {}
                        logs["loss"] = (tr_loss -
                                        logging_loss) / self.args.logging_steps
                        # backward compatibility for pytorch schedulers
                        logs["learning_rate"] = (
                            scheduler.get_last_lr()[0]
                            if version.parse(torch.__version__) >=
                            version.parse("1.4") else scheduler.get_lr()[0])
                        logging_loss = tr_loss

                        self._log(logs)

                        if self.args.evaluate_during_training:
                            self.evaluate()

                    if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
                        # In all cases (even distributed/parallel), self.model is always a reference
                        # to the model we want to save.
                        if hasattr(model, "module"):
                            assert model.module is self.model
                        else:
                            assert model is self.model
                        # Save model checkpoint
                        output_dir = os.path.join(
                            self.args.output_dir,
                            f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}")

                        self.save_model(output_dir)

                        if self.is_world_master():
                            self._rotate_checkpoints()

                        if is_torch_tpu_available():
                            xm.rendezvous("saving_optimizer_states")
                            xm.save(optimizer.state_dict(),
                                    os.path.join(output_dir, "optimizer.pt"))
                            xm.save(scheduler.state_dict(),
                                    os.path.join(output_dir, "scheduler.pt"))
                        elif self.is_world_master():
                            torch.save(
                                optimizer.state_dict(),
                                os.path.join(output_dir, "optimizer.pt"))
                            torch.save(
                                scheduler.state_dict(),
                                os.path.join(output_dir, "scheduler.pt"))

                if self.args.max_steps > 0 and self.global_step > self.args.max_steps:
                    epoch_iterator.close()
                    break
            if self.args.max_steps > 0 and self.global_step > self.args.max_steps:
                train_iterator.close()
                break
            if self.args.tpu_metrics_debug:
                # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                xm.master_print(met.metrics_report())

        if self.tb_writer:
            self.tb_writer.close()

        logger.info(
            "\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n"
        )
        return TrainOutput(self.global_step, tr_loss / self.global_step)
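The learning-rate logging in the loop above branches on the installed PyTorch version because schedulers only gained `get_last_lr()` in 1.4, with `get_lr()` as the older accessor. A minimal sketch of the same compatibility shim (the helper name is illustrative):

import torch
from packaging import version


def current_lr(scheduler):
    # get_last_lr() exists on schedulers from PyTorch 1.4 onward; fall back to get_lr() before that.
    if version.parse(torch.__version__) >= version.parse("1.4"):
        return scheduler.get_last_lr()[0]
    return scheduler.get_lr()[0]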
Example #58
0
def main(argv=None,
         experiment_name="",
         run_id=0,
         csv_filename="gpt2_parity_results.csv"):
    result = {}
    from transformers import __version__ as transformers_version
    if version.parse(transformers_version) < version.parse(
            "3.1.0"):  # past_key_values name does not exist in 3.0.2 or older
        raise RuntimeError("This tool requires transformers 3.1.0 or later.")

    args = parse_arguments(argv)
    setup_logger(args.verbose)

    if not experiment_name:
        import sys
        experiment_name = " ".join(argv if argv else sys.argv[1:])

    if args.tolerance == 0:
        args.tolerance = DEFAULT_TOLERANCE[args.precision]

    logger.info(f"Arguments:{args}")

    cache_dir = args.cache_dir
    output_dir = args.output if not args.output.endswith(
        ".onnx") else os.path.dirname(args.output)
    prepare_environment(cache_dir, output_dir, args.use_gpu)

    if args.precision != Precision.FLOAT32:
        assert args.optimize_onnx, "fp16/int8 requires --optimize_onnx"

    if args.precision == Precision.FLOAT16:
        assert args.use_gpu, "fp16 requires --use_gpu"

    if args.precision == Precision.INT8:
        assert not args.use_gpu, "quantization only supports CPU"

    if args.use_external_data_format:
        assert not args.output.endswith(
            '.onnx'
        ), "output shall be a directory for --use_external_data_format"

    model_class = MODEL_CLASSES[args.model_class][0]
    use_padding = MODEL_CLASSES[args.model_class][2]

    if args.model_class == "GPT2LMHeadModel_BeamSearchStep":
        model_type = "beam_search_step"
    elif args.model_class == "GPT2LMHeadModel_ConfigurableOneStepSearch":
        model_type = "configurable_one_step_search"
    else:
        model_type = "default"

    gpt2helper = Gpt2HelperFactory.create_helper(model_type)
    gpt2tester = Gpt2TesterFactory.create_tester(model_type)
    config = AutoConfig.from_pretrained(args.model_name_or_path,
                                        cache_dir=cache_dir)
    if model_type == 'beam_search_step':
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            batch_size=1,
                                            beam_size=args.beam_size,
                                            cache_dir=cache_dir)
    elif model_type == 'configurable_one_step_search':
        model = model_class.from_pretrained(
            args.model_name_or_path,
            config=config,
            batch_size=1,
            beam_size=args.beam_size,
            ignore_eos=args.ignore_eos,
            temperature=args.temperature,
            repetition_penalty=args.repetition_penalty,
            excluded_token_ids=args.excluded_token_ids,
            length_penalty=args.length_penalty,
            do_sample=args.do_sample,
            do_sample_top_p=args.do_sample_top_p,
            do_sample_top_k=args.do_sample_top_k,
            cache_dir=cache_dir)
    else:
        model = model_class.from_pretrained(args.model_name_or_path,
                                            config=config,
                                            cache_dir=cache_dir)

    device = torch.device("cuda:0" if args.use_gpu else "cpu")
    model.eval().to(device)

    if (not args.use_external_data_format) and (config.n_layer > 24):
        logger.info(f"Try --use_external_data_format when model size > 2GB")

    onnx_model_paths = gpt2helper.get_onnx_paths(
        output_dir,
        args.model_name_or_path,
        args.model_class,
        new_folder=args.use_external_data_format,
        remove_existing=[
            "fp32", "fp16", "int8"
        ])  # Do not remove raw model to save time in parity test

    raw_onnx_model = onnx_model_paths["raw"]

    if os.path.exists(raw_onnx_model):
        logger.warning(
            f"Skip exporting ONNX model since it existed: {raw_onnx_model}")
    else:
        logger.info(f"Exporting ONNX model to {raw_onnx_model}")
        gpt2helper.export_onnx(model,
                               device,
                               raw_onnx_model,
                               args.verbose,
                               args.use_external_data_format,
                               has_position_ids=use_padding,
                               has_attention_mask=use_padding)

    fp16_params = {"keep_io_types": args.keep_io_types}
    if args.io_block_list:
        fp16_params["keep_io_types"] = args.io_block_list
    if args.node_block_list:
        fp16_params["node_block_list"] = args.node_block_list
    if args.op_block_list:
        fp16_params["op_block_list"] = args.op_block_list
    if args.force_fp16_initializers:
        fp16_params["force_fp16_initializers"] = args.force_fp16_initializers

    is_io_float16 = (args.precision == Precision.FLOAT16
                     and not args.keep_io_types)

    if args.optimize_onnx or args.precision != Precision.FLOAT32:
        output_path = onnx_model_paths[str(args.precision) if args.
                                       precision != Precision.INT8 else 'fp32']

        logger.info(f"Optimizing model to {output_path}")
        gpt2helper.optimize_onnx(raw_onnx_model, output_path,
                                 args.precision == Precision.FLOAT16,
                                 model.config.num_attention_heads,
                                 model.config.hidden_size,
                                 args.use_external_data_format, **fp16_params)
    else:
        output_path = raw_onnx_model

    if args.precision == Precision.INT8:
        logger.info("quantizing model...")
        QuantizeHelper.quantize_onnx_model(output_path,
                                           onnx_model_paths['int8'],
                                           args.use_external_data_format)
        model = QuantizeHelper.quantize_torch_model(model)
        logger.info("finished quantizing model")
        output_path = onnx_model_paths['int8']

    if args.output.endswith(
            '.onnx'
    ) and output_path != args.output and not args.use_external_data_format:
        import shutil
        shutil.move(output_path, args.output)
        output_path = args.output

    logger.info(f"Output path: {output_path}")
    model_size_in_MB = int(
        get_onnx_model_size(output_path, args.use_external_data_format) /
        1024 / 1024)

    session = create_onnxruntime_session(output_path,
                                         args.use_gpu,
                                         enable_all_optimization=True,
                                         verbose=args.verbose)
    if args.model_class == "GPT2LMHeadModel" and session is not None:
        parity_result = gpt2helper.test_parity(
            session,
            model,
            device,
            is_io_float16,
            rtol=args.tolerance,
            atol=args.tolerance,
            model_class=args.model_class,
            has_position_ids=use_padding,
            has_attention_mask=use_padding,
            test_cases_per_run=args.test_cases,
            total_runs=args.test_runs,
            verbose=args.verbose)

        latency = gpt2helper.test_performance(session,
                                              model,
                                              device,
                                              is_io_float16,
                                              total_runs=100,
                                              use_io_binding=True,
                                              model_class=args.model_class,
                                              has_position_ids=use_padding,
                                              has_attention_mask=use_padding,
                                              batch_size=8,
                                              sequence_length=1,
                                              past_sequence_length=32)

        if args.precision == Precision.FLOAT16:
            logger.info(f"fp16 conversion parameters:{fp16_params}")

        # Write results to file
        import csv
        from onnxruntime import __version__ as ort_version
        latency_name = get_latency_name()
        csv_file_existed = os.path.exists(csv_filename)
        with open(csv_filename, mode="a", newline='') as csv_file:
            column_names = [
                "experiment", "run_id", "model_name", "model_class", "gpu",
                "precision", "optimizer", "test_cases", "runs",
                "keep_io_types", "io_block_list", "op_block_list",
                "node_block_list", "force_fp16_initializers",
                "ORT_TRANSFORMER_OPTIONS", "ORT_CUDA_GEMM_OPTIONS",
                "onnxruntime", latency_name, "top1_match_rate",
                "onnx_size_in_MB", "diff_50_percentile", "diff_90_percentile",
                "diff_95_percentile", "diff_99_percentile", "diff_pass_rate",
                "nan_rate", "top1_match_rate_per_run"
            ]
            csv_writer = csv.DictWriter(csv_file, fieldnames=column_names)
            if not csv_file_existed:
                csv_writer.writeheader()
            row = {
                "experiment": experiment_name,
                "run_id": run_id,
                "model_name": args.model_name_or_path,
                "model_class": args.model_class,
                "gpu": args.use_gpu,
                "precision": args.precision,
                "optimizer": args.optimize_onnx,
                "test_cases": args.test_cases,
                "runs": args.test_runs,
                "keep_io_types": args.keep_io_types,
                "io_block_list": args.io_block_list,
                "op_block_list": args.op_block_list,
                "node_block_list": args.node_block_list,
                "force_fp16_initializers": args.force_fp16_initializers,
                "ORT_TRANSFORMER_OPTIONS":
                os.getenv('ORT_TRANSFORMER_OPTIONS'),
                "ORT_CUDA_GEMM_OPTIONS": os.getenv('ORT_CUDA_GEMM_OPTIONS'),
                "onnxruntime": ort_version,
                latency_name: f"{latency:.2f}",
                "diff_50_percentile": parity_result["max_diff_percentile_50"],
                "diff_90_percentile": parity_result["max_diff_percentile_90"],
                "diff_95_percentile": parity_result["max_diff_percentile_95"],
                "diff_99_percentile": parity_result["max_diff_percentile_99"],
                "diff_pass_rate": parity_result["diff_pass_rate"],
                "nan_rate": parity_result["nan_rate"],
                "top1_match_rate": parity_result["top1_match_rate"],
                "top1_match_rate_per_run":
                parity_result["top1_match_rate_per_run"],
                "onnx_size_in_MB": "{}".format(model_size_in_MB),
            }
            logger.info(f"result: {row}")
            result.update(row)
            csv_writer.writerow(row)

    if args.input_test_file:
        test_inputs = []
        # Each line of test file is a JSON string like:
        # {"input_ids": [[14698, 257, 1310, 13688, 319, 326]]}
        with open(args.input_test_file) as read_f:
            for _, line in enumerate(read_f):
                line = line.rstrip()
                data = json.loads(line)
                input_ids = torch.from_numpy(
                    numpy.asarray(data["input_ids"],
                                  dtype=numpy.int64)).to(device)

                if use_padding:
                    if "attention_mask" in data:
                        numpy_float = numpy.float16 if is_io_float16 else numpy.float32
                        attention_mask = torch.from_numpy(
                            numpy.asarray(data["attention_mask"],
                                          dtype=numpy_float)).to(device)
                    else:
                        padding = -1
                        attention_mask = (input_ids != padding).type(
                            torch.float16 if is_io_float16 else torch.float32)
                        input_ids.masked_fill_(input_ids == padding, 0)

                    if "position_ids" in data:
                        position_ids = torch.from_numpy(
                            numpy.asarray(data["position_ids"],
                                          dtype=numpy.int64)).to(device)
                    else:
                        position_ids = (attention_mask.long().cumsum(-1) - 1)
                        position_ids.masked_fill_(position_ids < 0, 0)

                    inputs = {
                        "input_ids": input_ids,
                        "position_ids": position_ids,
                        "attention_mask": attention_mask
                    }
                else:
                    inputs = {"input_ids": input_ids}

                if model_type == "beam_search_step" or model_type == "configurable_one_step_search":
                    beam_select_idx = torch.zeros([1,
                                                   input_ids.shape[0]]).long()

                    input_log_probs = torch.zeros([input_ids.shape[0], 1])
                    input_unfinished_sents = torch.ones(
                        [input_ids.shape[0], 1], dtype=torch.bool)
                    inputs.update({
                        "beam_select_idx":
                        beam_select_idx,
                        "input_log_probs":
                        input_log_probs,
                        "input_unfinished_sents":
                        input_unfinished_sents,
                    })

                test_inputs.append(inputs)

        gpt2tester.test_generation(session,
                                   model,
                                   device,
                                   test_inputs,
                                   precision=args.precision,
                                   model_class=args.model_class,
                                   top_k=20,
                                   top_k_no_order=True,
                                   max_steps=24,
                                   max_inputs=0,
                                   verbose=args.verbose,
                                   save_test_data=3,
                                   save_test_data_dir=Path(output_path).parent)

    logger.info(f"Done. Output model: {output_path}")
    return result
Example #59
0
def torch_version_check():
    from packaging import version

    return version.parse(torch.__version__) > version.parse("1.4.0")
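Note that the comparison above is strict, so exactly 1.4.0 does not pass it, and that `parse` orders pre-releases below their final release. A small illustration:

from packaging import version

print(version.parse("1.4.0") > version.parse("1.4.0"))    # False: a strict check excludes 1.4.0 itself
print(version.parse("1.5.0a1") < version.parse("1.5.0"))  # True: pre-releases sort before the final release
print(version.parse("1.10.0") > version.parse("1.9.0"))   # True: numeric, not lexicographic, ordering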
Example #60
0
    def fit(cls,
            train_data,
            label,
            tuning_data=None,
            time_limits=None,
            output_directory='./ag_text',
            feature_columns=None,
            holdout_frac=None,
            eval_metric=None,
            stopping_metric=None,
            nthreads_per_trial=None,
            ngpus_per_trial=None,
            dist_ip_addrs=None,
            num_trials=None,
            search_strategy=None,
            search_options=None,
            scheduler_options=None,
            hyperparameters=None,
            plot_results=None,
            seed=None,
            verbosity=2):
        """Fit models to make predictions based on text inputs.

        Parameters
        ----------
        train_data : :class:`autogluon.task.tabular_prediction.TabularDataset` or `pandas.DataFrame`
            Training dataset where rows = individual training examples, columns = features.
        label : str
            Name of the label column. It can be a string. By default, we will search for a column named
        tuning_data : :class:`autogluon.task.tabular_prediction.TabularDataset` or `pandas.DataFrame`, default = None
            Another dataset containing validation data reserved for hyperparameter tuning (in same format as training data).
            If `tuning_data = None`, `fit()` will automatically hold out random examples from `train_data` for validation.
        time_limits : int or str, default = None
            Approximately how long `fit()` should run for (wallclock time in seconds if int).
            String values may instead be used to specify time in different units such as: '1min' or '1hour'.
            Longer `time_limits` will usually improve predictive accuracy.
            If not specified, `fit()` will run until all models to try by default have completed training.
        output_directory : str, default = './ag_text'
            Path to directory where models and intermediate outputs should be saved.
        feature_columns : List[str], default = None
            Which columns of table to consider as predictive features (other columns will be ignored, except for label-column).
            If None (by default), all columns of table are considered predictive features.
        holdout_frac : float, default = None
            Fraction of train_data to holdout as tuning data for optimizing hyperparameters (ignored unless `tuning_data = None`).
            If None, default value is selected based on the number of training examples.
        eval_metric : str, default = None
            The evaluation metric that will be used to evaluate the model's predictive performance.
            If None, an appropriate default metric will be selected (accuracy for classification, mean-squared-error for regression).
            Options for classification include: 'acc' (accuracy), 'nll' (negative log-likelihood).
            Additional options for binary classification include: 'f1' (F1 score), 'mcc' (Matthews coefficient), 'auc' (area under ROC curve).
            Options for regression include: 'mse' (mean squared error), 'rmse' (root mean squared error), 'mae' (mean absolute error).
        stopping_metric : str, default = None
            Metric which iteratively-trained models use to early stop to avoid overfitting.
            Defaults to `eval_metric` value (if None).
            Options are identical to options for `eval_metric`.
        nthreads_per_trial : int, default = None
            The number of threads per individual model training run. By default, all available CPUs are used.
        ngpus_per_trial : int, default = None
            The number of GPUs to use per individual model training run. If unspecified, a default value is chosen based on total number of GPUs available.
        dist_ip_addrs : list, default = None
            List of IP addresses corresponding to remote workers, in order to leverage distributed computation.
        num_trials : int, default = None
            The number of trials in the HPO search.
        search_strategy : str, default = None
            Which hyperparameter search algorithm to use. Options include:
            'random' (random search), 'bayesopt' (Gaussian process Bayesian optimization),
            'skopt' (SKopt Bayesian optimization), 'grid' (grid search),
            'hyperband' (Hyperband scheduling with random search), 'bayesopt-hyperband'
            (Hyperband scheduling with GP-BO search).
            If unspecified, the default is 'random'.
        search_options : dict, default = None
            Options passed to searcher.
        scheduler_options : dict, default = None
            Additional kwargs passed to scheduler __init__.
        hyperparameters : dict, default = None
            Determines the hyperparameters used by the models. Each hyperparameter may be either a fixed value or a search space over many values.
            For an example of the default hyperparameters, see: `autogluon.task.text_prediction.text_prediction.default()`
        plot_results : bool, default = None
            Whether or not to plot intermediate training results during `fit()`.
        seed : int, default = None
            Seed value for random state used inside `fit()`. 
        verbosity : int, default = 2
            Verbosity levels range from 0 to 4 and control how much information is printed
            during fit().
            Higher levels correspond to more detailed print statements
            (you can set verbosity = 0 to suppress warnings).
            If using logging, you can alternatively control the amount of information printed
            via `logger.setLevel(L)`, where `L` ranges from 0 to 50 (note: higher values of `L`
            correspond to fewer print statements, the opposite of verbosity levels).

        Returns
        -------
        model
            A `BertForTextPredictionBasic` object that can be used for making predictions on new data.
        """
        assert dist_ip_addrs is None, 'Training on remote machine is currently not supported.'
        # Version check of MXNet
        if version.parse(mxnet.__version__) < version.parse('1.7.0') \
                or version.parse(mxnet.__version__) >= version.parse('2.0.0'):
            raise ImportError(
                'You will need to ensure that you have mxnet>=1.7.0, <2.0.0. '
                'For more information about how to install mxnet, you can refer to '
                'https://sxjscience.github.io/KDD2020/ .')

        if verbosity < 0:
            verbosity = 0
        elif verbosity > 4:
            verbosity = 4
        console_log = verbosity >= 2
        logging_config(folder=output_directory,
                       name='ag_text_prediction',
                       logger=logger,
                       level=verbosity2loglevel(verbosity),
                       console=console_log)
        # Parse the hyper-parameters
        if hyperparameters is None:
            hyperparameters = ag_text_prediction_params.create('default')
        elif isinstance(hyperparameters, str):
            hyperparameters = ag_text_prediction_params.create(hyperparameters)
        else:
            base_params = ag_text_prediction_params.create('default')
            hyperparameters = merge_params(base_params, hyperparameters)
        if seed is not None:
            hyperparameters['seed'] = seed
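        # Seed NumPy's global random state so that runs with the same seed are reproducible.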
        np.random.seed(hyperparameters['seed'])
        if not isinstance(train_data, pd.DataFrame):
            train_data = load_pd.load(train_data)
        # Infer the label column(s): entries may be column names or integer column indices
        if not isinstance(label, list):
            label = [label]
        label_columns = []
        for ele in label:
            if isinstance(ele, int):
                label_columns.append(train_data.columns[ele])
            else:
                label_columns.append(ele)
        if feature_columns is None:
            all_columns = list(train_data.columns)
            feature_columns = [
                ele for ele in all_columns if ele not in label_columns
            ]
        else:
            if isinstance(feature_columns, str):
                feature_columns = [feature_columns]
            for col in feature_columns:
                assert col not in label_columns, 'Feature columns and label columns cannot overlap.'
                assert col in train_data.columns,\
                    'Feature columns must be in the pandas dataframe! Received col = "{}", ' \
                    'all columns = "{}"'.format(col, train_data.columns)
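            # Preserve the dataframe's original column order when combining feature and label columns.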
            all_columns = feature_columns + label_columns
            all_columns = [
                ele for ele in train_data.columns if ele in all_columns
            ]
        if tuning_data is None:
            if holdout_frac is None:
                holdout_frac = default_holdout_frac(len(train_data), True)
            train_data, tuning_data = random_split_train_val(
                train_data, valid_ratio=holdout_frac)

        else:
            if not isinstance(tuning_data, pd.DataFrame):
                tuning_data = load_pd.load(tuning_data)
        train_data = train_data[all_columns]
        tuning_data = tuning_data[all_columns]
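        # Infer per-column properties (e.g. types, categorical handling) from the combined train and tuning data.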
        column_properties = get_column_properties(
            pd.concat([train_data, tuning_data]),
            metadata=None,
            label_columns=label_columns,
            provided_column_properties=None,
            categorical_default_handle_missing_value=True)

        train_data = TabularDataset(train_data,
                                    column_properties=column_properties,
                                    label_columns=label_columns)
        tuning_data = TabularDataset(
            tuning_data,
            column_properties=train_data.column_properties,
            label_columns=label_columns)

        logger.info('Train Dataset:')
        logger.info(train_data)
        logger.info('Tuning Dataset:')
        logger.info(tuning_data)
        logger.debug('Hyperparameters:')
        logger.debug(hyperparameters)
        has_text_column = False
        for k, v in column_properties.items():
            if v.type == _C.TEXT:
                has_text_column = True
                break
        if not has_text_column:
            raise NotImplementedError('No text column is found! This is currently not supported by '
                                      'the TextPrediction task. You may try to use '
                                      'TabularPrediction.fit().\n'
                                      'The inferred column properties of the training data are {}'
                                      .format(train_data))
        problem_types = []
        label_shapes = []
        for label_col_name in label_columns:
            problem_type, label_shape = infer_problem_type(
                column_properties=column_properties,
                label_col_name=label_col_name)
            problem_types.append(problem_type)
            label_shapes.append(label_shape)
        logger.info(
            'Label columns={}, Feature columns={}, Problem types={}, Label shapes={}'
            .format(label_columns, feature_columns, problem_types,
                    label_shapes))
        eval_metric, stopping_metric, log_metrics =\
            infer_eval_stop_log_metrics(problem_types[0],
                                        label_shapes[0],
                                        eval_metric=eval_metric,
                                        stopping_metric=stopping_metric)
        logger.info('Eval Metric={}, Stop Metric={}, Log Metrics={}'.format(
            eval_metric, stopping_metric, log_metrics))
        model_candidates = []
        for model_type, kwargs in hyperparameters['models'].items():
            search_space = kwargs['search_space']
            if model_type == 'BertForTextPredictionBasic':
                model = BertForTextPredictionBasic(
                    column_properties=column_properties,
                    label_columns=label_columns,
                    feature_columns=feature_columns,
                    label_shapes=label_shapes,
                    problem_types=problem_types,
                    stopping_metric=stopping_metric,
                    log_metrics=log_metrics,
                    base_config=None,
                    search_space=search_space,
                    output_directory=output_directory,
                    logger=logger)
                model_candidates.append(model)
            else:
                raise ValueError(
                    'model_type = "{}" is not supported. You can try to use '
                    'model_type = "BertForTextPredictionBasic"'.format(
                        model_type))
        assert len(model_candidates) == 1, 'Only one model is supported currently'
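        # Fill in per-trial CPU/GPU resources, falling back to recommended defaults when not specified.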
        recommended_resource = get_recommended_resource(
            nthreads_per_trial=nthreads_per_trial,
            ngpus_per_trial=ngpus_per_trial)
        if search_strategy is None:
            search_strategy = hyperparameters['hpo_params']['search_strategy']
        if time_limits is None:
            time_limits = hyperparameters['hpo_params']['time_limits']
        else:
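            # Convert human-readable time limits such as '1min' or '2hour' into seconds.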
            if isinstance(time_limits, str):
                if time_limits.endswith('min'):
                    time_limits = int(float(time_limits[:-3]) * 60)
                elif time_limits.endswith('hour'):
                    time_limits = int(float(time_limits[:-4]) * 60 * 60)
                else:
                    raise ValueError(
                        'The given time_limits="{}" cannot be parsed!'.format(
                            time_limits))
        if num_trials is None:
            num_trials = hyperparameters['hpo_params']['num_trials']
        if scheduler_options is None:
            scheduler_options = hyperparameters['hpo_params'][
                'scheduler_options']
            if scheduler_options is None:
                scheduler_options = dict()
        if search_strategy.endswith('hyperband'):
            # Specific defaults for hyperband scheduling
            scheduler_options['reduction_factor'] = scheduler_options.get(
                'reduction_factor', 4)
            scheduler_options['grace_period'] = scheduler_options.get(
                'grace_period', 10)
            scheduler_options['max_t'] = scheduler_options.get('max_t', 50)

        if recommended_resource['num_gpus'] == 0:
            warnings.warn(
                'It is recommended to use a GPU to run the TextPrediction task!')
        model = model_candidates[0]
        if plot_results is None:
            if in_ipynb():
                plot_results = True
            else:
                plot_results = False
        model.train(train_data=train_data,
                    tuning_data=tuning_data,
                    resource=recommended_resource,
                    time_limits=time_limits,
                    search_strategy=search_strategy,
                    search_options=search_options,
                    scheduler_options=scheduler_options,
                    num_trials=num_trials,
                    plot_results=plot_results,
                    console_log=verbosity >= 2,
                    ignore_warning=verbosity <= 2)
        return model
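
The parameter documentation above maps onto a call like the following. This is only a hypothetical sketch: the import path, column names, and the predict() call on the returned model are assumptions, and a real training table would need far more rows.

import pandas as pd
from autogluon import TextPrediction  # assumed import path for the task class exposing fit()

# Hypothetical training table with one text feature column and one label column.
train_df = pd.DataFrame({
    "review": ["great product, works as advertised",
               "stopped working after a day"],
    "sentiment": ["positive", "negative"],
})

predictor = TextPrediction.fit(
    train_data=train_df,
    label="sentiment",          # an integer column index would also be accepted
    time_limits="5min",         # converted to seconds inside fit()
    num_trials=2,
    output_directory="./ag_text",
)
predictions = predictor.predict(train_df)  # predict() is assumed on the returned model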