def publish_pypi(target, args):
    """
    Build and publish the target on a PyPI repository
    """
    config = Configuration(args)
    assert config.has_pypi_auth(), "Missing PyPi authentication"

    # Build the project
    setup = target.check_path("setup.py")
    logger.info(f"Building Python project using {setup}")
    sandbox.run_setup(setup, ["clean", "sdist", "bdist_wheel"])

    # Check that some files were produced
    dist = target.check_path("dist")
    build = glob.glob(f"{dist}/*")
    assert len(build) > 0, "No built files found"
    logger.info("Will upload {}".format(", ".join(map(os.path.basename, build))))

    # Use the default repository unless one was given
    repository = args.repository or DEFAULT_REPOSITORY
    logger.info(f"Will upload on {repository}")

    # Upload it through twine
    upload_settings = Settings(
        username=config.pypi["username"],
        password=config.pypi["password"],
        repository_url=repository,
        verbose=True,
        disable_progress_bar=False,
    )
    twine_upload(upload_settings, build)
    logger.info("PyPi publication finished.")
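# For reference: a minimal standalone sketch of the same build-and-upload flow
# using twine's programmatic API. `Settings` and `upload` are real twine names
# (matching the snippet above); the credentials and repository URL here are
# placeholders, not values from the original project.
import glob

from twine.commands.upload import upload as twine_upload
from twine.settings import Settings

settings = Settings(
    username="__token__",                              # PyPI API tokens log in as "__token__"
    password="pypi-...",                               # placeholder for the actual token
    repository_url="https://upload.pypi.org/legacy/",  # the default upload endpoint
)
twine_upload(settings, glob.glob("dist/*"))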
def _upload(self, **kwargs):
    """ Upload to pypi """
    self.log.info(f"Using pypi config from {self.pypirc_path}")
    if not self.dry_run:
        twine_upload(self.twine_settings, [self.dist_path])
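# The method above relies on `self.twine_settings` being built elsewhere. A
# minimal sketch, assuming the standard twine API, of deriving settings from a
# named section of a .pypirc file; `repository_name` and `config_file` are
# twine `Settings` parameters, and the path is a placeholder.
from twine.settings import Settings

twine_settings = Settings(
    repository_name="pypi",   # read the [pypi] section
    config_file="~/.pypirc",  # placeholder path
)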
def pypi_upload(args):
    print('Uploading to PyPi')
    try:
        twine_upload([
            'dist/docker_compose-{}*.whl'.format(args.release),
            'dist/docker-compose-{}*.tar.gz'.format(args.release)
        ])
    except HTTPError as e:
        # requests' HTTPError has no `.message` attribute on Python 3;
        # inspect the stringified error instead.
        if e.response.status_code == 400 and 'File already exists' in str(e):
            if not args.finalize_resume:
                raise ScriptError(
                    'Package already uploaded on PyPi.'
                )
            print('Skipping PyPi upload - package already uploaded')
        else:
            raise ScriptError('Unexpected HTTP error uploading package to PyPi: {}'.format(e))
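# Recent twine versions expose a --skip-existing flag that avoids parsing the
# 400 response body at all. A minimal sketch of the same upload with that flag,
# using twine's CLI-style entry point; the release number is illustrative.
from twine.commands.upload import main as twine_upload_cli

twine_upload_cli([
    '--skip-existing',                    # treat "File already exists" as success
    'dist/docker_compose-1.29.2*.whl',    # illustrative release number
    'dist/docker-compose-1.29.2*.tar.gz',
])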
def upload(dist_dir):
    """Upload a given component to pypi

    The pypi username and password must either be specified in a ~/.pypirc
    file or in environment variables PYPI_USER and PYPI_PASS
    """
    if 'PYPI_USER' in os.environ and 'PYPI_PASS' in os.environ:
        pypi_user = os.environ['PYPI_USER']
        pypi_pass = os.environ['PYPI_PASS']
    else:
        pypi_user = None
        pypi_pass = None
        print("No PYPI user information in environment")

    packages = glob.glob(dist_dir)

    # Invoke upload this way since a subprocess call of the twine CLI has
    # cross-platform issues. NOTE: these positionals match the upload()
    # signature of an older twine release.
    twine_upload(packages, 'pypi', False, None, pypi_user, pypi_pass,
                 None, None, '~/.pypirc', False, None, None, None)
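# Twine itself also reads credentials from the environment, which sidesteps the
# fragile positional call above. A minimal sketch of the same upload driven by
# the TWINE_* variables that twine honors natively.
import glob
import os

from twine.commands.upload import main as twine_upload_cli

os.environ.setdefault("TWINE_USERNAME", os.environ.get("PYPI_USER", ""))
os.environ.setdefault("TWINE_PASSWORD", os.environ.get("PYPI_PASS", ""))
twine_upload_cli(glob.glob("dist/*"))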
def finalize(args):
    try:
        repository = Repository(REPO_ROOT, args.repo)
        img_manager = ImageManager(args.release)
        pr_data = repository.find_release_pr(args.release)
        if not pr_data:
            raise ScriptError('No PR found for {}'.format(args.release))
        if not check_pr_mergeable(pr_data):
            raise ScriptError('Can not finalize release with an unmergeable PR')
        if not img_manager.check_images(args.release):
            raise ScriptError('Missing release image')
        br_name = branch_name(args.release)
        if not repository.branch_exists(br_name):
            raise ScriptError('No local branch exists for this release.')
        gh_release = repository.find_release(args.release)
        if not gh_release:
            raise ScriptError('No Github release draft for this version')

        repository.checkout_branch(br_name)
        pypandoc.convert_file(
            os.path.join(REPO_ROOT, 'README.md'), 'rst',
            outputfile=os.path.join(REPO_ROOT, 'README.rst')
        )
        run_setup(os.path.join(REPO_ROOT, 'setup.py'), script_args=['sdist', 'bdist_wheel'])

        merge_status = pr_data.merge()
        if not merge_status.merged:
            raise ScriptError('Unable to merge PR #{}: {}'.format(
                pr_data.number, merge_status.message))

        print('Uploading to PyPi')
        twine_upload(['dist/*'])
        img_manager.push_images()
        repository.publish_release(gh_release)
    except ScriptError as e:
        print(e)
        return 1
    return 0
def upload(self, noop: bool, verbose: bool, skip_existing: bool, **additional_kwargs) -> bool:
    """
    Upload artifact to repository using Twine.

    For known repositories (like PyPI), the web URLs of successfully
    uploaded packages will be displayed.

    :param noop: Do not apply any changes.
    :param verbose: Show verbose output for Twine.
    :param skip_existing: Continue uploading files if one already exists.
        (May not work; check your repository for support.)
    :raises ImproperConfigurationError: The upload failed due to a configuration error.
    :returns: True if successful, False otherwise.
    """
    addon_kwargs = {
        "non_interactive": True,
        "verbose": verbose,
        "skip_existing": skip_existing,
        **additional_kwargs,
    }

    try:
        twine_settings = self._create_twine_settings(addon_kwargs)
        if not noop:
            twine_upload(upload_settings=twine_settings, dists=self.dists)
    except TwineException as e:
        raise ImproperConfigurationError(
            "Upload to artifact repository has failed") from e
    except requests.HTTPError as e:
        logger.warning(f"Upload to artifact repository has failed: {e}")
        return False
    else:
        return True
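# A hypothetical call site for the method above. `ArtifactRepo` stands in for
# whatever class actually owns upload(), dists, and _create_twine_settings();
# only the argument names come from the snippet.
repo = ArtifactRepo(dists=["dist/mypkg-1.0.0-py3-none-any.whl"])
ok = repo.upload(noop=False, verbose=True, skip_existing=True)
if not ok:
    raise SystemExit("upload failed")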
    )
    print("\t%s" % call_info)
    log.write(call_info)
    log.write(output)

    print("Build output is in log file: %r" % log_filename)

    print("\ngit tag version (will raise an error if the tag already exists)")
    verbose_check_call("git", "tag", "v%s" % __version__)

    print("\nUpload with twine:")
    twine_args = sys.argv[1:]
    twine_args.remove("publish")
    twine_args.insert(1, "dist/*")
    print("\ttwine upload command args: %r" % " ".join(twine_args))
    from twine.commands.upload import main as twine_upload
    twine_upload(twine_args)

    print("\ngit push tag to server")
    verbose_check_call("git", "push", "--tags")
    sys.exit(0)


def get_authors():
    try:
        with open(os.path.join(PACKAGE_ROOT, "AUTHORS"), "r") as f:
            authors = [l.strip(" *\r\n") for l in f if l.strip().startswith("*")]
    except Exception as err:
        authors = "[Error: %s]" % err
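# The fragment above forwards sys.argv to twine's CLI entry point so that a
# custom "publish" command accepts arbitrary twine flags. A minimal sketch of
# the same pattern in isolation; the example invocation is illustrative.
import sys

from twine.commands.upload import main as twine_upload

# e.g. invoked as: python setup.py publish --repository testpypi
twine_args = sys.argv[1:]        # ['publish', '--repository', 'testpypi']
twine_args.remove("publish")     # drop our own subcommand
twine_args.insert(0, "dist/*")   # twine still needs the dists to upload
twine_upload(twine_args)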
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Upload """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function

import os

from twine.commands.upload import main as twine_upload

CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))

if __name__ == "__main__":
    if os.environ.get("TWINE_USERNAME") is None:
        os.environ["TWINE_USERNAME"] = "******"
        print("No TWINE_USERNAME, using sbr")
    if os.environ.get("TWINE_PASSWORD") is None:
        print("No TWINE_PASSWORD, upload cancelled")
        exit()

    dist_dir = os.path.join(CURRENT_PATH, "dist")
    if not os.listdir(dist_dir):
        print("Nothing to upload")
        exit()

    twine_upload([os.path.join(dist_dir, "*")])
        # Distribution file (.whl or .tar.gz)
        r = requests.get(url)
        fname = join(dist_dir, name)
        with open(fname, "wb") as file:
            file.write(r.content)
    elif name == "doc.zip":
        # Documentation file
        doc_url = url
    else:
        raise ValueError(f"Unknown asset '{name}'")

if doc_url is None:
    raise ValueError("Release has no documentation")

# Upload distributions to PyPI
print("Upload distributions to PyPI...")
twine_upload([f"{dist_dir}/*"])

# Upload documentation to host server
print("Upload documentation to host server...")
hostname = input("Hostname: ")
username = input("Username: ")
password = getpass("Password: ")  # prompt reconstructed from context; this span was redacted in the source
remote_dir = f"./{args.package}"  # hypothetical variable name; the original assignment target was redacted

client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy)
client.connect(hostname=hostname, username=username, password=password)

# Navigate into home directory
remote_exec(client, f"cd /home/{username}")

# Download zipped documentation into home directory
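# `remote_exec` is not shown in the fragment above. A minimal sketch of what
# such a helper might look like on top of paramiko's exec_command(); the
# error handling is an assumption, not the original implementation.
def remote_exec(client, command):
    """Run `command` over an open paramiko SSHClient and fail on non-zero exit."""
    stdin, stdout, stderr = client.exec_command(command)
    exit_status = stdout.channel.recv_exit_status()  # blocks until the command finishes
    if exit_status != 0:
        raise RuntimeError(f"'{command}' failed: {stderr.read().decode()}")
    return stdout.read().decode()

# Note that each exec_command() call runs in a fresh shell, so a bare "cd" only
# affects commands chained into the same invocation (e.g. "cd dir && ls").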
rcc = int(previous.split('.')[-1]) + 1

# generate setup.py for test-pypi
gensetup(for_pypi=True, test_subversion=rcc)
try:
    # generate distribution archives
    run_chk('python setup.py sdist bdist_wheel')
finally:
    # revert setup.py no matter what
    gensetup(for_pypi=False)

# upload to test.pypi
twine_settings = TwineSettings(username='******',
                               password=passwd,
                               repository_url='https://test.pypi.org/legacy/')
try:
    twine_upload(twine_settings,
                 (os.path.join('dist', '{:s}*'.format(distprefix(rc=rcc))),))
finally:
    run_chk('rm {:s}.*'.format(os.path.join('dist', distprefix())))

# check that the uploaded package is visible on test pypi: pip's error output
# for an impossible version pin ("==NULL") lists every available version
while True:
    errtxt = subprocess.run(
        'pip install --index-url https://test.pypi.org/simple/ --no-deps'
        ' {:s}==NULL'.format(pkgname),
        shell=True, universal_newlines=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE).stderr
    if '{:s}.{:s}.{:d}'.format(*version, rcc) in \
            errtxt.split('from versions: ')[1].split(')')[0].split(', '):
        break
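# Scraping pip's error output is brittle. PyPI and TestPyPI also expose a JSON
# API that lists released versions directly; a minimal sketch of the same
# visibility check against that endpoint (the retry cadence is arbitrary).
import time

import requests

def wait_for_version(pkgname, version, timeout=600):
    """Poll TestPyPI's JSON API until `version` of `pkgname` appears."""
    deadline = time.time() + timeout
    url = f"https://test.pypi.org/pypi/{pkgname}/json"
    while time.time() < deadline:
        resp = requests.get(url)
        if resp.ok and version in resp.json().get("releases", {}):
            return True
        time.sleep(15)
    return False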
async def async_main(context):
    context.task = scriptworker.client.get_task(context.config)
    decision_task_id = context.task['taskGroupId']
    decision_task = 'https://community-tc.services.mozilla.com/api/queue/v1/task/{task_id}'.format(task_id=decision_task_id)
    decision_json = json.loads(requests.get(decision_task).text)
    github_repo = os.environ.get('GITHUB_HEAD_REPO_URL', decision_json['payload']['env']['GITHUB_HEAD_REPO_URL'])
    github_tag = os.environ.get('GITHUB_HEAD_TAG', decision_json['payload']['env']['GITHUB_HEAD_TAG'])
    github_token = os.environ.get('GITHUB_ACCESS_TOKEN', '')

    assert len(github_repo) > 0
    assert len(github_tag) > 0
    assert len(github_token) > 0

    log.debug('Will upload to Github; {} release {}'.format(github_repo, github_tag))

    def download_pkgs(tasksId=None, pkg_ext=None):
        for taskId in tasksId:
            task_subdir = os.path.join(context.config['work_dir'], taskId)
            artifacts = queue.listLatestArtifacts(taskId)
            if 'artifacts' in artifacts:
                artifacts = [a['name'] for a in artifacts['artifacts']]
                log.debug('all artifacts: {}'.format(artifacts))
                # materialize the filter so the debug log shows the names
                artifacts = list(filter(lambda x: x.endswith(pkg_ext), artifacts))
                log.debug('filtered artifacts: {}'.format(artifacts))
                urls = [get_artifact_url(taskId, a) for a in artifacts]
                log.debug('urls: {}'.format(urls))
                tasks, files = download_artifacts(context, urls, parent_dir=task_subdir)
                log.debug('files: {}'.format(files))
                downloadTasks.extend(tasks)
                allPackages.extend(files)

    queue = Queue(options={'rootUrl': context.config['taskcluster_root_url']})
    downloadTasks = []
    allPackages = []

    upload_targets = []
    if 'upload_targets' in context.task['payload']:
        upload_targets = context.task['payload']['upload_targets']

    if 'python' in context.task['payload']['artifacts_deps']:
        pythonArtifactTaskIds = context.task['payload']['artifacts_deps']['python']
        download_pkgs(tasksId=pythonArtifactTaskIds, pkg_ext='.whl')

    if 'javascript' in context.task['payload']['artifacts_deps']:
        jsArtifactTaskIds = context.task['payload']['artifacts_deps']['javascript']
        download_pkgs(tasksId=jsArtifactTaskIds, pkg_ext='.tgz')

    if 'java_aar' in context.task['payload']['artifacts_deps']:
        aarArtifactTaskIds = context.task['payload']['artifacts_deps']['java_aar']
        download_pkgs(tasksId=aarArtifactTaskIds, pkg_ext='.maven.zip')

    if 'nuget' in context.task['payload']['artifacts_deps']:
        aarArtifactTaskIds = context.task['payload']['artifacts_deps']['nuget']
        download_pkgs(tasksId=aarArtifactTaskIds, pkg_ext='.nupkg')

    if 'cpp' in context.task['payload']['artifacts_deps']:
        cppArtifactTaskIds = context.task['payload']['artifacts_deps']['cpp']
        download_pkgs(tasksId=cppArtifactTaskIds, pkg_ext='native_client.tar.xz')

    if 'ios' in context.task['payload']['artifacts_deps']:
        iosArtifactTaskIds = context.task['payload']['artifacts_deps']['ios']
        download_pkgs(tasksId=iosArtifactTaskIds, pkg_ext='.tar.xz')

    # Wait on downloads
    await raise_future_exceptions(downloadTasks)

    with open(os.path.expanduser('~/.pypirc'), 'w') as rc:
        rc.write('''
[distutils]
index-servers =
    pypi
    pypitest

[pypi]
username={pypi_username}
password={pypi_password}

[pypitest]
repository=https://test.pypi.org/legacy/
username={pypitest_username}
password={pypitest_password}'''.format(
            pypi_username=os.environ.get('PYPI_USERNAME'),
            pypi_password=os.environ.get('PYPI_PASSWORD'),
            pypitest_username=os.environ.get('PYPITEST_USERNAME'),
            pypitest_password=os.environ.get('PYPITEST_PASSWORD'),
        ))

    allWheels = list(filter(lambda x: '.whl' in x, allPackages))
    # the wheel list doubles as twine CLI args, so flags ride along with it
    allWheels.extend(['--skip-existing'])
    allNpmPackages = list(filter(lambda x: '.tgz' in x, allPackages))
    allAarPackages = list(filter(lambda x: '.maven.zip' in x, allPackages))
    allNugetPackages = list(filter(lambda x: '.nupkg' in x, allPackages))
    allFrameworkPackages = list(filter(lambda x: '.framework' in x, allPackages))

    log.debug('allWheels: {}'.format(allWheels))
    log.debug('allNpmPackages: {}'.format(allNpmPackages))
    log.debug('allAarPackages: {}'.format(allAarPackages))
    log.debug('allNugetPackages: {}'.format(allNugetPackages))
    log.debug('allFrameworkPackages: {}'.format(allFrameworkPackages))

    allCppPackages = []
    for cpp in filter(lambda x: 'native_client.tar.xz' in x, allPackages):
        task_id = os.path.split(os.path.split(cpp)[0])[1]
        new_nc = get_native_client_final_name(task_id)
        new_cpp = os.path.join(os.path.split(cpp)[0], new_nc)
        log.debug('Moving {} to {}...'.format(cpp, new_cpp))
        assert len(new_cpp) > 0
        os.rename(cpp, new_cpp)
        allCppPackages.extend([new_cpp])

    log.debug('allCppPackages: {}'.format(allCppPackages))

    if 'USE_TEST_PYPI' in os.environ and os.environ['USE_TEST_PYPI'] == '1':
        allWheels.extend(['-r', 'pypitest'])

    if 'github' in upload_targets:
        log.debug('Starting GitHub upload ...')
        gh_release = get_github_release(repo=github_repo, tag=github_tag, token=github_token)
        log.debug('GitHub release collected ...')
        all_assets = {a.name: a for a in gh_release.get_assets()}
        # iterating the dict yields asset names (strings), not asset objects
        log.debug('All GitHub assets {} for {}.'.format(list(all_assets), github_tag))
        for pkg in allCppPackages + allWheels + allNpmPackages + allAarPackages + allNugetPackages + allFrameworkPackages:
            log.debug('Maybe uploading to GitHub {}.'.format(pkg))
            asset = all_assets.get(os.path.basename(pkg), None)
            if asset and asset.state == 'starter' and os.path.isfile(pkg):
                log.debug('Removing partially uploaded asset {} on release {} and uploading again.'.format(pkg, github_tag))
                asset.delete_asset()
            if asset and asset.state == 'uploaded':
                log.debug('Skipping Github upload for existing asset {} on release {}.'.format(pkg, github_tag))
            else:
                log.debug('Should be uploading to GitHub {}.'.format(pkg))
                # Ensure path exists, since we can have CLI flags for Twine
                if os.path.isfile(pkg):
                    log.debug('Performing Github upload for new asset {} on release {}.'.format(pkg, github_tag))
                    gh_release.upload_asset(path=pkg)

    if 'pypi' in upload_targets:
        try:
            twine_upload(allWheels)
        except Exception as e:
            log.debug('Twine Upload Exception: {}'.format(e))

    if 'npm' in upload_targets:
        assert len(allNpmPackages) == 3, "should only have one CPU, one GPU and one TFLite package"
        subprocess.check_call(['npm-cli-login'])
        for package in allNpmPackages:
            parsed = parse_semver(github_tag)
            tag = 'latest' if parsed.prerelease is None else 'prerelease'
            # --access=public is required because by default org packages are private
            rc = subprocess.call(['npm', 'publish', '--access=public', '--verbose', package, '--tag', tag])
            if rc > 0:
                log.debug('NPM Upload Exception: {}'.format(rc))

    if 'jcenter' in upload_targets:
        old_bintray = AttrDict(dict(
            org=os.environ.get('BINTRAY.USERNAME'),
            username=os.environ.get('BINTRAY_USERNAME'),
            apikey=os.environ.get('BINTRAY_APIKEY'),
            repo=os.environ.get('BINTRAY_REPO'),
            pkg=os.environ.get('BINTRAY_PKG'),
        ))
        new_bintray = AttrDict(dict(
            org=os.environ.get('BINTRAY_NEW_ORG'),
            username=os.environ.get('BINTRAY_NEW_USERNAME'),
            apikey=os.environ.get('BINTRAY_NEW_APIKEY'),
            repo=os.environ.get('BINTRAY_NEW_REPO'),
            pkg=os.environ.get('BINTRAY_NEW_PKG'),
        ))
        bintray_version = github_tag.replace('v', '')
        readme_tag = get_github_readme(repo=github_repo, tag=github_tag, subdir='native_client/java')
        # on 0.9.3 the repo name was changed from org.mozilla.deepspeech to org.deepspeech
        parsed = parse_semver(github_tag)
        name_switchpoint = parse_semver('0.9.3')
        is_new_name = parsed >= name_switchpoint
        bintray = new_bintray if is_new_name else old_bintray

        for mavenZip in allAarPackages:
            zipFile = os.path.basename(mavenZip)
            log.debug('Pushing {} to Bintray/JCenter as {}'.format(mavenZip, bintray.username))
            # curl -T libdeepspeech/build/libdeepspeech-0.4.2-alpha.0.maven.zip -uX:Y 'https://api.bintray.com/content/deepspeech-ci/org.deepspeech/libdeepspeech/0.4.2-alpha.0/libdeepspeech-0.4.2-alpha.0.maven.zip;publish=1;override=1;explode=1'
            r = requests.put(
                'https://api.bintray.com/content/{}/{}/{}/{}/{}'.format(bintray.org, bintray.repo, bintray.pkg, bintray_version, zipFile),
                auth=(bintray.username, bintray.apikey),
                params={'publish': 1, 'override': 1, 'explode': 1},
                data=open(mavenZip, 'rb').read())
            log.debug('Pushing {} resulted in {}: {}'.format(mavenZip, r.status_code, r.text))
            assert (r.status_code == 200) or (r.status_code == 201)

            r = requests.post(
                'https://api.bintray.com/packages/{}/{}/{}/versions/{}/release_notes'.format(bintray.org, bintray.repo, bintray.pkg, bintray_version),
                auth=(bintray.username, bintray.apikey),
                json={'bintray': {'syntax': 'markdown', 'content': readme_tag}})
            assert r.status_code == 200

            r = requests.post(
                'https://api.bintray.com/packages/{}/{}/{}/readme'.format(bintray.org, bintray.repo, bintray.pkg),
                auth=(bintray.username, bintray.apikey),
                json={'bintray': {'syntax': 'markdown', 'content': readme_tag}})
            assert r.status_code == 200

    if 'nuget' in upload_targets:
        nuget_old_apikey = os.environ.get('NUGET_DEEPSPEECH_APIKEY')
        nuget_new_apikey = os.environ.get('NUGET_MOZILLA_VOICE_APIKEY')
        # https://docs.microsoft.com/en-us/nuget/api/package-publish-resource
        # https://docs.microsoft.com/en-us/nuget/api/nuget-protocols
        for nugetPkg in allNugetPackages:
            nugetFile = os.path.basename(nugetPkg)
            key, keymsg = (nuget_old_apikey, 'old key') if nugetFile.startswith('DeepSpeech') else (nuget_new_apikey, 'new key')
            log.debug('Pushing {} to NuGet Gallery with {}'.format(nugetFile, keymsg))
            pkg_name = os.path.splitext(nugetFile)[0].split('.')[0]
            pkg_version = '.'.join(os.path.splitext(nugetFile)[0].split('.')[1:])
            log.debug('Requesting verification key for {} v{}'.format(pkg_name, pkg_version))

            # first we create a scope-verify key
            scope_key_headers = {
                'X-NuGet-ApiKey': key,
                'X-NuGet-Protocol-Version': '4.1.0',
            }
            r = requests.post('https://www.nuget.org/api/v2/package/create-verification-key/{}/{}'.format(pkg_name, pkg_version), headers=scope_key_headers)
            assert r.status_code == 200
            scope_verify_key = r.json()['Key']
            assert len(scope_verify_key) > 0
            log.debug('Received verification key for {} v{}'.format(pkg_name, pkg_version))

            log.debug('Run verification key for {} v{}'.format(pkg_name, pkg_version))
            verify_key_headers = {
                'X-NuGet-ApiKey': scope_verify_key,
                'X-NuGet-Protocol-Version': '4.1.0',
            }
            r = requests.get('https://www.nuget.org/api/v2/verifykey/{}/{}'.format(pkg_name, pkg_version), headers=verify_key_headers)
            assert r.status_code == 200

            all_headers = {
                'X-NuGet-ApiKey': key,
                'X-NuGet-Protocol-Version': '4.1.0',
            }
            # send as multipart/form-data using files=
            r = requests.put('https://www.nuget.org/api/v2/package', headers=all_headers, files={'file': (nugetFile, open(nugetPkg, 'rb'))})
            log.debug('Pushing {} resulted in {}: {}'.format(nugetPkg, r.status_code, r.text))
            # Don't fail on these:
            # 200/201/202: successful upload
            # 409: conflict (existing successfully uploaded package, in case of re-upload)
            assert (r.status_code == 200) or (r.status_code == 201) or (r.status_code == 202) or (r.status_code == 409)

    if 'readthedocs' in upload_targets:
        parsed_version = parse_semver(github_tag)
        readthedocs_api_token = os.environ.get('READTHEDOCS_API_TOKEN')
        auth_headers = {'Authorization': 'Token {}'.format(readthedocs_api_token)}
        log.debug('Tag on GitHub: {}'.format(github_tag))
        # We don't publish prerelease versions to ReadTheDocs
        if not parsed_version.prerelease:
            log.debug('Not a prerelease, triggering build')
            r = requests.post('https://readthedocs.org/api/v3/projects/deepspeech/versions/{}/builds/'.format(github_tag), headers=auth_headers).json()
            assert r['triggered']
            build_url = r['build']['_links']['_self']
            log.debug('Triggered build URL: {}'.format(build_url))

            rtd_latest_version = requests.get('https://readthedocs.org/api/v3/projects/deepspeech/versions/latest/', headers=auth_headers).json()['identifier']
            rtd_latest_version = parse_semver(rtd_latest_version)
            should_update_default = parsed_version > rtd_latest_version
            log.debug('Latest version on RTD: {}, should update default: {}'.format(rtd_latest_version, should_update_default))

            if should_update_default:
                async def wait_for_build_and_update_version():
                    r = requests.get(build_url, headers=auth_headers).json()
                    if r['state']['code'] != 'finished':
                        log.debug('Build not finished')
                        raise Exception('not finished')
                    log.debug('Build finished, updating default version and default branch')
                    r = requests.patch('https://readthedocs.org/api/v3/projects/deepspeech/',
                                       headers=auth_headers,
                                       json={'default_version': github_tag, 'default_branch': github_tag})
                    r.raise_for_status()

                # Wait for build to finish and set default version.
                # Retry 20 times, waiting 30 seconds in between.
                await retry_async(wait_for_build_and_update_version, attempts=20,
                                  sleeptime_callback=lambda *args, **kwargs: 30)
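# `retry_async` above comes from the scriptworker utilities. For readers
# outside that codebase, a minimal sketch of an equivalent helper; the names
# and defaults are illustrative, not scriptworker's actual implementation.
import asyncio

async def retry_async(coro_fn, attempts=5, sleeptime_callback=lambda *a, **kw: 30):
    """Await `coro_fn` until it succeeds or `attempts` is exhausted."""
    for attempt in range(1, attempts + 1):
        try:
            return await coro_fn()
        except Exception:
            if attempt == attempts:
                raise
            await asyncio.sleep(sleeptime_callback(attempt))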
def upload(self, project: t.Any, file: t.Any) -> t.Any:
    settings = twine.settings.Settings(
        repository_url=self.url,
        username=self._username,
        password=self._password,
    )
    # TODO: use twine Repository object instead of command?
    twine_upload(settings, [file])
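# A hypothetical call site for the method above; `TwineUploader` and its
# constructor arguments are made up for illustration, and only upload()'s
# signature comes from the snippet.
uploader = TwineUploader(
    url="https://test.pypi.org/legacy/",
    username="__token__",
    password="pypi-...",  # API token placeholder
)
uploader.upload(project="mypkg", file="dist/mypkg-1.0.0-py3-none-any.whl")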