def save_user_token(url, cookies_dict):
    """Persist the user's auth token for *url* in the local config.

    Loads the existing config (creating an empty one if absent), records
    the remote as the current remote, and stores the token found in
    *cookies_dict*. Exits the process with status 1 when no token was
    returned (invalid credentials) and no stored token may be reused.
    """
    config = local_config.get_config()
    if config is None:
        config = {}
    config.setdefault('remotes', {})

    remote_url = get_remote_url(url)
    is_new_remote_or_null = (
        remote_url not in config['remotes']
        or config['remotes'][remote_url] is None
    )

    # Decide what are we going to do if user entered invalid username or password:
    # either use `current_user_token` if it exists or raise an error
    allow_login_if_current_user_token_is_set = False

    user_token = None
    if 'token' not in cookies_dict:
        if is_new_remote_or_null or not allow_login_if_current_user_token_is_set:
            logger.error('Unauthorized: invalid username and/or password.')
            # sys.exit for consistency with the rest of the module (was bare exit()).
            sys.exit(1)
        else:
            user_token = config['remotes'][remote_url]['current_user_token']
    else:
        user_token = cookies_dict['token']

    config['current_remote'] = remote_url
    config['remotes'].setdefault(remote_url, {})
    config['remotes'][remote_url]['current_user_token'] = user_token
    local_config.save_config(config)
    logger.info('Logged in successfully.')
def list_remotes(args):
    """Print every remote known to the local config (verbose mode only)."""
    if not args.verbose:
        return
    remotes = local_config.get_all_remotes()
    logger.info('Remotes:')
    logger.log('\n: '.join(remotes), print_header=False)
def push(args):
    """Validate, zip and upload the project, then optionally tail the build.

    Exits with status 1 when the URL or the project root is missing;
    returns early after validation when --validate-only is set.
    """
    infraboxcli.env.check_project_root(args)
    infraboxcli.env.check_env_cli_token(args)

    # Guard clauses: both a target URL and an existing project root are required.
    if not args.url:
        logger.error('either --url or INFRABOX_URL must be set')
        sys.exit(1)

    if not os.path.isdir(args.project_root):
        logger.error('%s does not exist or is not a directory' % args.project_root)
        sys.exit(1)

    validate_infrabox_file(args)
    if args.validate_only:
        return

    result = upload_zip(args, zipdir(args))
    logger.info(result['url'])

    if args.show_console:
        show_console(result['build']['id'], args)
def logout(args):
    """Drop the stored user token and report whether anything was deleted."""
    if delete_current_user_token():
        logger.info('Successfully logged out.')
    else:
        logger.info('Already logged out.')
def build_and_run(args, job):
    """Dispatch *job* to the runner matching its type and record it as a parent.

    Exits with status 1 on an unknown job type; 'wait' jobs are barriers
    with nothing to execute.
    """
    start_date = datetime.now()
    logger.info("Starting job %s" % job['name'])

    runners = {
        "docker-compose": build_and_run_docker_compose,
        "docker": build_and_run_docker,
    }
    job_type = job['type']
    if job_type in runners:
        runners[job_type](args, job)
    elif job_type == "wait":
        pass  # do nothing
    else:
        logger.error("Unknown job type")
        sys.exit(1)

    end_date = datetime.now()

    # track as parent
    parent_jobs.append({
        "name": job['name'],
        "state": 'finished',
        "start_date": str(start_date),
        "end_date": str(end_date),
        "machine_config": job.get('machine_config', None),
        "depends_on": job.get('depends_on', [])
    })

    logger.info("Finished job %s" % job['name'])
def signal_handler(_, __):
    """Stop the compose stack, remove the generated compose file and exit 0.

    NOTE(review): relies on `compose_file_new`, `env` and `job` from the
    enclosing scope — confirm against the surrounding function.
    """
    logger.info("Stopping docker containers")
    execute(
        ['docker-compose', '-f', compose_file_new, 'stop'],
        env=env,
        cwd=job['build_context'],
    )
    os.remove(compose_file_new)
    sys.exit(0)
def build_docker_image(args, job, image_name, target=None):
    """Run `docker build` for *job*, tagging the result as *image_name*.

    Build arguments come from the job definition plus any --build-arg CLI
    flags; INFRABOX_BUILD_NUMBER defaults to 'local' unless supplied.
    """
    # Build the image
    logger.info("Build docker image")

    build_context = get_build_context(job, args)
    docker_file = os.path.normpath(os.path.join(build_context, job['docker_file']))

    cmd = ['docker', 'build', '-t', image_name, '.', '-f', docker_file]

    if 'build_arguments' in job:
        for arg_name, arg_value in job['build_arguments'].items():
            cmd += ['--build-arg', '%s=%s' % (arg_name, arg_value)]

    for extra_arg in (args.build_arg or []):
        cmd += ['--build-arg', extra_arg]

    # Inject the default build number only when the caller did not set one.
    caller_set_build_number = args.build_arg and any(
        a.startswith("INFRABOX_BUILD_NUMBER=") for a in args.build_arg)
    if not caller_set_build_number:
        cmd += ['--build-arg', 'INFRABOX_BUILD_NUMBER=local']

    # memory limit
    if not args.unlimited:
        cmd += ['-m', '%sm' % job['resources']['limits']['memory']]

    if target:
        cmd += ['--target', target]

    execute(cmd, cwd=build_context)
def build_and_run_docker(args, job):
    """Build the docker image(s) for *job*, optionally run the container,
    and tag images for the job's deployments.

    Deployments carrying a ``target`` get their own build pass first; the
    plain (target-less) image is then built, optionally run, and tagged for
    the remaining deployments.
    """
    create_infrabox_directories(args, job)

    image_name = None
    if args.tag:
        image_name = args.tag
    else:
        # Derive a docker-safe image name from project + job name.
        image_name = args.project_name + '_' + job['name']
        image_name = image_name.replace("/", "-")
        image_name = image_name.lower()

    deployments = job.get('deployments', [])
    new_images = []
    if deployments:
        for d in deployments:
            target = d.get('target', None)
            # NOTE(review): target-less deployments are skipped here only
            # when build_only is explicitly False; when build_only is True
            # they are built/tagged here AND tagged again after the loop
            # below — confirm this double handling is intended.
            if not target and not job.get('build_only', True):
                continue
            build_docker_image(args, job, image_name, target=target)
            new_images.extend(tag_docker_image(image_name, [d]))  # tag when target is set

    build_docker_image(args, job, image_name)

    if not job.get('build_only', True):
        run_container(args, job, image_name)

    new_images.extend(
        tag_docker_image(image_name,
                         filter(lambda d: 'target' not in d, deployments)))  # tag when target is _not_ set

    for new_image in new_images:
        logger.info(new_image)
def add_project_token(args):
    """Create a new auth token for the current project and print it once.

    Returns the HTTP response, or None when the server rejected the request.
    """
    check_project_is_set(args)
    token_url = args.url + api_projects_endpoint_url + args.project_id + '/tokens'
    payload = {
        'description': args.description,
        #TODO<Steffen> when scope push/pull functionality is implemented,
        # delete following 2 lines and uncomment next 2 lines
        'scope_push': True,
        'scope_pull': True
        #'scope_push': args.scope_push,
        #'scope_pull': args.scope_pull
    }
    response = post(token_url, payload, get_user_headers(),
                    verify=args.ca_bundle, timeout=60)

    if response.status_code != 200:
        logger.error(response.json()['message'])
        return

    # Print project token to the CLI
    logger.info('Authentication Token:' + '\nPlease save your token at a secure place. We will not show it to you again.\n')
    logger.log(response.json()['data']['token'], print_header=False)
    return response
def delete_project_token_by_id(args):
    """Delete the project token identified by ``args.id``; echo the reply."""
    check_project_is_set(args)
    token_url = (args.url + api_projects_endpoint_url + args.project_id
                 + '/tokens/' + args.id)
    response = delete(token_url, get_user_headers(), verify=args.ca_bundle, timeout=60)
    logger.info(response.json()['message'])
    return response
def validate(args):
    """Validate the project's infrabox definition file.

    Exits with status 1 when the project root is missing or not a directory.
    """
    if not os.path.isdir(args.project_root):
        logger.error('%s does not exist or is not a directory' % args.project_root)
        sys.exit(1)

    validate_infrabox_json(args)
    # Fixed message grammar (was: 'No issues found infrabox.json').
    logger.info("No issues found in infrabox.json")
def get_project_token_id_by_description(args):
    """Return the id of the token whose description matches, else None."""
    for token in get_project_tokens(args).json():
        if token['description'] == args.description:
            return token['id']
    logger.info('Token with such a description does not exist.')
    return None
def get_project_id_by_name(args):
    """Return the id of the project named ``args.remote_project_name``, else None."""
    for project in get_projects(args).json():
        if project['name'] == args.remote_project_name:
            return project['id']
    logger.info('Project with such a name does not exist.')
    return None
def get_project_by_id(args):
    """Return the project dict matching ``args.project_id``, else None."""
    for project in get_projects(args).json():
        if project['id'] == args.project_id:
            return project
    logger.info('Project with such an id does not exist.')
    return None
def get_secret_id_by_name(args):
    """Return the id of the secret named ``args.name``, else None."""
    for secret in get_secrets(args).json():
        if secret['name'] == args.name:
            return secret['id']
    logger.info('Secret with such a name does not exist.')
    return None
def add_secret(args):
    """Create a secret (name/value pair) on the current project."""
    check_project_is_set(args)
    endpoint = args.url + api_projects_endpoint_url + args.project_id + '/secrets'
    payload = {'name': args.name, 'value': args.value}
    response = post(endpoint, payload, get_user_headers(),
                    verify=args.ca_bundle, timeout=60)
    logger.info(response.json()['message'])
    return response
def list_projects(args):
    """Print name/id/type/visibility of every project (verbose mode only)."""
    if not args.verbose:
        return
    all_projects = get_projects(args).json()
    logger.info('Projects:')
    lines = [
        'Name: {}\nId: {}\nType: {}\nPublic: {}\n---\n'.format(
            p['name'], p['id'], p['type'], p['public'])
        for p in all_projects
    ]
    logger.log(''.join(lines), print_header=False)
def delete_project_by_id(args):
    """Delete the project with id ``args.id``; log the server's message."""
    infraboxcli.env.check_env_url(args)
    endpoint = args.url + api_projects_endpoint_url + args.id
    response = delete(endpoint, headers=get_user_headers(),
                      verify=args.ca_bundle, timeout=60)
    message = response.json()['message']
    if response.status_code == 200:
        logger.info(message)
    else:
        logger.error(message)
    return response
def list_secrets(args):
    """Print name and id of every secret on the project (verbose mode only)."""
    if args.verbose:
        all_secrets = get_secrets(args).json()
        # Fixed typo in the header (was 'Secrects:').
        logger.info('Secrets:')
        msg = ""
        for secret in all_secrets:
            msg += 'Name: %s' % secret['name'] \
                   + '\nId: %s' % secret['id'] \
                   + '\n---\n'
        logger.log(msg, print_header=False)
def list_collaborators(args):
    """Print username and e-mail of each collaborator (verbose mode only)."""
    if not args.verbose:
        return
    all_collaborators = get_collaborators(args).json()
    logger.info('Collaborators:')
    parts = []
    for person in all_collaborators:
        parts.append('Username: %s' % person['username'])
        parts.append('\nE-mail: %s' % person['email'])
        parts.append('\n---\n')
    logger.log(''.join(parts), print_header=False)
def on_job_update(*args):
    """Socket event handler: track job state changes and exit when all done.

    Maintains the module-level ``jobs`` map, subscribes to the console of
    newly seen jobs, and terminates the process once no job is active any
    more — exit code 1 if any job failed.
    """
    u = args[0]['data']
    job = u['job']
    job_id = job['id']

    if job_id not in jobs:
        # First sighting: subscribe to its console and assign a color.
        s.emit('listen:console', job_id)
        color = colors[len(jobs) % len(colors)]
        job['color'] = color
        jobs[job_id] = job
        global job_name_len
        job_name_len = max(job_name_len, len(job['name']))
    else:
        jobs[job_id]['state'] = job['state']

    # no jobs yet
    if not jobs:
        return

    # check if create job failed
    if len(jobs) == 1:
        for job_id in jobs:
            state = jobs[job_id]['state']
            name = jobs[job_id]['name']
            if state == 'failure' or state == 'error' or state == 'killed':
                logger.error("Job %s failed with '%s'" % (name, state))
                sys.exit(1)

    # wait until we received the real jobs
    if len(jobs) < 2:
        return

    rc = 0
    active = False
    for job_id in jobs:
        state = jobs[job_id]['state']
        if state == 'failure' or state == 'error':
            rc = 1
        elif state == 'scheduled' or state == 'queued' or state == 'running':
            active = True

    if not active:
        for job_id in jobs:
            state = jobs[job_id]['state']
            name = jobs[job_id]['name']
            if state == 'finished':
                # Bug fix: interpolate the job *name* (previously used
                # `state`, printing "Job finished finished successfully").
                logger.info("Job %s finished successfully" % name)
            else:
                logger.error("Job %s failed with '%s'" % (name, state))
        sys.exit(rc)
def run_docker_image(args, job):
    """Run the job's prebuilt image and tag it for each deployment."""
    create_infrabox_directories(args, job)

    image_name = job['image'].replace('$INFRABOX_BUILD_NUMBER', 'local')
    if job.get('run', True):
        run_container(args, job, image_name)

    for deployment in job.get('deployments', []):
        tag = deployment.get('tag', 'build_local')
        new_image_name = "%s/%s:%s" % (deployment['host'], deployment['repository'], tag)
        logger.info("Tagging image: %s" % new_image_name)
        execute(['docker', 'tag', image_name, new_image_name])
def list_project_tokens(args):
    """Print description/id/scopes of each project token (verbose mode only)."""
    if not args.verbose:
        return
    all_project_tokens = get_project_tokens(args).json()
    logger.info('Project tokens:')
    chunks = []
    for token in all_project_tokens:
        chunks.append('Description: %s' % token['description'])
        chunks.append('\nId: %s' % token['id'])
        chunks.append('\nScope push: %s' % token['scope_push'])
        chunks.append('\nScope pull: %s' % token['scope_pull'])
        chunks.append('\n---\n')
    logger.log(''.join(chunks), print_header=False)
def upload_zip(args, f):
    """POST the zipped project *f* to the host's upload endpoint.

    Exits with status 1 when the server rejects the upload; otherwise
    returns the response's 'data' payload.
    """
    logger.info('Uploading ...')
    endpoint = '%s/v1/project/%s/upload' % (args.host, args.project_id)
    r = requests.post(endpoint,
                      files={'project.zip': f},
                      headers={'Authorization': args.token},
                      timeout=120)
    d = r.json()
    if r.status_code != 200:
        logger.error("Upload failed: %s" % d['message'])
        sys.exit(1)
    return d['data']
def check_project_is_set(args):
    """Resolve ``args.project_id`` from the remote project name, or exit(1).

    Logs the project name at most once (flagged via
    ``args.project_name_printed``) and returns True on success.
    """
    infraboxcli.env.check_env_cli_token(args)

    if args.remote_project_name:
        args.project_id = get_project_id_by_name(args)
        if args.project_id is not None:
            # Print the name only the first time, and not for the default project.
            first_time = ('project_name_printed' not in args
                          and 'using_default_project' not in args)
            if first_time:
                logger.info('Project: {project_name}'.format(
                    project_name=args.remote_project_name))
                args.project_name_printed = True
            return True
    exit(1)
def remove_collaborator(args):
    """Remove the collaborator matching ``args.username`` from the project."""
    check_project_is_set(args)

    all_project_collaborators = get_collaborators(args).json()
    collaborator_id = next(
        (c['id'] for c in all_project_collaborators
         if c['username'] == args.username),
        None)

    if collaborator_id is None:
        logger.info('Specified user is not in collaborators list.')
        return

    endpoint = (args.url + api_projects_endpoint_url + args.project_id
                + '/collaborators/' + collaborator_id)
    response = delete(endpoint, get_user_headers(), verify=args.ca_bundle, timeout=60)
    logger.info(response.json()['message'])
    return response
def zipdir(args):
    """Zip the project root (honouring .dockerignore) into a temp file.

    Returns the open temporary file, rewound to position 0.
    """
    logger.info('compressing %s' % args.project_root)

    dockerignore = os.path.join(args.project_root, '.dockerignore')
    ignore_list = []
    if os.path.exists(dockerignore):
        logger.info('Using .dockerignore')
        with open(dockerignore) as di:
            for entry in di.read().splitlines():
                entry = entry.strip()
                if not entry.startswith("#"):
                    ignore_list.append(entry)

    ft = tempfile.TemporaryFile()
    ziph = zipfile.ZipFile(ft, 'w', zipfile.ZIP_DEFLATED)
    add_files(args, ignore_list, args.project_root, ziph)
    ziph.close()

    ft.seek(0, os.SEEK_END)
    size = ft.tell()
    logger.info('finished, file size is %s kb' % (size / 1024))
    ft.seek(0)
    return ft
def print_status(args):
    """Print a summary of the current project (verbose mode only).

    Exits with status 1 when no project can be resolved.
    """
    if not args.verbose:
        return

    infraboxcli.env.check_env_cli_token(args)

    # Bug fix: without this default, neither branch below may run and the
    # `is None` check would raise NameError instead of the intended error.
    project = None
    if args.remote_project_name:
        project = get_project_by_name(args)
    elif args.project_id:
        project = get_project_by_id(args)

    if project is None:
        logger.error('Current project is not set.')
        exit(1)

    num_collaborators = len(get_collaborators(args).json())
    num_tokens = len(get_project_tokens(args).json())
    num_secrets = len(get_secrets(args).json())

    logger.info('Project status:')
    msg = 'Name: {}\nId: {}\nType: {}\nPublic: {}\n---\n' \
          + 'Total collaborators: {}\nTotal tokens: {}\nTotal secrets: {}\n---\n'
    logger.log(msg.format(project['name'], project['id'], project['type'],
                          project['public'], num_collaborators, num_tokens,
                          num_secrets),
               print_header=False)
def upload_zip(args, f):
    """POST the zipped project *f* to the API's upload endpoint.

    Dumps the raw response body and re-raises when it is not valid JSON;
    exits with status 1 when the server rejects the upload; otherwise
    returns the response's 'data' payload.
    """
    logger.info('Uploading ...')
    url = '%s/api/v1/projects/%s/upload/' % (args.url, args.project_id)
    files = {'project.zip': f}
    headers = {'Authorization': 'bearer ' + args.token}
    r = requests.post(url, files=files, headers=headers, timeout=120,
                      verify=args.ca_bundle)

    try:
        d = r.json()
    except ValueError:
        # Narrowed from a bare `except:`: requests raises a ValueError
        # subclass when the body is not JSON. Print it for diagnosis.
        print(r.text)
        raise

    if r.status_code != 200:
        logger.error("Upload failed: %s" % d['message'])
        sys.exit(1)

    return d['data']
def create_project(args):
    """Create a new project after validating visibility and type flags.

    Returns the HTTP response, or None when validation fails locally.
    """
    infraboxcli.env.check_env_url(args)

    # Exactly one of --public / --private must be chosen.
    if not args.private and not args.public:
        logger.error('Specify if your project is going to be public or private, please.')
        return
    if args.private and args.public:
        logger.error('Project can\'t be public and private simultaneously. '
                     + 'Choose only one option, please.')
        return

    is_private_project = not args.public

    args.type = args.type.lower()
    if args.type not in allowed_project_types:
        logger.error('Provided project type is not supported.'
                     + '\nAllowed project types are: [ {allowed_types} ]'
                     .format(allowed_types=', '.join(allowed_project_types)))
        return

    response = post(args.url + api_projects_endpoint_url,
                    data={
                        'name': args.name,
                        'type': args.type,
                        'private': is_private_project
                    },
                    headers=get_user_headers(),
                    verify=args.ca_bundle,
                    timeout=60)

    if response.status_code == 200:
        logger.info(response.json()['message'])
    else:
        logger.error(response.json()['message'])
    return response