def put(self, **kwargs):
    """Update a deploy.

    The only supported mutation is cancellation: ``status=cancelled``
    while the underlying task is still pending or in progress.

    Returns 404 for an unknown deploy, 400 for an invalid status value or
    state transition, otherwise the serialized deploy.
    """
    deploy = self._get_deploy(**kwargs)
    if deploy is None:
        return self.error("Invalid deploy", name="invalid_resource", status_code=404)

    with lock(redis, f"deploy:{deploy.id}", timeout=5):
        # we have to refetch in order to ensure lock state changes
        deploy = Deploy.query.get(deploy.id)
        task = Task.query.get(deploy.task_id)

        args = self.put_parser.parse_args()

        # Default so the notification check below never reads an unbound name.
        did_cancel = False
        if args.status:
            # FIX: these were ``assert`` statements, which are stripped under
            # ``python -O`` — that would let any status value flip an already
            # finished task to cancelled. Validate explicitly instead.
            if args.status != "cancelled":
                return self.error(
                    "Invalid status", name="invalid_status", status_code=400
                )
            if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
                return self.error(
                    "Task is already finished", name="invalid_status", status_code=400
                )

            # Only a task that never started gets the finished notification
            # here; an in-progress task reports when its runner observes the
            # cancellation — presumably, TODO confirm against the runner.
            did_cancel = task.status == TaskStatus.pending

            task.status = TaskStatus.cancelled
            db.session.add(task)

        db.session.commit()

    if args.status and did_cancel:
        send_task_notifications(task, NotifierEvent.TASK_FINISHED)

    return self.respond(serialize(deploy))
def test_task_started(self, mock_put):
    """A TASK_STARTED event is delivered once to the dummy notifier."""
    send_task_notifications(self.task, NotifierEvent.TASK_STARTED)

    mock_put.assert_called_once_with(
        config={},
        event=NotifierEvent.TASK_STARTED,
        task=self.task,
        type='dummy',
    )
def test_task_started(self, mock_put):
    """Notifier.put is invoked exactly once for a TASK_STARTED event."""
    send_task_notifications(self.task, NotifierEvent.TASK_STARTED)

    expected = dict(
        task=self.task,
        type='dummy',
        config={},
        event=NotifierEvent.TASK_STARTED,
    )
    mock_put.assert_called_once_with(**expected)
def execute_deploy(deploy_id):
    """Execute the task behind a deploy and record its outcome.

    Moves the task to ``in_progress``, runs it via :class:`TaskRunner`,
    and forces the task to ``failed`` if the runner exits without leaving
    it in a terminal status. Sends TASK_STARTED / TASK_FINISHED
    notifications around the run.
    """
    logging.debug(
        "ExecuteDeploy fired with %d active thread(s)", threading.active_count()
    )

    with lock(redis, f"deploy:{deploy_id}", timeout=5):
        deploy = Deploy.query.get(deploy_id)
        # FIX: the original dereferenced deploy.task_id before any check, so
        # a missing Deploy raised AttributeError instead of logging a warning.
        if deploy is None:
            logging.warning(
                "ExecuteDeploy fired with missing Deploy(id=%s)", deploy_id
            )
            return

        task = Task.query.get(deploy.task_id)
        if not task:
            logging.warning(
                "ExecuteDeploy fired with missing Deploy(id=%s)", deploy_id
            )
            return

        if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
            logging.warning(
                "ExecuteDeploy fired with finished Deploy(id=%s)", deploy_id
            )
            return

        task.date_started = datetime.utcnow()
        task.status = TaskStatus.in_progress
        db.session.add(task)
        db.session.commit()

        send_task_notifications(task, NotifierEvent.TASK_STARTED)

        provider_config = task.provider_config

        # wipe the log incase this is a retry
        LogChunk.query.filter(LogChunk.task_id == task.id).delete()

        taskrunner = TaskRunner(
            task=task,
            timeout=provider_config.get(
                "timeout", current_app.config["DEFAULT_TIMEOUT"]
            ),
            read_timeout=provider_config.get(
                "read_timeout", current_app.config["DEFAULT_READ_TIMEOUT"]
            ),
        )
        taskrunner.start()
        taskrunner.wait()

        # reload the task from the database due to subprocess changes
        db.session.expire(task)
        db.session.refresh(task)

        if task.status in (TaskStatus.pending, TaskStatus.in_progress):
            logging.error("Task(id=%s) did not finish cleanly", task.id)
            task.status = TaskStatus.failed
            task.date_finished = datetime.utcnow()
            db.session.add(task)
            db.session.commit()

        send_task_notifications(task, NotifierEvent.TASK_FINISHED)
def execute_deploy(deploy_id):
    """Execute the task behind a deploy and record its outcome.

    Moves the task to ``in_progress``, runs it via ``TaskRunner``, and
    forces the task to ``failed`` if the runner exits without leaving it
    in a terminal status. Sends TASK_STARTED / TASK_FINISHED
    notifications around the run.
    """
    logging.debug('ExecuteDeploy fired with %d active thread(s)', threading.active_count())

    with lock(redis, 'deploy:{}'.format(deploy_id), timeout=5):
        deploy = Deploy.query.get(deploy_id)
        # FIX: the original dereferenced deploy.task_id before any check, so
        # a missing Deploy raised AttributeError instead of logging a warning.
        if deploy is None:
            logging.warning('ExecuteDeploy fired with missing Deploy(id=%s)', deploy_id)
            return

        task = Task.query.get(deploy.task_id)
        if not task:
            logging.warning('ExecuteDeploy fired with missing Deploy(id=%s)', deploy_id)
            return

        if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
            logging.warning('ExecuteDeploy fired with finished Deploy(id=%s)', deploy_id)
            return

        task.date_started = datetime.utcnow()
        task.status = TaskStatus.in_progress
        db.session.add(task)
        db.session.commit()

        send_task_notifications(task, NotifierEvent.TASK_STARTED)

        provider_config = task.provider_config

        # wipe the log incase this is a retry
        LogChunk.query.filter(
            LogChunk.task_id == task.id,
        ).delete()

        taskrunner = TaskRunner(
            task=task,
            timeout=provider_config.get('timeout', current_app.config['DEFAULT_TIMEOUT']),
            read_timeout=provider_config.get('read_timeout', current_app.config['DEFAULT_READ_TIMEOUT']),
        )
        taskrunner.start()
        taskrunner.wait()

        # reload the task from the database due to subprocess changes
        db.session.expire(task)
        db.session.refresh(task)

        if task.status in (TaskStatus.pending, TaskStatus.in_progress):
            logging.error('Task(id=%s) did not finish cleanly', task.id)
            task.status = TaskStatus.failed
            task.date_finished = datetime.utcnow()
            db.session.add(task)
            db.session.commit()

        send_task_notifications(task, NotifierEvent.TASK_FINISHED)
def execute_task(task_id):
    """Run a task via ``TaskRunner`` and record its final status.

    Skips tasks that are missing or already finished; otherwise emits
    TASK_STARTED, runs the provider, and marks the task ``failed`` if the
    runner exits without reaching a terminal status, then emits
    TASK_FINISHED.
    """
    logging.debug('ExecuteTask fired with %d active thread(s)', threading.active_count())

    task = Task.query.get(task_id)
    if task is None:
        logging.warning('ExecuteTask fired with missing Task(id=%s)', task_id)
        return

    if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
        logging.warning('ExecuteTask fired with finished Task(id=%s)', task_id)
        return

    send_task_notifications(task, NotifierEvent.TASK_STARTED)

    config = task.provider_config

    # clear any log output left over from a previous attempt (retries
    # reuse the same task row)
    LogChunk.query.filter(LogChunk.task_id == task.id).delete()

    default_timeout = current_app.config['DEFAULT_TIMEOUT']
    default_read_timeout = current_app.config['DEFAULT_READ_TIMEOUT']
    runner = TaskRunner(
        task=task,
        timeout=config.get('timeout', default_timeout),
        read_timeout=config.get('read_timeout', default_read_timeout),
    )
    runner.start()
    runner.wait()

    # the runner works in a subprocess, so pull fresh state from the db
    db.session.expire(task)
    db.session.refresh(task)

    if task.status in (TaskStatus.pending, TaskStatus.in_progress):
        logging.error('Task(id=%s) did not finish cleanly', task.id)
        task.status = TaskStatus.failed
        task.date_finished = datetime.utcnow()
        db.session.add(task)
        db.session.commit()

    send_task_notifications(task, NotifierEvent.TASK_FINISHED)
def put(self, **kwargs):
    """Update a task.

    The only supported mutation is cancellation: ``status=cancelled``
    while the task is still pending or in progress.

    Returns 404 for an unknown task, 400 for an invalid status value or
    state transition, otherwise the serialized task.
    """
    task = self._get_task(**kwargs)
    if task is None:
        return self.error('Invalid task', name='invalid_resource', status_code=404)

    with lock(redis, 'task:{}'.format(task.id), timeout=5):
        # we have to refetch in order to ensure lock state changes
        task = Task.query.get(task.id)

        args = self.put_parser.parse_args()

        # Default so the notification check below never reads an unbound name.
        did_cancel = False
        if args.status:
            # FIX: these were ``assert`` statements, which are stripped under
            # ``python -O`` — that would let any status value flip an already
            # finished task to cancelled. Validate explicitly instead.
            if args.status != 'cancelled':
                return self.error(
                    'Invalid status', name='invalid_status', status_code=400)
            if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
                return self.error(
                    'Task is already finished', name='invalid_status',
                    status_code=400)

            # Only a task that never started gets the finished notification
            # here; an in-progress task reports when its runner observes the
            # cancellation — presumably, TODO confirm against the runner.
            did_cancel = task.status == TaskStatus.pending

            task.status = TaskStatus.cancelled
            db.session.add(task)

        db.session.commit()

    if args.status and did_cancel:
        send_task_notifications(task, NotifierEvent.TASK_FINISHED)

    return self.respond(serialize(task))
def test_task_finished(self, mock_put):
    """TASK_FINISHED events are not forwarded to the notifier."""
    send_task_notifications(self.task, NotifierEvent.TASK_FINISHED)

    assert mock_put.called is False
def post(self):
    """
    Given any constraints for a task are within acceptable bounds, create
    a new task and enqueue it.
    """
    args = self.post_parser.parse_args()

    app = App.query.filter(App.name == args.app).first()
    if not app:
        return self.error('Invalid app', name='invalid_resource', status_code=404)

    repo = Repository.query.get(app.repository_id)

    workspace = Workspace(
        path=repo.get_path(),
    )

    vcs_backend = vcs.get(
        repo.vcs,
        url=repo.url,
        workspace=workspace,
    )
    # ensure the local clone exists and is current before resolving refs;
    # the lock serializes repo updates across concurrent requests
    with lock(redis, 'repo:update:{}'.format(repo.id)):
        vcs_backend.clone_or_update()

    ref = args.ref or app.get_default_ref(args.env)

    # look for our special refs (prefixed via a colon)
    # TODO(dcramer): this should be supported outside of just this endpoint
    if ref.startswith(':'):
        sha = self._get_internal_ref(app, args.env, ref)
        if not sha:
            return self.error('Invalid ref', name='invalid_ref', status_code=400)
    else:
        try:
            sha = vcs_backend.get_sha(ref)
        except vcs.UnknownRevision:
            return self.error('Invalid ref', name='invalid_ref', status_code=400)

    if not args.force:
        # run configured pre-flight checks; a pending check is allowed
        # through — only a hard failure blocks creation
        for check_config in app.checks:
            check = checks.get(check_config['type'])
            try:
                check.check(app, sha, check_config['config'])
            except CheckPending:
                pass
            except CheckError as e:
                return self.error(
                    message=unicode(e),
                    name='check_failed',
                )

    # serialize task creation per app — presumably to keep
    # TaskSequence numbers consistent; TODO confirm
    with lock(redis, 'task:create:{}'.format(app.id), timeout=5):
        # TODO(dcramer): this needs to be a get_or_create pattern and
        # ideally moved outside of the lock
        user = User.query.filter(User.name == args.user).first()
        if not user:
            user = User(name=args.user)
            db.session.add(user)
            db.session.flush()

        task = Task(
            app_id=app.id,
            environment=args.env,
            number=TaskSequence.get_clause(app.id, args.env),
            name=TaskName.deploy,
            # TODO(dcramer): ref should default based on app config
            ref=ref,
            sha=sha,
            status=TaskStatus.pending,
            user_id=user.id,
            provider=app.provider,
            data={
                'force': args.force,
                'provider_config': app.provider_config,
                'notifiers': app.notifiers,
                'checks': app.checks,
            },
        )
        db.session.add(task)
        db.session.commit()

    send_task_notifications(task, NotifierEvent.TASK_QUEUED)

    return self.respond(serialize(task), status_code=201)
def test_task_finished(self, mock_put):
    """The notifier is never invoked for a TASK_FINISHED event."""
    send_task_notifications(self.task, NotifierEvent.TASK_FINISHED)

    assert mock_put.call_count == 0
def post(self):
    """
    Given any constraints for a task are within acceptable bounds, create
    a new task and enqueue it.
    """
    args = self.post_parser.parse_args()

    user = get_current_user()
    if not user:
        # non-session (API) callers must name the acting user explicitly
        username = args.user
        if not username:
            return self.error('Missing required argument "user"', status_code=400)
        with lock(redis, f"user:create:{username}", timeout=5):
            # TODO(dcramer): this needs to be a get_or_create pattern and
            # ideally moved outside of the lock
            user = User.query.filter(User.name == username).first()
            if not user:
                user = User(name=username)
                db.session.add(user)
                db.session.flush()
    elif args.user:
        # session-authenticated requests always act as the session user
        return self.error(
            "Cannot specify user when using session authentication.",
            status_code=400,
        )

    app = App.query.filter(App.name == args.app).first()
    if not app:
        return self.error("Invalid app", name="invalid_resource", status_code=404)

    deploy_config = TaskConfig.query.filter(
        TaskConfig.app_id == app.id,
        TaskConfig.type == TaskConfigType.deploy).first()
    if not deploy_config:
        return self.error("Missing deploy config", name="missing_conf", status_code=404)

    params = None

    repo = Repository.query.get(app.repository_id)

    workspace = Workspace(path=repo.get_path())

    vcs_backend = vcs.get(repo.vcs, url=repo.url, workspace=workspace)
    # ensure the local clone exists and is current before resolving refs;
    # the lock serializes repo updates across concurrent requests
    with lock(redis, f"repo:update:{repo.id}"):
        vcs_backend.clone_or_update()

    ref = args.ref or app.get_default_ref(args.env)

    # look for our special refs (prefixed via a colon)
    # TODO(dcramer): this should be supported outside of just this endpoint
    if ref.startswith(":"):
        sha = self._get_internal_ref(app, args.env, ref)
        if not sha:
            return self.error("Invalid ref", name="invalid_ref", status_code=400)
    else:
        try:
            sha = vcs_backend.get_sha(ref)
        except vcs.UnknownRevision:
            return self.error("Invalid ref", name="invalid_ref", status_code=400)

    if args.params is not None:
        params = args.params

    if not args.force:
        # run configured pre-flight checks; a pending check is allowed
        # through — only a hard failure blocks creation
        for check_config in deploy_config.checks:
            check = checks.get(check_config["type"])
            try:
                check.check(app, sha, check_config["config"])
            except CheckPending:
                pass
            except CheckError as e:
                return self.error(message=str(e), name="check_failed")

    # serialize deploy creation per app — presumably to keep
    # DeploySequence numbers consistent; TODO confirm
    with lock(redis, f"deploy:create:{app.id}", timeout=5):
        task = Task(
            app_id=app.id,
            # TODO(dcramer): ref should default based on app config
            ref=ref,
            sha=sha,
            params=params,
            status=TaskStatus.pending,
            user_id=user.id,
            provider=deploy_config.provider,
            data={
                "force": args.force,
                "provider_config": deploy_config.provider_config,
                "notifiers": deploy_config.notifiers,
                "checks": deploy_config.checks,
            },
        )
        db.session.add(task)
        # flush + refresh so task.id is populated for the Deploy row below
        db.session.flush()
        db.session.refresh(task)

        deploy = Deploy(
            task_id=task.id,
            app_id=app.id,
            environment=args.env,
            number=DeploySequence.get_clause(app.id, args.env),
        )
        db.session.add(deploy)
        db.session.commit()

    send_task_notifications(task, NotifierEvent.TASK_QUEUED)

    return self.respond(serialize(deploy), status_code=201)
def post(self):
    """
    Given any constraints for a task are within acceptable bounds, create
    a new task and enqueue it.
    """
    args = self.post_parser.parse_args()

    user = get_current_user()
    if not user:
        # non-session (API) callers must name the acting user explicitly
        username = args.user
        if not username:
            return self.error('Missing required argument "user"', status_code=400)
        with lock(redis, 'user:create:{}'.format(username), timeout=5):
            # TODO(dcramer): this needs to be a get_or_create pattern and
            # ideally moved outside of the lock
            user = User.query.filter(User.name == username).first()
            if not user:
                user = User(name=username)
                db.session.add(user)
                db.session.flush()
    elif args.user:
        # session-authenticated requests always act as the session user
        return self.error(
            'Cannot specify user when using session authentication.',
            status_code=400)

    app = App.query.filter(App.name == args.app).first()
    if not app:
        return self.error('Invalid app', name='invalid_resource', status_code=404)

    params = None

    repo = Repository.query.get(app.repository_id)

    workspace = Workspace(path=repo.get_path(), )

    vcs_backend = vcs.get(
        repo.vcs,
        url=repo.url,
        workspace=workspace,
    )
    # ensure the local clone exists and is current before resolving refs;
    # the lock serializes repo updates across concurrent requests
    with lock(redis, 'repo:update:{}'.format(repo.id)):
        vcs_backend.clone_or_update()

    ref = args.ref or app.get_default_ref(args.env)

    # look for our special refs (prefixed via a colon)
    # TODO(dcramer): this should be supported outside of just this endpoint
    if ref.startswith(':'):
        sha = self._get_internal_ref(app, args.env, ref)
        if not sha:
            return self.error('Invalid ref', name='invalid_ref', status_code=400)
    else:
        try:
            sha = vcs_backend.get_sha(ref)
        except vcs.UnknownRevision:
            return self.error('Invalid ref', name='invalid_ref', status_code=400)

    if args.params is not None:
        params = args.params

    if not args.force:
        # run configured pre-flight checks; a pending check is allowed
        # through — only a hard failure blocks creation
        for check_config in app.checks:
            check = checks.get(check_config['type'])
            try:
                check.check(app, sha, check_config['config'])
            except CheckPending:
                pass
            except CheckError as e:
                return self.error(
                    message=unicode(e),
                    name='check_failed',
                )

    # serialize task creation per app — presumably to keep
    # TaskSequence numbers consistent; TODO confirm
    with lock(redis, 'task:create:{}'.format(app.id), timeout=5):
        task = Task(
            app_id=app.id,
            environment=args.env,
            number=TaskSequence.get_clause(app.id, args.env),
            # TODO(dcramer): ref should default based on app config
            ref=ref,
            sha=sha,
            params=params,
            status=TaskStatus.pending,
            user_id=user.id,
            provider=app.provider,
            data={
                'force': args.force,
                'provider_config': app.provider_config,
                'notifiers': app.notifiers,
                'checks': app.checks,
            },
        )
        db.session.add(task)
        db.session.commit()

    send_task_notifications(task, NotifierEvent.TASK_QUEUED)

    return self.respond(serialize(task), status_code=201)