def test_delete_task(tables, client, new_job, new_task):
    new_job.add_task(new_task)
    new_job.save()

    resp = client.delete(ENDPOINT + '/{}'.format(new_task.id), headers=HEADERS)

    assert resp.status_code == HTTPStatus.OK
    assert len(Task.all()) == 0
    assert len(Job.all()) == 1
    # Segments belonging to the deleted task should be removed by cascade
    assert len(CommandSegment.all()) == 0
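# The last assertion above only holds if cascade deletion is configured on the
# Task -> CommandSegment relationship. Below is a rough sketch of what such a
# mapping might look like in SQLAlchemy; the actual project models likely
# differ, so the column names and cascade options here are assumptions.
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Task(Base):
    __tablename__ = 'tasks'
    id = Column(Integer, primary_key=True)
    job_id = Column(Integer, ForeignKey('jobs.id'))
    # Deleting a Task also deletes its CommandSegments
    cmd_segments = relationship('CommandSegment', cascade='all, delete-orphan')


class CommandSegment(Base):
    __tablename__ = 'command_segments'
    id = Column(Integer, primary_key=True)
    task_id = Column(Integer, ForeignKey('tasks.id'))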
def business_get_all(user_id: Optional[int],
                     sync_all: Optional[bool]) -> Tuple[Content, HttpStatusCode]:
    """Fetches either all Task records or only those related to a specific user.

    Allows for synchronizing the state of each Task out of the box.

    In a typical scenario an API client would fetch all records without sync
    and then sync each record individually.
    """
    # TODO Exceptions should never occur here, but this needs more experimenting
    if user_id:
        # Returns [] if a User with the given id does not exist (SQLAlchemy behavior)
        tasks = Task.query.filter(Task.user_id == user_id).all()
    else:
        tasks = Task.all()

    # Syncing could be decoupled from dict conversion with two list comprehensions,
    # but that would make two passes over the tasks instead of one.
    results = []
    for task in tasks:
        if sync_all:
            synchronize(task.id)
        results.append(task.as_dict)
    return {'msg': T['all']['success'], 'tasks': results}, 200
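# The docstring above describes the intended client flow: fetch everything
# without syncing, then synchronize individual records as needed. A minimal
# usage sketch follows, assuming the same `tables` / `new_task` fixtures as the
# delete test and that `new_task.save()` persists the record.
def test_business_get_all_without_sync(tables, new_task):
    new_task.save()

    content, status = business_get_all(user_id=None, sync_all=False)

    assert status == 200
    assert content['msg'] == T['all']['success']
    assert len(content['tasks']) == 1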