def create_node(self):
    """Create and return a new Node populated from this handler's message.

    The node is NOT saved to the database; the caller is responsible
    for persisting it.
    """
    node = Node()
    node.owner = self._msg.owner
    node.text = self._msg.message_text
    return node
def create_node(self):
    """Create or reuse a Node for this message.

    Behavior varies depending on the source Node's repeating status:
    if the Node repeats (and not from completion), spawn a duplicate
    Node as its last child, mark the original DONE, and reschedule the
    message for the original's next scheduled date. Otherwise, simply
    return the source Node.
    """
    src = self._msg.source_node
    if not (src.repeats and not src.repeats_from_completion):
        # Non-repeating: hand back the source node unchanged
        return src
    # Plain fields copied onto the spawned duplicate
    copy_fields = ('owner', 'title', 'slug', 'deadline_date',
                   'deadline_time', 'priority', 'tag_string',
                   'energy', 'time_needed')
    m2m_fields = ('focus_areas',)
    duplicate = Node()
    for name in copy_fields:
        setattr(duplicate, name, getattr(src, name, None))
    duplicate.insert_at(src, position='last-child', save=True)
    # Many-to-many relations must be copied after the duplicate is saved
    for name in m2m_fields:
        for related_obj in getattr(src, name).all():
            getattr(duplicate, name).add(related_obj)
    self._msg.spawned_nodes.add(duplicate)
    # Mark the original DONE; auto_update triggers its rescheduling
    src.todo_state = TodoState.objects.get(abbreviation='DONE')
    src.auto_update = True
    src.save()
    # Reschedule the message itself to the node's new scheduled date
    self._msg.rcvd_date = dt.datetime.combine(
        src.scheduled_date,
        dt.time(0, tzinfo=get_current_timezone()),
    )
    self._msg.save()
    return duplicate
def post(self, request, pk=None, *args, **kwargs):
    """Create a new Node, conducted through JSON format:

    { id: [node primary key],
      title: 'Everything's shiny, captn',
      todo_state: 2,
      etc... }

    Ignores fields related to MPTT for new nodes as these get set
    automatically based on the 'parent' attribute.

    Returns: JSON object of all node fields, with changes.
    """
    payload = request.data.copy()
    if pk is not None:
        # Cannot POST if a node is specified by primary key
        return HttpResponseNotAllowed(['GET', 'PUT'])
    # Create the new node (anonymous users get no owner)
    self.node = Node()
    if not request.user.is_anonymous:
        self.node.owner = request.user
    self.node.save()
    # Strip MPTT bookkeeping fields; they are derived from 'parent'
    for mptt_key in ('id', 'tree_id', 'lft', 'rght', 'level'):
        payload.pop(mptt_key, None)
    self.node.set_fields(payload)
    self.node.save()
    # Reload and serialize the saved node
    self.node = Node.objects.get(pk=self.node.pk)
    serializer = NodeSerializer(self.node, request=request)
    payload = serializer.data
    # Don't keep nodes sent via the public interface
    if request.user.is_anonymous:
        self.node.delete()
    return Response(payload)
def test_defer_message(self):
    """Verify that a message can be rescheduled for the future."""
    deferred_state = TodoState.objects.get(abbreviation="DFRD")
    node = Node(title='Sample Node',
                todo_state=deferred_state,
                owner=User.objects.get(pk=1))
    node.save()
    msg = Message.objects.get(pk=node.deferred_message.pk)
    # Defer the message, then reload it from the database
    target = dt.datetime(2014, 6, 15,
                         tzinfo=timezone.get_current_timezone())
    msg.handler.defer(target)
    msg = Message.objects.get(pk=msg.pk)
    self.assertEqual(msg.rcvd_date, target)
    # The source Node should have been rescheduled as well
    self.assertEqual(msg.source_node.scheduled_date, target.date())
def test_defer_message(self):
    """Verify that a message can be rescheduled for the future."""
    deferred = TodoState.objects.get(abbreviation="DFRD")
    owner = User.objects.get(pk=1)
    node = Node(title='Sample Node', todo_state=deferred, owner=owner)
    node.save()
    msg = node.deferred_message
    msg = Message.objects.get(pk=msg.pk)
    # Defer the message and refresh it from the DB
    future = dt.datetime(2014, 6, 15,
                         tzinfo=timezone.get_current_timezone())
    msg.handler.defer(future)
    msg = Message.objects.get(pk=msg.pk)
    self.assertEqual(msg.rcvd_date, future)
    # Check that the Node was also rescheduled
    self.assertEqual(msg.source_node.scheduled_date, future.date())
def create_node(data, parent=None):
    """Recursively create a Node, then process its children."""
    node = Node(title=data['title'])
    node.save()
    node.set_fields(data)
    # Clear tree fields so the MPTT framework recomputes them from 'parent'
    for tree_field in TREE_FIELDS:
        setattr(node, tree_field, None)
    node.parent = parent
    node.save()
    pk_list.append(node.pk)

    # A direct child lives in the same tree, is nested inside this
    # node's (lft, rght) interval, and sits exactly one level deeper.
    def is_direct_child(item):
        return (item['tree_id'] == data['tree_id']
                and item['lft'] > data['lft']
                and item['rght'] < data['rght']
                and item['level'] == data['level'] + 1)

    for child in filter(is_direct_child, json_list):
        create_node(child, node)
def test_auto_message(self):
    """
    Verify that creating and modifying a DFRD Node will trigger
    Message creation.
    """
    node = Node()
    node.owner = self.user
    node.title = 'New deferred node'
    node.scheduled_date = dt.datetime.now().date()
    dfrd = TodoState.objects.get(abbreviation='DFRD')
    nxt = TodoState.objects.get(abbreviation='NEXT')
    # Create a new deferred node
    node.todo_state = dfrd
    # BUG FIX: the old assertRaises(exc, callable, 'msg') form passed
    # the message string as a positional *argument* to the lambda, so
    # the intended failure message was silently discarded. The
    # context-manager form with msg= reports it properly.
    with self.assertRaises(Message.DoesNotExist,
                           msg='New Node() starts out with a message'):
        node.deferred_message
    node.save()
    node = Node.objects.get(pk=node.pk)
    self.assertTrue(
        isinstance(node.deferred_message, Message)
    )
    self.assertEqual(
        node.deferred_message.handler_path,
        'plugins.deferred'
    )
    # Now make the node NEXT and see that the message disappears
    node.todo_state = nxt
    node.save()
    node = Node.objects.get(pk=node.pk)
    with self.assertRaises(Message.DoesNotExist,
                           msg='Message not removed when Node undeferred'):
        node.deferred_message
def create_node(data, parent=None):
    """Recursively create a Node, then process its children."""
    node = Node(title=data['title'])
    node.save()
    node.set_fields(data)
    # Reset MPTT bookkeeping; it is recalculated on save via 'parent'
    for field in TREE_FIELDS:
        setattr(node, field, None)
    node.parent = parent
    node.save()
    pk_list.append(node.pk)
    # Recurse into entries nested directly under this node
    for entry in json_list:
        same_tree = entry['tree_id'] == data['tree_id']
        nested = entry['lft'] > data['lft'] and entry['rght'] < data['rght']
        one_level_down = entry['level'] == data['level'] + 1
        if same_tree and nested and one_level_down:
            create_node(entry, node)
def import_structure(file=None, string=None, request=None, scope=None):
    """
    Parse either an org-mode file or an org-mode string and save the
    resulting hierarchy to the OrgWolf models in the gtd module.

    Arguments:
    - file: path to an org-mode file (mutually exclusive with string)
    - string: org-mode text (mutually exclusive with file)
    - request: used only to determine the owner of created Nodes;
      falls back to User pk=1
    - scope: Scope to attach to created Nodes; auto-detected from the
      filename when a file is given

    # TODO: rewrite this without PyOrgMode
    """
    # We want pre & post save signals skipped
    if file and string:
        raise AttributeError(
            "Please supply either a file or a string, not both.")
    elif string:
        source = StringIO(string)
    elif file:
        source = io.open(file, 'r', encoding='utf8')
        if not scope:
            # Automatic scope detection from the filename ("<name>.org")
            scope_match = re.search(r'([^/]+)\.org', file)
            if scope_match:
                scope_string = scope_match.groups()[0]
                scope = Scope.objects.filter(name__iexact=scope_string)
                if scope.exists():
                    scope = scope[0]
                else:
                    scope = Scope(name=scope_string, display=scope_string)
                    scope.save()
    else:
        raise AttributeError("Please supply a file or a string")
    # First, build a list of dictionaries that hold the pieces of each line.
    data_list = []
    if request:
        current_user = request.user
    else:
        current_user = User.objects.get(id=1)
    for line in source:
        data_list.append({'original': line})

    def save_text(parent, text):
        # Helper that attaches accumulated body text to a parent node.
        # BUG FIX: previously ignored the `text` parameter and read the
        # closure variable `current_text` instead.
        if parent:
            parent.text = text
            parent.save()
        elif text:
            # Warn the user about some dropped text
            print("---")
            print("Warning, dropping text (no parent node)")
            # BUG FIX: the format string and value were passed as two
            # separate arguments to print()
            print("Text: %s" % text)
            print("---")

    # Now go through each line and see if it matches a regex
    current_indent = 0  # counter
    current_order = 0
    # NOTE(review): parent_stack is created as a plain list but is used
    # below via .head and .push(), which lists do not provide -- a
    # linked-list stack class appears to be intended; confirm against
    # the rest of the repository before relying on this function.
    parent_stack = []
    todo_state_list = TodoState.objects.all()  # Todo: filter by current user
    current_text = ''
    for line in data_list:
        heading_match = HEADING_RE.search(line['original'].strip("\n"))
        if heading_match:
            # It's a heading
            line_indent = len(heading_match.groups()[0])
            line['todo'] = heading_match.groups()[1]
            line['priority'] = heading_match.groups()[2]
            line['heading'] = heading_match.groups()[3]
            line['tag_string'] = heading_match.groups()[4]
            new_node = Node()
            if line_indent > current_indent:
                # New child
                # TODO: what if the user skips a level
                current_indent = current_indent + 1
                current_order = 0
                # Save any text associated with the parent
                parent = getattr(parent_stack.head, 'value', None)
                save_text(parent, current_text)
                current_text = ''
            elif line_indent == current_indent:
                # Another child
                # Save any text associated with the parent
                parent = getattr(parent_stack.head, 'value', None)
                save_text(parent, current_text)
                current_text = ''
                # Adjust the parent
                parent_stack.pop()
            elif line_indent < current_indent:
                # Back up to parent
                # Save any text associated with the parent
                parent = getattr(parent_stack.head, 'value', None)
                save_text(parent, current_text)
                current_text = ''
                parent_stack.pop()
                for x in range(current_indent - line_indent):
                    # Move up the stack
                    current_order = parent_stack.head.value.order
                    parent_stack.pop()
                    current_indent = current_indent - 1
            # See if the 'todo' captured by the regex matches a current
            # TodoState; if not then it's part of the heading
            # TODO: find a way to not destroy whitespace
            if line['todo']:
                found = False
                for todo_state in todo_state_list:
                    if todo_state.abbreviation.lower() == line['todo'].lower():
                        new_node.todo_state = todo_state
                        found = True
                        break
                if not found:
                    if line['heading']:
                        line['heading'] = line['todo'] + ' ' + str(
                            line['heading'])
                    else:
                        line['heading'] = line['todo']
            if current_indent > 1:
                new_node.parent = parent_stack.head.value
            new_node.title = line['heading'] if line['heading'] else ''
            new_node.owner = current_user
            new_node.order = current_order + 10
            new_node.priority = line['priority'] if line['priority'] else ''
            new_node.tag_string = (line['tag_string']
                                   if line['tag_string'] else '')
            new_node.auto_close = False  # Disable closed timestamp
            new_node.save()
            # Add scope (passed as argument or auto-detected)
            if scope:
                new_node.scope.add(scope)
            # Update current state variables
            current_order = new_node.order
            parent_stack.push(new_node)
        else:
            # Some sort of text item
            # Test to see if it's a scheduled, deadline or closed modifier
            time_sensitive_match = TIME_SENSITIVE_RE.findall(line['original'])
            if time_sensitive_match:
                parent = parent_stack.head.value
                for match in time_sensitive_match:
                    # Bump a match for "CLOSED:" up to the 0 and 1 position
                    if match[2] and match[3]:
                        match = (match[2], match[3], "", "")
                    date_match = DATE_RE.search(match[1]).groups()
                    if date_match:
                        # Set some variables to make things easier to read
                        year = int(date_match[0])
                        month = int(date_match[1])
                        day = int(date_match[2])
                        hour = int(date_match[4]) if date_match[4] else 0
                        minute = int(date_match[5]) if date_match[5] else 0
                        naive_datetime = datetime(year, month, day,
                                                  hour, minute)
                        new_datetime = timezone.get_current_timezone(
                        ).localize(naive_datetime)
                        # TODO: set to user's preferred timezone
                        time_specific = bool(date_match[4] and date_match[5])
                        if date_match[6]:
                            # repeating
                            parent.repeats = True
                            parent.repeating_number = date_match[6][1]
                            parent.repeating_unit = date_match[6][2]
                            if date_match[6][0] == "+":
                                parent.repeats_from_completion = False
                            elif date_match[6][0] == ".":
                                parent.repeats_from_completion = True
                        # Set the appropriate fields
                        if match[0] == "SCHEDULED:":
                            parent.scheduled_date = new_datetime.date()
                            if time_specific:
                                parent.scheduled_time = new_datetime.time()
                        elif match[0] == "DEADLINE:":
                            parent.deadline_date = new_datetime.date()
                            if time_specific:
                                # BUG FIX: previously assigned
                                # new_datetime.date() to deadline_time
                                parent.deadline_time = new_datetime.time()
                        elif match[0] == "CLOSED:":
                            parent.closed = new_datetime
                parent.auto_close = False  # Disable closed timestamp
                parent.save()
            else:
                # It's just a regular text item
                current_text += line['original']
class NodeView(APIView):
    """
    API for interacting with Node objects.

    Unauthenticated requests are permitted but do not alter the database.
    Several query parameters have special significance, otherwise query
    parameters are treated as filters:

    - 'context=[integer]': Any query with context (even if it is null)
      is treated as an actions-list. The query indicates which Context
      object should be used.
    - 'field_group=[string]': Allows for an alternate set of fields to
      be returned.
    - 'upcoming=[date-string]': Requests a list of Node objects that are
      due soon. The date-string should be UTC and ISO formatted
      (YYYY-mm-dd).
    """
    def get(self, request, *args, **kwargs):
        """Returns the details of the node as a json encoded object"""
        SERIALIZERS = {
            'default': NodeSerializer,
            'actions_list': NodeListSerializer,
            'outline': NodeOutlineSerializer,
            'calendar': CalendarSerializer,
            'calendar_deadlines': CalendarDeadlineSerializer,
        }
        # (removed unused leftover `import time`)
        get_dict = request.GET.copy()
        node_id = kwargs.get('pk')
        # Look for the reserved query parameters
        if get_dict.get('upcoming', None):
            # Get Nodes with upcoming deadline
            nodes = self.get_upcoming(request, *args, **kwargs)
            default_serializer = 'actions_list'
        elif get_dict.get('context', None):
            # Context is given, so this is an actions list
            nodes = self.get_actions_list(request, *args, **kwargs)
            default_serializer = 'actions_list'
        elif node_id is not None:
            # A specific Node object is requested
            nodes = get_object_or_404(Node, pk=node_id)
            default_serializer = 'default'
        else:
            nodes = self.get_queryset(request, *args, **kwargs)
            default_serializer = 'default'
        # Check for alternate serializer
        try:
            field_group = get_dict.pop('field_group')[0]
        except KeyError:
            field_group = default_serializer
        Serializer = SERIALIZERS[field_group]
        # Serialize and return the queryset or object
        is_many = isinstance(nodes, QuerySet)
        serializer = Serializer(nodes, many=is_many, request=request)
        return Response(serializer.data)

    def get_queryset(self, request, *args, **kwargs):
        """
        Return a queryset for regular GET queries.

        If a context is given as a query parameter, then the
        get_actions_list() method is to be used instead.
        """
        BOOLS = ('archived',)  # Translate 'false' -> False for these fields
        M2M = ['todo_state']  # For filtering on arrays
        nodes = Node.objects.mine(request.user, get_archived=True)
        get_dict = request.GET.copy()
        parent_id = get_dict.get('parent_id', None)
        if parent_id == '0':
            # parent_id=0 means "root nodes only"
            nodes = nodes.filter(parent=None)
            get_dict.pop('parent_id')
        # Apply each remaining criterion to the queryset
        for key in get_dict.keys():
            if key in BOOLS:
                query = {key: False if get_dict[key] == 'false' else True}
            elif key in M2M:
                # Convert to (param__in=[]) style list filtering
                value_list = get_dict.getlist(key)
                param = "{}__in".format(key)
                query = {param: value_list}
            else:
                query = {key: get_dict[key]}
            try:
                nodes = nodes.filter(**query)
            except FieldError:
                # Unknown parameters are silently ignored (best-effort)
                pass
        # DB optimization
        nodes = nodes.select_related('owner')
        nodes = nodes.prefetch_related('users', 'focus_areas')
        return nodes

    def get_actions_list(self, request, *args, **kwargs):
        """
        Fetches a queryset for the requested "Next Actions" list.

        Only called if 'context' is passed as a GET query parameter
        (even if it equals None).
        """
        # Filter by parent
        parent_id = request.GET.get('parent', None)
        if parent_id is not None:
            parent = Node.objects.get(pk=parent_id)
            nodes = parent.get_descendants(include_self=True)
        else:
            nodes = Node.objects.all()
        nodes = nodes.assigned(request.user).select_related('todo_state')
        # Filter by todo state (OR of all requested states)
        final_Q = Q()
        todo_states_params = request.GET.getlist('todo_state')
        for todo_state in todo_states_params:
            final_Q = final_Q | Q(todo_state=todo_state)
        nodes = nodes.filter(final_Q)
        # Filter by FocusArea
        focus_area = request.GET.get('focus_area', None)
        if focus_area:
            nodes = nodes.filter(focus_areas=focus_area)
        # Filter by context; '0' clears the sticky session context
        context_id = request.GET.get('context', None)
        if context_id == '0':
            request.session['context_id'] = None
        elif context_id != 'None' and context_id is not None:
            context = Context.objects.get(id=context_id)
            nodes = context.apply(nodes)
            request.session['context_id'] = context_id
            request.session['context_name'] = context.name
        # DB optimization
        nodes = nodes.select_related('owner')
        nodes = nodes.prefetch_related('users', 'focus_areas')
        return nodes

    def get_upcoming(self, request, *args, **kwargs):
        """
        Get QuerySet with deadlines coming up based on 'upcoming' query
        parameter.
        """
        deadline_period = 7  # in days
        all_nodes_qs = Node.objects.mine(request.user)
        target_date = dt.datetime.strptime(request.GET['upcoming'],
                                           '%Y-%m-%d').date()
        # Determine query filters for "Upcoming Deadlines" section
        undone_Q = Q(todo_state__closed=False) | Q(todo_state=None)
        deadline = target_date + dt.timedelta(days=deadline_period)
        upcoming_deadline_Q = Q(deadline_date__lte=deadline)
        # Exclude nodes scheduled for the future unless their deadline is
        # already due (resolves the old "TODO: fix this"; matches the
        # corrected version of this view elsewhere in the project)
        scheduled_Q = ~Q(scheduled_date__gt=target_date) | Q(
            deadline_date__lte=target_date)
        deadline_nodes = all_nodes_qs.filter(undone_Q, upcoming_deadline_Q,
                                             scheduled_Q)
        deadline_nodes = deadline_nodes.order_by("deadline_date")
        # DB optimization
        deadline_nodes = deadline_nodes.select_related('owner')
        deadline_nodes = deadline_nodes.prefetch_related('focus_areas')
        return deadline_nodes

    def post(self, request, pk=None, *args, **kwargs):
        """
        Create a new Node, conducted through JSON format:

        { id: [node primary key],
          title: 'Everything's shiny, captn',
          todo_state: 2,
          etc... }

        Ignores fields related to MPTT for new nodes as these get set
        automatically based on the 'parent' attribute.

        Returns: JSON object of all node fields, with changes.
        """
        data = request.data.copy()
        if pk is not None:
            # Cannot POST if a node is specified by primary key
            return HttpResponseNotAllowed(['GET', 'PUT'])
        # Create new node
        self.node = Node()
        if not request.user.is_anonymous:
            self.node.owner = request.user
        self.node.save()
        # Set fields (ignore mptt fields for new nodes)
        for key in ('id', 'tree_id', 'lft', 'rght', 'level'):
            data.pop(key, None)
        self.node.set_fields(data)
        self.node.save()
        # Return newly saved node as json
        self.node = Node.objects.get(pk=self.node.pk)
        serializer = NodeSerializer(self.node, request=request)
        data = serializer.data
        # Don't keep nodes sent via the public interface
        if request.user.is_anonymous:
            self.node.delete()
        return Response(data)

    def put(self, request, pk=None, *args, **kwargs):
        """
        Edit existing nodes through JSON format:

        { id: [node primary key],
          title: 'Everything's shiny, captn',
          todo_state: 2,
          etc... }
        """
        if pk is None:
            # Throw error response if user is trying to
            # PUT without specifying a pk
            return HttpResponseNotAllowed(['GET', 'POST'])
        data = request.data.copy()
        # Remove tree metadata from the request
        TREE_FIELDS = ('lft', 'rght', 'level', 'tree_id')
        for key in TREE_FIELDS:
            data.pop(key, None)
        # Check the permissions of the Node
        node = get_object_or_404(Node, pk=pk)
        access = node.access_level(request.user)
        if ((request.user.is_anonymous and node.owner is not None) or
                (not request.user.is_anonymous and access != 'write')):
            # Not authorized
            return HttpResponse(
                json.dumps({'status': 'failure',
                            'reason': 'unauthorized'}),
                status=401)
        # Update and return the Node (anonymous edits are not persisted)
        node.set_fields(data)
        if not request.user.is_anonymous:
            node.save()
            node = Node.objects.get(pk=node.pk)
        serializer = NodeSerializer(node, request=request)
        return Response(serializer.data)
def test_auto_message(self):
    """
    Verify that creating and modifying a DFRD Node will trigger
    Message creation.
    """
    node = Node()
    node.owner = self.user
    node.title = 'New deferred node'
    node.scheduled_date = dt.datetime.now().date()
    dfrd = TodoState.objects.get(abbreviation='DFRD')
    nxt = TodoState.objects.get(abbreviation='NEXT')
    # Create a new deferred node
    node.todo_state = dfrd
    # BUG FIX: the old assertRaises(exc, callable, 'msg') form passed the
    # message string as a positional *argument* to the lambda, silently
    # discarding the intended failure message.
    with self.assertRaises(Message.DoesNotExist,
                           msg='New Node() starts out with a message'):
        node.deferred_message
    node.save()
    node = Node.objects.get(pk=node.pk)
    self.assertTrue(isinstance(node.deferred_message, Message))
    self.assertEqual(node.deferred_message.handler_path,
                     'plugins.deferred')
    # Now make the node NEXT and see that the message disappears
    node.todo_state = nxt
    node.save()
    node = Node.objects.get(pk=node.pk)
    with self.assertRaises(Message.DoesNotExist,
                           msg='Message not removed when Node undeferred'):
        node.deferred_message
class NodeView(APIView):
    """
    API for interacting with Node objects.

    Unauthenticated requests are permitted but do not alter the database.
    Several query parameters have special significance, otherwise query
    parameters are treated as filters:

    - 'context=[integer]': Any query with context (even if it is null)
      is treated as an actions-list. The query indicates which Context
      object should be used.
    - 'field_group=[string]': Allows for an alternate set of fields to
      be returned.
    - 'upcoming=[date-string]': Requests a list of Node objects that are
      due soon. The date-string should be UTC and ISO formatted
      (YYYY-mm-dd).
    """
    def get(self, request, *args, **kwargs):
        """Returns the details of the node as a json encoded object"""
        SERIALIZERS = {
            'default': NodeSerializer,
            'actions_list': NodeListSerializer,
            'outline': NodeOutlineSerializer,
            'calendar': CalendarSerializer,
            'calendar_deadlines': CalendarDeadlineSerializer,
        }
        get_dict = request.GET.copy()
        node_id = kwargs.get('pk')
        # Look for the reserved query parameters
        if get_dict.get('upcoming', None):
            # Get Nodes with upcoming deadline
            nodes = self.get_upcoming(request, *args, **kwargs)
            default_serializer = 'actions_list'
        elif get_dict.get('context', None):
            # Context is given, so this is an actions list
            nodes = self.get_actions_list(request, *args, **kwargs)
            default_serializer = 'actions_list'
        elif node_id is not None:
            # A specific Node object is requested
            nodes = get_object_or_404(Node, pk=node_id)
            default_serializer = 'default'
        else:
            nodes = self.get_queryset(request, *args, **kwargs)
            default_serializer = 'default'
        # Check for alternate serializer
        try:
            field_group = get_dict.pop('field_group')[0]
        except KeyError:
            field_group = default_serializer
        Serializer = SERIALIZERS[field_group]
        # Serialize and return the queryset or object
        is_many = isinstance(nodes, QuerySet)
        serializer = Serializer(nodes, many=is_many, request=request)
        return Response(serializer.data)

    def get_queryset(self, request, *args, **kwargs):
        """
        Return a queryset for regular GET queries.

        If a context is given as a query parameter, then the
        get_actions_list() method is to be used instead.
        """
        BOOLS = ('archived', )  # Translate 'false' -> False for these fields
        M2M = ['todo_state']  # For filtering on arrays
        nodes = Node.objects.mine(request.user, get_archived=True)
        get_dict = request.GET.copy()
        parent_id = get_dict.get('parent_id', None)
        if parent_id == '0':
            # parent_id=0 means "root nodes only"
            nodes = nodes.filter(parent=None)
            get_dict.pop('parent_id')
        # Apply each remaining criterion to the queryset
        for key in get_dict.keys():
            if key in BOOLS:
                query = {key: False if get_dict[key] == 'false' else True}
            elif key in M2M:
                # Convert to (param__in=[]) style list filtering
                value_list = get_dict.getlist(key)
                param = "{}__in".format(key)
                query = {param: value_list}
            else:
                query = {key: get_dict[key]}
            try:
                nodes = nodes.filter(**query)
            except FieldError:
                # Unknown parameters are silently ignored (best-effort)
                pass
        # DB optimization
        nodes = nodes.select_related('owner')
        nodes = nodes.prefetch_related('users', 'focus_areas')
        return nodes

    def get_actions_list(self, request, *args, **kwargs):
        """
        Fetches a queryset for the requested "Next Actions" list.

        Only called if 'context' is passed as a GET query parameter
        (even if it equals None).
        """
        # Filter by parent
        parent_id = request.GET.get('parent', None)
        if parent_id is not None:
            parent = Node.objects.get(pk=parent_id)
            nodes = parent.get_descendants(include_self=True)
        else:
            nodes = Node.objects.all()
        nodes = nodes.assigned(request.user).select_related('todo_state')
        # Filter by todo state (OR of all requested states)
        # (removed unused `todo_string` local)
        final_Q = Q()
        todo_states_params = request.GET.getlist('todo_state')
        for todo_state in todo_states_params:
            final_Q = final_Q | Q(todo_state=todo_state)
        nodes = nodes.filter(final_Q)
        # Filter by FocusArea
        focus_area = request.GET.get('focus_area', None)
        if focus_area:
            nodes = nodes.filter(focus_areas=focus_area)
        # Filter by context; '0' clears the sticky session context
        context_id = request.GET.get('context', None)
        if context_id == '0':
            request.session['context_id'] = None
        elif context_id != 'None' and context_id is not None:
            context = Context.objects.get(id=context_id)
            nodes = context.apply(nodes)
            request.session['context_id'] = context_id
            request.session['context_name'] = context.name
        # DB optimization
        nodes = nodes.select_related('owner')
        nodes = nodes.prefetch_related('users', 'focus_areas')
        return nodes

    def get_upcoming(self, request, *args, **kwargs):
        """
        Get QuerySet with deadlines coming up based on 'upcoming' query
        parameter.
        """
        deadline_period = 7  # in days
        all_nodes_qs = Node.objects.mine(request.user)
        target_date = dt.datetime.strptime(request.GET['upcoming'],
                                           '%Y-%m-%d').date()
        # Determine query filters for "Upcoming Deadlines" section
        undone_Q = Q(todo_state__closed=False) | Q(todo_state=None)
        deadline = target_date + dt.timedelta(days=deadline_period)
        upcoming_deadline_Q = Q(deadline_date__lte=deadline)
        # Exclude nodes scheduled for the future unless the deadline is
        # already due
        scheduled_Q = ~Q(scheduled_date__gt=target_date) | Q(
            deadline_date__lte=target_date)
        deadline_nodes = all_nodes_qs.filter(undone_Q, upcoming_deadline_Q,
                                             scheduled_Q)
        deadline_nodes = deadline_nodes.order_by("deadline_date")
        # DB optimization
        deadline_nodes = deadline_nodes.select_related('owner')
        deadline_nodes = deadline_nodes.prefetch_related('focus_areas')
        return deadline_nodes

    def post(self, request, pk=None, *args, **kwargs):
        """
        Create a new Node, conducted through JSON format:

        { id: [node primary key],
          title: 'Everything's shiny, captn',
          todo_state: 2,
          etc... }

        Ignores fields related to MPTT for new nodes as these get set
        automatically based on the 'parent' attribute.

        Returns: JSON object of all node fields, with changes.
        """
        data = request.data.copy()
        if pk is not None:
            # Cannot POST if a node is specified by primary key
            return HttpResponseNotAllowed(['GET', 'PUT'])
        # Create new node
        self.node = Node()
        if not request.user.is_anonymous:
            self.node.owner = request.user
        self.node.save()
        # Set fields (ignore mptt fields for new nodes)
        for key in ('id', 'tree_id', 'lft', 'rght', 'level'):
            data.pop(key, None)
        self.node.set_fields(data)
        self.node.save()
        # Return newly saved node as json
        self.node = Node.objects.get(pk=self.node.pk)
        serializer = NodeSerializer(self.node, request=request)
        data = serializer.data
        # Don't keep nodes sent via the public interface
        if request.user.is_anonymous:
            self.node.delete()
        return Response(data)

    def put(self, request, pk=None, *args, **kwargs):
        """
        Edit existing nodes through JSON format:

        { id: [node primary key],
          title: 'Everything's shiny, captn',
          todo_state: 2,
          etc... }
        """
        if pk is None:
            # Throw error response if user is trying to
            # PUT without specifying a pk
            return HttpResponseNotAllowed(['GET', 'POST'])
        data = request.data.copy()
        # Remove tree metadata from the request
        TREE_FIELDS = ('lft', 'rght', 'level', 'tree_id')
        for key in TREE_FIELDS:
            data.pop(key, None)
        # Check the permissions of the Node
        node = get_object_or_404(Node, pk=pk)
        access = node.access_level(request.user)
        if ((request.user.is_anonymous and node.owner is not None) or
                (not request.user.is_anonymous and access != 'write')):
            # Not authorized
            return HttpResponse(json.dumps({
                'status': 'failure',
                'reason': 'unauthorized'
            }), status=401)
        # Update and return the Node (anonymous edits are not persisted)
        node.set_fields(data)
        if not request.user.is_anonymous:
            node.save()
            node = Node.objects.get(pk=node.pk)
        serializer = NodeSerializer(node, request=request)
        return Response(serializer.data)
def import_structure(file=None, string=None, request=None, scope=None):
    """
    Parse either an org-mode file or an org-mode string and save the
    resulting hierarchy to the OrgWolf models in the gtd module.

    Arguments:
    - file: path to an org-mode file (mutually exclusive with string)
    - string: org-mode text (mutually exclusive with file)
    - request: used only to determine the owner of created Nodes;
      falls back to User pk=1
    - scope: Scope to attach to created Nodes; auto-detected from the
      filename when a file is given

    # TODO: rewrite this without PyOrgMode
    """
    # We want pre & post save signals skipped
    if file and string:
        raise AttributeError(
            "Please supply either a file or a string, not both.")
    elif string:
        source = StringIO(string)
    elif file:
        source = io.open(file, 'r', encoding='utf8')
        if not scope:
            # Automatic scope detection from the filename ("<name>.org")
            scope_match = re.search(r'([^/]+)\.org', file)
            if scope_match:
                scope_string = scope_match.groups()[0]
                scope = Scope.objects.filter(name__iexact=scope_string)
                if scope.exists():
                    scope = scope[0]
                else:
                    scope = Scope(name=scope_string, display=scope_string)
                    scope.save()
    else:
        raise AttributeError("Please supply a file or a string")
    # First, build a list of dictionaries that hold the pieces of each line.
    data_list = []
    if request:
        current_user = request.user
    else:
        current_user = User.objects.get(id=1)
    for line in source:
        data_list.append({'original': line})

    def save_text(parent, text):
        # Helper that attaches accumulated body text to a parent node.
        # BUG FIX: previously ignored the `text` parameter and read the
        # closure variable `current_text` instead.
        if parent:
            parent.text = text
            parent.save()
        elif text:
            # Warn the user about some dropped text
            print("---")
            print("Warning, dropping text (no parent node)")
            # BUG FIX: the format string and value were passed as two
            # separate arguments to print()
            print("Text: %s" % text)
            print("---")

    # Now go through each line and see if it matches a regex
    current_indent = 0  # counter
    current_order = 0
    # NOTE(review): parent_stack is created as a plain list but is used
    # below via .head and .push(), which lists do not provide -- a
    # linked-list stack class appears to be intended; confirm against
    # the rest of the repository before relying on this function.
    parent_stack = []
    todo_state_list = TodoState.objects.all()  # Todo: filter by current user
    current_text = ''
    for line in data_list:
        heading_match = HEADING_RE.search(line['original'].strip("\n"))
        if heading_match:
            # It's a heading
            line_indent = len(heading_match.groups()[0])
            line['todo'] = heading_match.groups()[1]
            line['priority'] = heading_match.groups()[2]
            line['heading'] = heading_match.groups()[3]
            line['tag_string'] = heading_match.groups()[4]
            new_node = Node()
            if line_indent > current_indent:
                # New child
                # TODO: what if the user skips a level
                current_indent = current_indent + 1
                current_order = 0
                # Save any text associated with the parent
                parent = getattr(parent_stack.head, 'value', None)
                save_text(parent, current_text)
                current_text = ''
            elif line_indent == current_indent:
                # Another child
                # Save any text associated with the parent
                parent = getattr(parent_stack.head, 'value', None)
                save_text(parent, current_text)
                current_text = ''
                # Adjust the parent
                parent_stack.pop()
            elif line_indent < current_indent:
                # Back up to parent
                # Save any text associated with the parent
                parent = getattr(parent_stack.head, 'value', None)
                save_text(parent, current_text)
                current_text = ''
                parent_stack.pop()
                for x in range(current_indent - line_indent):
                    # Move up the stack
                    current_order = parent_stack.head.value.order
                    parent_stack.pop()
                    current_indent = current_indent - 1
            # See if the 'todo' captured by the regex matches a current
            # TodoState; if not then it's part of the heading
            # TODO: find a way to not destroy whitespace
            if line['todo']:
                found = False
                for todo_state in todo_state_list:
                    if todo_state.abbreviation.lower() == line['todo'].lower():
                        new_node.todo_state = todo_state
                        found = True
                        break
                if not found:
                    if line['heading']:
                        line['heading'] = line['todo'] + ' ' + str(
                            line['heading'])
                    else:
                        line['heading'] = line['todo']
            if current_indent > 1:
                new_node.parent = parent_stack.head.value
            new_node.title = line['heading'] if line['heading'] else ''
            new_node.owner = current_user
            new_node.order = current_order + 10
            new_node.priority = line['priority'] if line['priority'] else ''
            new_node.tag_string = (line['tag_string']
                                   if line['tag_string'] else '')
            new_node.auto_close = False  # Disable closed timestamp
            new_node.save()
            # Add scope (passed as argument or auto-detected)
            if scope:
                new_node.scope.add(scope)
            # Update current state variables
            current_order = new_node.order
            parent_stack.push(new_node)
        else:
            # Some sort of text item
            # Test to see if it's a scheduled, deadline or closed modifier
            time_sensitive_match = TIME_SENSITIVE_RE.findall(line['original'])
            if time_sensitive_match:
                parent = parent_stack.head.value
                for match in time_sensitive_match:
                    # Bump a match for "CLOSED:" up to the 0 and 1 position
                    if match[2] and match[3]:
                        match = (match[2], match[3], "", "")
                    date_match = DATE_RE.search(match[1]).groups()
                    if date_match:
                        # Set some variables to make things easier to read
                        year = int(date_match[0])
                        month = int(date_match[1])
                        day = int(date_match[2])
                        hour = int(date_match[4]) if date_match[4] else 0
                        minute = int(date_match[5]) if date_match[5] else 0
                        naive_datetime = datetime(year, month, day,
                                                  hour, minute)
                        new_datetime = timezone.get_current_timezone(
                        ).localize(naive_datetime)
                        # TODO: set to user's preferred timezone
                        time_specific = bool(date_match[4] and date_match[5])
                        if date_match[6]:
                            # repeating
                            parent.repeats = True
                            parent.repeating_number = date_match[6][1]
                            parent.repeating_unit = date_match[6][2]
                            if date_match[6][0] == "+":
                                parent.repeats_from_completion = False
                            elif date_match[6][0] == ".":
                                parent.repeats_from_completion = True
                        # Set the appropriate fields
                        if match[0] == "SCHEDULED:":
                            parent.scheduled_date = new_datetime.date()
                            if time_specific:
                                parent.scheduled_time = new_datetime.time()
                        elif match[0] == "DEADLINE:":
                            parent.deadline_date = new_datetime.date()
                            if time_specific:
                                # BUG FIX: previously assigned
                                # new_datetime.date() to deadline_time
                                parent.deadline_time = new_datetime.time()
                        elif match[0] == "CLOSED:":
                            parent.closed = new_datetime
                parent.auto_close = False  # Disable closed timestamp
                parent.save()
            else:
                # It's just a regular text item
                current_text += line['original']