Example #1
    def prepare_dict(self, response_data_dict, request=None):
        '''
        Receives the response of an FQL/Graph query, cleans/parses the data and returns a
        dict that can be used to create and save a data record (model instance).
        The operations applied here depend on the model in question, so this method must be
        called on the proper database model; otherwise the key-comparison check will fail.
        '''
        from apps.fbschema.parse_utils import parse_clean_field_value

        if not compare_keys_with_fields(
                self, response_data_dict.keys()
        ):  # TODO: this comparison runs for every record, can we do better?
            # TODO: use a more specific exception type
            raise ValueError("Response keys do not match the fields of %s" %
                             self.__class__)

        model = self  # Alias, just to make the code more readable

        data_dict = {}
        fields = [
            field for field in model._meta.fields
            if field.name not in model.ignore_fields
        ]
        for field in fields:
            key = field.name
            data_dict[key] = parse_clean_field_value(field,
                                                     response_data_dict[key],
                                                     request)

        return data_dict
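The helper compare_keys_with_fields is referenced above but not shown in these snippets. A minimal sketch of what it might check, assuming it only verifies that every non-ignored model field name appears among the response keys (the real helper in apps.fbschema may do more):

    # Hypothetical sketch, not the actual apps.fbschema implementation.
    def compare_keys_with_fields(model, response_keys):
        # Field names the model persists, minus the ones it deliberately ignores.
        field_names = set(
            field.name for field in model._meta.fields
            if field.name not in model.ignore_fields
        )
        # Every remaining field must have a matching key in the response payload.
        return field_names.issubset(set(response_keys))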
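prepare_dict delegates the per-field cleaning to parse_clean_field_value, imported from apps.fbschema.parse_utils but not shown in these snippets. A rough, purely illustrative sketch of the kind of per-field normalisation such a helper might perform (the real implementation may differ):

    # Purely illustrative, the real parse_clean_field_value is not shown here.
    import datetime

    def parse_clean_field_value(field, raw_value, request=None):
        internal_type = field.get_internal_type()
        if raw_value is None:
            return None
        if internal_type in ("IntegerField", "BigIntegerField"):
            return int(raw_value)
        if internal_type == "DateTimeField":
            # FQL responses often carry Unix timestamps for date fields.
            return datetime.datetime.utcfromtimestamp(int(raw_value))
        return raw_value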
Example #2
    def save_update_delete(self, request, response_data, stream_nature=False):
        '''
        Main entry point: saves, updates and deletes records based on an updated result set.
        Always called through subclasses.
        @stream_nature = The Facebook stream, for example, is a stream of incoming items. We compare
                         the last 'n' database items with the incoming response items, and without
                         stream_nature the code below would delete those older database items on the
                         assumption that they no longer exist. For a stream we therefore only save
                         and update; we never delete.
        @ONLY_SESSION_USER = local_fql_query uses 'owner_identifier' to fetch the database items that
                             are in context for comparison with the response items. context_uid is
                             usually me() or a friend's uid, but some tables only ever belong to the
                             session user, so no context_uid is needed and local_fql_query returns
                             the last 'n' records OR all records.
        '''
        from apps.fbschema.parse_utils import parse_clean_field_value

        if self.owner_identifier == "ONLY_SESSION_USER":
            context_uid = None
        else:
            # Assumption: response_data is a non-empty list of dicts.
            context_uid = response_data[0][self.owner_identifier]

        model_data_list = self.local_fql_query(request, context_uid)

        primary_identifier = self.primary_identifier

        # Set of primary-key values from the response; the values we compare must be
        # 'cleaned' the same way the stored values were.
        response_data_set = {
            parse_clean_field_value(self.primary_identifier_class(),
                                    response_data_dict[primary_identifier])
            for response_data_dict in response_data
        }

        # Set of primary-key values already stored for this model
        model_data_set = {
            getattr(model_data, primary_identifier)
            for model_data in model_data_list
        }

        # Delete list: records we hold locally that are missing from the response
        if stream_nature:
            delete_list = []
        else:
            delete_list = list(model_data_set.difference(response_data_set))
        # Add list: records in the response that we do not hold yet
        add_list = list(response_data_set.difference(model_data_set))
        # Update list: records present on both sides
        update_list = list(response_data_set.intersection(model_data_set))
        logger.debug("response_data_set: %s", response_data_set)
        logger.debug("model_data_set: %s", model_data_set)
        logger.debug("add_list: %s", add_list)
        logger.debug("update_list: %s", update_list)

        for response_data_dict in response_data:
            data_dict = self.prepare_dict(response_data_dict, request)
            if data_dict[primary_identifier] in add_list:
                # Add: create and save a new record
                try:
                    data_tuple = self(user=request.user, **data_dict)
                    data_tuple.save()
                except IntegrityError:
                    # Due to the nature of Facebook data there are cases where an IntegrityError
                    # is expected for the ids being saved. The exception should be handled by the
                    # particular model that expects this case.
                    # Use case: FacebookStream --
                    #   someone with post_id 'x' changed their profile picture,
                    #   and under the same post_id 'x' seven others changed theirs as well.
                    logger.info("Integrity Exception handled, Model = %s" %
                                self.__class__)
                    print("Integrity Exception handled, Model = %s" %
                          self.__class__)
                    data_tuple = self.handle_integrity_exception(
                        request, data_dict)
                    data_tuple.save()
            elif data_dict[primary_identifier] in update_list:
                # Update: overwrite the existing record, keeping its local id
                kwargs = {primary_identifier: data_dict[primary_identifier]}
                data_tuple = self.objects.get(user=request.user, **kwargs)
                data_tuple = self(user=request.user,
                                  id=data_tuple.id,
                                  **data_dict)
                data_tuple.save()
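Stripped of the Django specifics, the add/update/delete bookkeeping above comes down to three set operations on primary-key values. A self-contained illustration with made-up ids:

    # Made-up primary-key values, just to show how the three lists are derived.
    response_ids = {101, 102, 103}     # ids present in the Facebook response
    local_ids = {102, 103, 104}        # ids already stored in the local table

    add_ids = response_ids - local_ids        # {101}: new rows to insert
    update_ids = response_ids & local_ids     # {102, 103}: rows to refresh
    delete_ids = local_ids - response_ids     # {104}: rows missing from the response

    stream_nature = True
    if stream_nature:
        # For stream-like tables old rows are expected to fall out of the
        # response window, so nothing is ever scheduled for deletion.
        delete_ids = set()

    print(sorted(add_ids), sorted(update_ids), sorted(delete_ids))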
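save_update_delete relies on a handful of attributes and hooks that subclasses are expected to provide: primary_identifier, primary_identifier_class, owner_identifier, ignore_fields, local_fql_query and handle_integrity_exception. A hypothetical subclass shape, purely to show where those pieces plug in; only FacebookStream and the attribute names come from the code above, while the fields and the base-class name are invented:

    from django.db import models

    # FBSchemaBase stands in for whatever base class defines save_update_delete;
    # the fields and the base-class name are invented for illustration.
    class FacebookStream(FBSchemaBase):
        post_id = models.CharField(max_length=128, unique=True)
        source_id = models.CharField(max_length=64)
        message = models.TextField(blank=True)

        primary_identifier = "post_id"               # key used to match response rows to local rows
        primary_identifier_class = models.CharField  # instantiated to clean the key before comparing
        owner_identifier = "source_id"               # or "ONLY_SESSION_USER" for session-only tables
        ignore_fields = ["id", "user"]               # local bookkeeping fields absent from the response

        def local_fql_query(self, request, context_uid):
            # Return the locally stored rows that are in context for this response.
            qs = self.objects.filter(user=request.user)
            if context_uid is not None:
                qs = qs.filter(source_id=context_uid)
            return qs

        def handle_integrity_exception(self, request, data_dict):
            # Resolve a duplicate post_id in whatever way suits this table,
            # e.g. by returning the already-stored row so it gets re-saved.
            kwargs = {self.primary_identifier: data_dict[self.primary_identifier]}
            return self.objects.get(user=request.user, **kwargs)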