Code Example #1
File: sync_odk.py  Project: zhangsichu/rhizome
    def get_object_list(self, request):

        required_param = 'odk_form_id'
        odk_form_id = None

        try:
            odk_form_id = request.GET[required_param]
        except KeyError:
            pass

        try:
            document_id = request.GET['document_id']
            odk_form_id = DocumentDetail.objects.get(
                document_id=document_id,
                doc_detail_type__name='odk_form_name').doc_detail_value
        except KeyError:
            pass

        if not odk_form_id:
            raise DatapointsException(
                '"{0}" is a required parameter for this request'.format(
                    required_param))

        try:
            odk_sync_object = OdkSync(odk_form_id,
                                      **{'user_id': request.user.id})
            document_id_list, sync_result_data = odk_sync_object.main()

        except OdkJarFileException as e:
            raise DatapointsException(e.errorMessage)

        return Document.objects.filter(id__in=document_id_list).values()
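
A minimal usage sketch (not from the repo): get_object_list above accepts either
odk_form_id directly or document_id, which it resolves to a form name through
DocumentDetail. The resource class, URL, and ids below are placeholder assumptions,
and request.user would need to be attached before calling, since OdkSync receives
request.user.id.

from django.test import RequestFactory

factory = RequestFactory()

# option 1: pass the ODK form id directly
request_direct = factory.get('/api/v1/odk_sync/', {'odk_form_id': 'some_odk_form'})

# option 2: pass a document_id; the endpoint looks up the form name via
# DocumentDetail (doc_detail_type__name='odk_form_name')
request_via_doc = factory.get('/api/v1/odk_sync/', {'document_id': 123})

# resource = OdkSyncResource()                        # hypothetical resource class
# documents = resource.get_object_list(request_direct)
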
Code Example #2
File: doc_trans_form.py  Project: zhangsichu/rhizome
    def get_object_list(self, request):
        '''
        When you upload a file, step one is getting the data into
        source_submission:
            --> DocTransform <--

        Step two is translating from source_submission into datapoints:
            --> MasterRefresh <--

        Step three is aggregation:
            --> AggRefresh <--
        '''
        try:
            doc_id = request.GET['document_id']
        except KeyError:
            raise DatapointsException(
                message='Document_id is a required API param')
        # dt = DocTransform(request.user.id, doc_id)

        ran_complex_doc_transform = False

        try:
            dt = ComplexDocTransform(request.user.id, doc_id)
            dt.main()
            ran_complex_doc_transform = True
        except Exception as err:
            try:
                dt = DateDocTransform(request.user.id, doc_id)
                ssids = dt.process_file()

            except Exception as err:
                raise DatapointsException(message=err.message)

        mr = MasterRefresh(request.user.id, doc_id)
        mr.main()

        if ran_complex_doc_transform:
            doc_campaign_ids = set(list(DataPoint.objects\
                .filter(source_submission__document_id = doc_id)\
                .values_list('campaign_id',flat=True)))

            for c_id in doc_campaign_ids:
                ar = AggRefresh(c_id)
                # try/except block hack because tests fail otherwise
                try:
                    with transaction.atomic():
                        ar.main()
                except TransactionManagementError as e:
                    pass
        return Document.objects.filter(id=doc_id).values()
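
The docstring above describes a three-step pipeline. Below is a minimal sketch
(not from the repo) of driving the same steps directly with the classes the
endpoint already uses; imports of the project classes (ComplexDocTransform,
MasterRefresh, DataPoint, AggRefresh) are assumed to be available and their
paths depend on the project layout.

from django.db import transaction


def run_upload_pipeline(user_id, doc_id):
    # step 1: file -> source_submission
    dt = ComplexDocTransform(user_id, doc_id)
    dt.main()

    # step 2: source_submission -> datapoints
    mr = MasterRefresh(user_id, doc_id)
    mr.main()

    # step 3: aggregate every campaign touched by this document
    campaign_ids = set(DataPoint.objects
                       .filter(source_submission__document_id=doc_id)
                       .values_list('campaign_id', flat=True))
    for c_id in campaign_ids:
        with transaction.atomic():
            AggRefresh(c_id).main()
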
Code Example #3
    def get_object_list(self, request):
        try:
            doc_id = request.GET['document_id']
        except KeyError:
            raise DatapointsException(
                message='Document_id is a required API param')
        # dt = DocTransform(request.user.id, doc_id)

        try:
            dt = SimpleDocTransform(request.user.id, doc_id)
            dt.main()
        except Exception as err:
            raise DatapointsException(message=err.message)

        return Document.objects.filter(id=doc_id).values()
Code Example #4
    def post_doc_data(self, post_data, user_id, doc_title, doc_id):

        # When posting from ODK, the file_meta prefix is not included, but posts
        # from the webapp do include it. The two post formats should be made
        # consistent; for now both are handled here.

        # TODO: better exception handling. This is kind of lame, but it handles
        # the fact that test posts are different from application posts.
        # Needs investigation.
        if post_data == 'data:' or len(post_data) == 0:
            raise DatapointsException(
                message='file is empty please check the upload and try again')
        try:
            file_meta, base64data = post_data.split(',')
        except ValueError:
            base64data = post_data
        file_header = None
        file_content = None
        if '.csv' in doc_title:
            file_content = ContentFile(base64.b64decode(base64data))
            file_header = file_content.readline()
        elif '.xlsx' in doc_title or '.xls' in doc_title:
            # workaround -- write the excel file to disk so it can be read back
            new_file_path = settings.MEDIA_ROOT + doc_title
            new_file = open(new_file_path, 'wb')
            new_file.write(base64.b64decode(base64data))
            new_file.close()
            the_file = open(new_file_path, 'rb')
            try:
                file_df = read_excel(the_file)
            except Exception as err:
                os.remove(new_file_path)
                raise DatapointsException(
                    message=
                    'There was an error with your file. Please check the upload and try again'
                )
            file_content = ContentFile(file_df.to_csv())
            file_header = file_content.readline()
            # delete the excel file
            os.remove(new_file_path)
        sd, created = Document.objects.update_or_create(
            id=doc_id,
            defaults={'doc_title': doc_title, 'created_by_id': user_id, \
                'file_header': file_header}
        )
        sd.docfile.save(sd.guid, file_content)
        return sd
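
A minimal sketch (not from the repo) of building the post_data string that
post_doc_data() expects: an optional file-meta prefix and a base64 payload
separated by a comma, which is why the method split(',')s and falls back to
treating the whole string as base64. The file name and the commented call are
placeholder assumptions.

import base64

with open('example_upload.csv', 'rb') as f:          # hypothetical local file
    b64 = base64.b64encode(f.read())

post_data_webapp = 'data:text/csv;base64,' + b64     # webapp style: meta + ',' + payload
post_data_odk = b64                                   # ODK style: bare base64, no comma

# resource.post_doc_data(post_data_webapp, user_id=1,
#                        doc_title='example_upload.csv', doc_id=None)   # hypothetical call
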
Code Example #5
File: campaign.py  Project: default50/rhizome
    def obj_create(self, bundle, **kwargs):

        post_data = bundle.data

        try:
            campaign_id = int(post_data['id'])
            if campaign_id == -1:
                campaign_id = None
        except KeyError:
            campaign_id = None

        try:
            defaults = {
                'name':
                str(post_data['name']),
                'top_lvl_location_id':
                post_data['top_lvl_location_id'],
                'top_lvl_indicator_tag_id':
                post_data['top_lvl_indicator_tag_id'],
                'office_id':
                post_data['office_id'],
                'campaign_type_id':
                post_data['campaign_type_id'],
                'start_date':
                datetime.strptime(post_data['start_date'], '%Y-%m-%d'),
                'end_date':
                datetime.strptime(post_data['end_date'], '%Y-%m-%d'),
                'pct_complete':
                post_data['pct_complete']
            }
        except Exception as error:
            print 'Please provide "{0}" for the campaign.'.format(error)
            raise DatapointsException(
                'Please provide "{0}" for the campaign.'.format(error))

        try:
            campaign, created = Campaign.objects.update_or_create(
                id=campaign_id, defaults=defaults)
        except Exception as error:
            raise DatapointsException(error)

        bundle.obj = campaign
        bundle.data['id'] = campaign.id

        return bundle
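
A minimal sketch (not from the repo) of the bundle.data payload that obj_create
above expects: every key used in defaults is required, dates are '%Y-%m-%d'
strings, and all of the values below are placeholders.

example_campaign_payload = {
    'id': -1,                           # -1 (or omitted) means create a new campaign
    'name': 'Example Campaign 2016-01',
    'top_lvl_location_id': 1,
    'top_lvl_indicator_tag_id': 1,
    'office_id': 1,
    'campaign_type_id': 1,
    'start_date': '2016-01-01',
    'end_date': '2016-01-07',
    'pct_complete': 0.0,
}
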
Code Example #6
File: refresh_master.py  Project: default50/rhizome
    def get_object_list(self, request):
        document_id = request.GET['document_id']

        try:
            dt = SimpleDocTransform(request.user.id, document_id)
            dt.main()
        except Exception as err:
            raise DatapointsException(message=err.message)

        return Document.objects.filter(id=document_id).values()
Code Example #7
File: refresh_master.py  Project: zhangsichu/rhizome
    def get_object_list(self, request):
        try:
            doc_id = request.GET['document_id']
        except KeyError:
            raise DatapointsException(
                message='Document_id is a required API param')
        # dt = DocTransform(request.user.id, doc_id)

        mr = MasterRefresh(request.user.id, doc_id)
        mr.main()

        doc_campaign_ids = set(list(DataPoint.objects\
            .filter(source_submission__document_id = doc_id)\
            .values_list('campaign_id',flat=True)))
        for c_id in doc_campaign_ids:
            ar = AggRefresh(c_id)

        return Document.objects.filter(id=doc_id).values()
Code Example #8
    def obj_create(self, bundle, **kwargs):

        post_data = bundle.data
        user_id = bundle.request.user.id

        try:
            dash_id = int(post_data['id'])
        except KeyError:
            dash_id = None

        title = post_data['title']

        try:
            description = post_data['description']
        except KeyError:
            description = ''

        try:
            layout = int(post_data['layout'])
        except KeyError:
            layout = 0

        try:
            rows = json.loads(post_data['rows'])
        except KeyError:
            rows = None

        defaults = {
            'id': dash_id,
            'title': title,
            'description': description,
            'layout': layout,
            'rows': rows
        }

        if (CustomDashboard.objects.filter(title=title).count() > 0
                and (dash_id is None)):
            raise DatapointsException(
                'the custom dashboard "{0}" already exists'.format(title))
        dashboard, created = CustomDashboard.objects.update_or_create(
            id=dash_id, defaults=defaults)
        bundle.obj = dashboard
        bundle.data['id'] = dashboard.id
        return bundle
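
A minimal sketch (not from the repo) of a bundle.data payload for the dashboard
obj_create above. Only title is required; description and layout fall back to
defaults, and rows is sent as a JSON string because the method json.loads() it.
The inner row structure shown is an illustrative assumption.

import json

example_dashboard_payload = {
    'title': 'Example Dashboard',
    'description': 'Optional; defaults to an empty string',
    'layout': 1,                              # optional; defaults to 0
    'rows': json.dumps([{'charts': []}]),     # optional; inner structure is illustrative
}
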
Code Example #9
    def file_to_source_submissions(self):
        # use a dictionary to make sure there is a single value for each
        # instance_guid; duplicates are handled by overwriting old values
        batch = {}
        for submission in self.csv_df.itertuples():

            ss, instance_guid = self.process_raw_source_submission(submission)
            if ss is not None and instance_guid is not None:
                ss['instance_guid'] = instance_guid
                batch[instance_guid] = ss

        object_list = [SourceSubmission(**v) for k, v in batch.iteritems()]

        try:
            ss = SourceSubmission.objects.bulk_create(object_list)
        except IntegrityError as e:
            raise DatapointsException(e.message)

        return
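
A minimal, self-contained sketch (plain Python, not from the repo) of the
dedup-by-key pattern used above: later rows with the same instance_guid
overwrite earlier ones, so bulk_create receives exactly one row per guid.

rows = [
    {'instance_guid': 'abc', 'value': 1},
    {'instance_guid': 'abc', 'value': 2},   # overwrites the first 'abc'
    {'instance_guid': 'xyz', 'value': 3},
]

batch = {}
for row in rows:
    batch[row['instance_guid']] = row

assert len(batch) == 2
assert batch['abc']['value'] == 2
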
Code Example #10
    def file_to_source_submissions(self):

        batch = {}
        for submission in self.csv_df.itertuples():

            ss, instance_guid = self.process_raw_source_submission(submission)
            if ss is not None and instance_guid is not None:

                ss['instance_guid'] = instance_guid
                batch[instance_guid] = ss

        object_list = [SourceSubmission(**v) for k, v in batch.iteritems()]

        try:
            ss = SourceSubmission.objects.bulk_create(object_list)
        except IntegrityError as e:
            raise DatapointsException(e.message)

        return
Code Example #11
    def __init__(self, user_id, document_id, raw_csv_df = None):
        self.user_id = user_id

        self.location_column, self.campaign_column, self.uq_id_column = \
            ['geocode', 'campaign', 'unique_key']

        self.date_column = 'data_date'
        self.document = Document.objects.get(id=document_id)
        self.file_path = str(self.document.docfile)

        if not isinstance(raw_csv_df, DataFrame):
            raw_csv_df = read_csv(settings.MEDIA_ROOT + self.file_path)

        csv_df = raw_csv_df.where((notnull(raw_csv_df)), None)
        ## if there is no uq id column -- make one ##
        if self.uq_id_column not in raw_csv_df.columns:

            try:
                csv_df[self.uq_id_column] = \
                    csv_df[self.location_column].map(str) + csv_df[self.campaign_column]
            except Exception as err:
                if self.date_column not in csv_df.columns:
                    dp_error_message = '%s is a required column.' % err.message
                    raise DatapointsException(message=dp_error_message)

        self.csv_df = csv_df
        self.file_header = csv_df.columns

        self.meta_lookup = {
            'location':{},
            'indicator':{},
            'campaign':{}
        }
        self.indicator_ids_to_exclude = Set([-1])
        self.existing_submission_keys = SourceSubmission.objects.filter(
            document_id = self.document.id).values_list('instance_guid',flat=True)

        self.file_path = str(self.document.docfile)
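
A minimal sketch (not from the repo) of a DataFrame that satisfies the column
expectations of the constructor above: 'geocode', 'campaign' and 'data_date' are
present, and 'unique_key' is derived from geocode + campaign when missing. The
values, the extra indicator column, and the class name in the commented call are
placeholder assumptions.

from pandas import DataFrame

raw_csv_df = DataFrame([
    {'geocode': 'NG-KN', 'campaign': 'jan-2016', 'data_date': '2016-01-01', 'value': 120},
    {'geocode': 'NG-KD', 'campaign': 'jan-2016', 'data_date': '2016-01-01', 'value': 95},
])

# transform = DocTransform(user_id=1, document_id=1, raw_csv_df=raw_csv_df)
# (the constructor shown above; the exact class depends on which transform defines it)
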