Example #1
    def get(self,
            output_format='xlsx',
            user_id=None,
            start_time=None,
            end_time=None,
            limit=None,
            offset=None):
        """Export all charges of special user, output formats supported:
           * Excel (Sets + Books)
           * JSON (Sets + Books)
           * YAML (Sets + Books)
           * HTML (Sets)
           * TSV (Sets)
           * CSV (Sets)
        """
        if output_format.lower() not in ["xls", "xlsx", "csv", "json", "yaml"]:
            raise exception.InvalidOutputFormat(output_format=output_format)

        if limit and limit < 0:
            raise exception.InvalidParameterValue(err="Invalid limit")
        if offset and offset < 0:
            raise exception.InvalidParameterValue(err="Invalid offset")

        limit_user_id = acl.get_limited_to_user(request.headers,
                                                'export_charges')

        if limit_user_id:
            user_id = limit_user_id

        headers = (u"充值记录ID", u"充值对象用户名", u"充值对象ID", u"充值对象真实姓名", u"充值对象邮箱",
                   u"充值对象公司", u"充值金额", u"充值类型", u"充值来源", u"充值人员ID", u"充值人员用户名",
                   u"充值时间", u"状态")
        data = []

        users = {}

        def _get_user(user_id):
            user = users.get(user_id)
            if user:
                return user
            contact = kunkka.get_uos_user(user_id) or {}
            user_name = contact.get('name')
            email = contact.get('email')
            real_name = contact.get('real_name') or 'unknown'
            mobile = contact.get('phone') or 'unknown'
            company = contact.get('company') or 'unknown'
            users[user_id] = models.User(user_id=user_id,
                                         user_name=user_name,
                                         email=email,
                                         real_name=real_name,
                                         mobile=mobile,
                                         company=company)
            return users[user_id]

        self.conn = pecan.request.db_conn
        charges = self.conn.get_charges(request.context,
                                        user_id=user_id,
                                        limit=limit,
                                        offset=offset,
                                        start_time=start_time,
                                        end_time=end_time)
        for charge in charges:
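            # Shift charge_time to UTC+8, presumably to report Beijing local time.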
            charge.charge_time += datetime.timedelta(hours=8)
            acharge = models.Charge.from_db_model(charge)
            acharge.actor = _get_user(charge.operator)
            acharge.target = _get_user(charge.user_id)
            charge_time = \
                timeutils.strtime(charge.charge_time, fmt=OUTPUT_TIME_FORMAT)

            adata = (acharge.charge_id, acharge.target.user_name,
                     acharge.target.user_id, acharge.target.real_name,
                     acharge.target.email, acharge.target.company,
                     str(acharge.value), acharge.type, acharge.come_from,
                     acharge.actor.user_id, acharge.actor.user_name,
                     charge_time, u"正常")
            data.append(adata)

        data = tablib.Dataset(*data, headers=headers)

        response.content_type = "application/binary; charset=UTF-8"
        response.content_disposition = \
            "attachment; filename=charges.%s" % output_format
        content = getattr(data, output_format)
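        # Re-encode CSV as GB2312, presumably so Chinese text opens correctly
        # in Excel on Chinese-locale Windows.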
        if output_format == 'csv':
            content = content.decode("utf-8").encode("gb2312")
        response.write(content)
        return response
Example #2
def subscriber_export(request):
    """Export CSV file of subscriber record

    **Important variable**:

        * ``request.session['subscriber_list_kwargs']`` - stores subscriber_list kwargs

    **Exported fields**: ['contact', 'updated_date', 'count_attempt',
                          'completion_count_attempt', 'status', 'disposition',
                          'collected_data', 'agent']
    """
    format_type = request.GET['format']
    # Get the response object; it can be used as a stream.
    response = HttpResponse(content_type='text/%s' % format_type)

    # force download.
    response[
        'Content-Disposition'] = 'attachment;filename=export.%s' % format_type

    if request.session.get('subscriber_list_kwargs'):
        kwargs = request.session['subscriber_list_kwargs']
        if request.user.is_superuser:
            subscriber_list = Subscriber.objects.all()
        else:
            subscriber_list = Subscriber.objects.filter(
                campaign__user=request.user)

        if kwargs:
            subscriber_list = subscriber_list.filter(**kwargs)

        headers = (
            'contact',
            'updated_date',
            'count_attempt',
            'completion_count_attempt',
            'status',
            'disposition',
            'collected_data',
        )  # 'agent',

        list_val = []
        for i in subscriber_list:
            updated_date = i.updated_date
            if format_type in (Export_choice.JSON, Export_choice.XLS):
                updated_date = str(i.updated_date)

            list_val.append((
                i.contact.contact,
                updated_date,
                i.count_attempt,
                i.completion_count_attempt,
                get_subscriber_status(i.status),
                get_subscriber_disposition(i.campaign_id, i.disposition),
                i.collected_data,
                # i.agent,
            ))

        data = tablib.Dataset(*list_val, headers=headers)

        if format_type == Export_choice.XLS:
            response.write(data.xls)
        elif format_type == Export_choice.CSV:
            response.write(data.csv)
        elif format_type == Export_choice.JSON:
            response.write(data.json)

    return response
Example #3
def main():
    downloader = None

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--insecure", help="use HTTP instead of HTTPS", action="store_true")
    parser.add_argument("-e", "--export", help="export immediately without downloading (Only useful if you already downloaded something to the .pickle file)", action="store_true")
    parser.add_argument('-E', '--Exchange', help='Only export ticker symbols from this exchange (the filtering is done during the export phase)')
    parser.add_argument('type', nargs='?', default='generic', help='The type to download, this can be: ' + " ".join(options.keys()))
    parser.add_argument("-s", "--sleep", help="The time to sleep in seconds between requests", type=float, default=0)
    parser.add_argument("-p", "--pandantic", help="Stop and warn the user if some rare assertion fails", action="store_true")

    args = parser.parse_args()

    protocol = 'http' if args.insecure else 'https'
    if args.insecure:
        print("Using insecure connection")

    if args.export:
        print("Exporting pickle file")

    tickerType = args.type = args.type.lower()

    print("Checking if we can resume a old download session")
    try:
        downloader = loadDownloader(tickerType)
        print("Downloader found on disk, resuming")
    except Exception:
        print("No old downloader found on disk")
        print("Starting a new session")
        if tickerType not in options:
            print("Error: " + tickerType + " is not a valid type option. See --help")
            exit(1)
        else:
            downloader = options[tickerType]

    rp = robotparser.RobotFileParser()
    rp.set_url(protocol + '://finance.yahoo.com/robots.txt')
    rp.read()
    try:
        if not args.export:
            
            if not rp.can_fetch(user_agent, protocol + '://finance.yahoo.com/_finance_doubledown/api/resource/searchassist'):
                print('Execution of script halted due to robots.txt')
                return 1
            
            if not downloader.isDone():
                print("Downloading " + downloader.type)
                print("")
                downloadEverything(downloader, tickerType, args.insecure, args.sleep, args.pandantic)
                print ("Saving downloader to disk...")
                saveDownloader(downloader, tickerType)
                print ("Downloader successfully saved.")
                print ("")
            else:
                print("The downloader has already finished downloading everything")
                print("")

    except Exception as ex:
        print("A exception occurred while downloading. Suspending downloader to disk")
        saveDownloader(downloader, tickerType)
        print("Successfully saved download state")
        print("Try removing {type}.pickle file if this error persists")
        print("Issues can be reported on https://github.com/Benny-/Yahoo-ticker-symbol-downloader/issues")
        print("")
        raise
    except KeyboardInterrupt as ex:
        print("Suspending downloader to disk as .pickle file")
        saveDownloader(downloader, tickerType)

    if downloader.isDone() or args.export:
        print("Exporting "+downloader.type+" symbols")

        data = tablib.Dataset()
        data.headers = downloader.getRowHeader()

        for symbol in downloader.getCollectedSymbols():
            if args.Exchange is None:
                data.append(symbol.getRow())
            elif symbol.exchange == args.Exchange:
                data.append(symbol.getRow())

        with io.open(downloader.type + '.csv', 'w', encoding='utf-8') as f:
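            # "text" is assumed to be six.text_type here; calling join unbound
            # builds the comma-separated header line.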
            f.write(text.join(u',', data.headers) + '\n')
            writer = csv.writer(f)
            for i in range(0, len(data)):
                row = [text(y) if y is not None else u"" for y in data[i]]
                writer.writerow(row)

        try:
            with open(downloader.type + '.xlsx', 'wb') as f:
                f.write(data.xlsx)
        except Exception:
            print("Could not export .xlsx due to an internal error")

        try:
            with open(downloader.type + '.json', 'wb') as f:
                f.write(data.json.encode('UTF-8'))
        except Exception:
            print("Could not export .json due to an internal error")

        try:
            with open(downloader.type + '.yaml', 'wb') as f:
                f.write(data.yaml.encode('UTF-8'))
        except Exception:
            print("Could not export .yaml due to an internal error")
Example #4
def export_surveycall_report(request):
    """Export CSV file of Survey VoIP call record

    **Important variable**:

        * ``request.session['surveycall_record_qs']`` - stores survey voipcall
            query set

    **Exported fields**: ['starting_date', 'phone_number', 'duration',
                          'disposition', 'survey results']
    """
    format = request.GET['format']
    # Get the response object; it can be used as a stream.
    response = HttpResponse(mimetype='text/' + format)
    # force download.
    response['Content-Disposition'] = 'attachment;filename=export.' + format
    #writer = csv.writer(response)
    if request.session.get('session_surveycalls_kwargs'):
        kwargs = request.session.get('session_surveycalls_kwargs')
        qs = VoIPCall.objects.filter(**kwargs)
        campaign_id = request.session['session_campaign_id']
        campaign_obj = Campaign.objects.get(pk=campaign_id)
        column_list_base = [
            'starting_date', 'phone_number', 'duration', 'disposition'
        ]
        column_list = list(column_list_base)

        survey_qst = False
        if campaign_obj.content_type.model == 'survey':
            survey_qst = Section.objects.filter(
                survey_id=int(campaign_obj.object_id))
            for i in survey_qst:
                column = unicode(i.question.replace(',', ' '))
                column_list.append(column.encode('utf-8'))

        result_row = []
        for voipcall in qs:
            result_row_list = []
            # For each VoIP call, retrieve the results of the survey nodes
            results = Result.objects.filter(
                callrequest=voipcall.callrequest_id).order_by('section')

            result_list = {}
            # Prepare a result_list dictionary to make exporting the results easier
            for result in results:
                if result.record_file and len(result.record_file) > 0:
                    result_list[result.section.question] = result.record_file
                else:
                    result_list[result.section.question] = result.response

            # Build result_row_list: one value for each element of column_list
            for ikey in column_list:
                if ikey in column_list_base:
                    # This is not a Section result
                    if ikey == 'starting_date' and format == 'json':
                        starting_date = str(voipcall.__dict__[ikey])
                        result_row_list.append(starting_date)
                    else:
                        result_row_list.append(voipcall.__dict__[ikey])
                else:
                    # This is a Section result
                    if ikey in result_list:
                        result_row_list.append(
                            result_list[ikey].encode('utf-8'))
                    else:
                        # Add an empty result
                        result_row_list.append("")

            result_row.append(result_row_list)

        data = tablib.Dataset(*result_row, headers=tuple(column_list))
        if format == 'xls':
            response.write(data.xls)

        if format == 'csv':
            response.write(data.csv)

        if format == 'json':
            response.write(data.json)
    return response
Example #5
def export_aggregated_events(pj, parameters, obsId):
    """
    export aggregated events

    Args:
        pj (dict): BORIS project
        parameters (dict): subjects, behaviors
        obsId (str): observation id

    Returns:
        tablib.Dataset:

    """
    data = tablib.Dataset()
    observation = pj[OBSERVATIONS][obsId]

    duration1 = []  # in seconds
    if observation[TYPE] in [MEDIA]:
        try:
            for mediaFile in observation[FILE][PLAYER1]:
                if "media_info" in observation:
                    duration1.append(
                        observation["media_info"]["length"][mediaFile])
        except Exception:
            duration1 = []

    total_length = "{0:.3f}".format(
        project_functions.observation_total_length(observation))

    ok, msg, connector = db_functions.load_aggregated_events_in_db(
        pj, parameters["selected subjects"], [obsId],
        parameters["selected behaviors"])
    if not ok:
        return data

    cursor = connector.cursor()

    for subject in parameters["selected subjects"]:

        for behavior in parameters["selected behaviors"]:

            cursor.execute(
                "select distinct modifiers from aggregated_events where subject=? AND behavior=? order by modifiers",
                (
                    subject,
                    behavior,
                ))
            rows_distinct_modifiers = list(x[0].strip()
                                           for x in cursor.fetchall())

            for distinct_modifiers in rows_distinct_modifiers:

                cursor.execute((
                    "SELECT start, stop, type, modifiers, comment, comment_stop FROM aggregated_events "
                    "WHERE subject = ? AND behavior = ? AND modifiers = ? ORDER by start"
                ), (subject, behavior, distinct_modifiers))
                rows = list(cursor.fetchall())

                for row in rows:

                    if observation[TYPE] in [MEDIA]:
                        if duration1:
                            mediaFileIdx = [
                                idx1 for idx1, x in enumerate(duration1)
                                if row["start"] >= sum(duration1[0:idx1])
                            ][-1]
                            mediaFileString = observation[FILE][PLAYER1][
                                mediaFileIdx]
                            fpsString = observation["media_info"]["fps"][
                                observation[FILE][PLAYER1][mediaFileIdx]]
                        else:
                            mediaFileString = "-"
                            fpsString = "NA"

                    if observation[TYPE] in [LIVE]:
                        mediaFileString = "LIVE"
                        fpsString = "NA"

                    #if POINT in project_functions.event_type(behavior, pj[ETHOGRAM]):
                    if row["type"] == POINT:

                        row_data = []
                        row_data.extend([
                            obsId, observation["date"].replace("T", " "),
                            mediaFileString, total_length, fpsString
                        ])

                        # independent variables
                        if INDEPENDENT_VARIABLES in pj:
                            for idx_var in utilities.sorted_keys(
                                    pj[INDEPENDENT_VARIABLES]):
                                if pj[INDEPENDENT_VARIABLES][idx_var][
                                        "label"] in observation[
                                            INDEPENDENT_VARIABLES]:
                                    row_data.append(
                                        observation[INDEPENDENT_VARIABLES][
                                            pj[INDEPENDENT_VARIABLES][idx_var]
                                            ["label"]])
                                else:
                                    row_data.append("")

                        row_data.extend([
                            subject,
                            behavior,
                            row["modifiers"].strip(),
                            POINT,
                            "{0:.3f}".format(row["start"]),  # start
                            "{0:.3f}".format(row["stop"]),  # stop
                            "NA",  # duration
                            row["comment"],
                            ""
                        ])
                        data.append(row_data)

                    #if STATE in project_functions.event_type(behavior, pj[ETHOGRAM]):
                    if row["type"] == STATE:
                        if idx % 2 == 0:
                            row_data = []
                            row_data.extend([
                                obsId, observation["date"].replace("T", " "),
                                mediaFileString, total_length, fpsString
                            ])

                            # independent variables
                            if INDEPENDENT_VARIABLES in pj:
                                for idx_var in utilities.sorted_keys(
                                        pj[INDEPENDENT_VARIABLES]):
                                    if pj[INDEPENDENT_VARIABLES][idx_var][
                                            "label"] in observation[
                                                INDEPENDENT_VARIABLES]:
                                        row_data.append(
                                            observation[INDEPENDENT_VARIABLES]
                                            [pj[INDEPENDENT_VARIABLES][idx_var]
                                             ["label"]])
                                    else:
                                        row_data.append("")

                            row_data.extend([
                                subject, behavior, row["modifiers"].strip(),
                                STATE, "{0:.3f}".format(row["start"]),
                                "{0:.3f}".format(row["stop"]),
                                "{0:.3f}".format(row["stop"] - row["start"]),
                                row["comment"], row["comment_stop"]
                            ])
                            data.append(row_data)

    return data
Example #6
    def export_assets(self, request, queryset):
        asset_workbook = tablib.Databook()

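        # The sheet titles and column headers below are Polish labels (e.g.
        # 'Uczestnicy' = 'Participants', 'Nr Procesu' = 'Process no.',
        # 'Lokalizacje' = 'Locations'); they are kept verbatim as exported data.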
        participants_headers = (
            'DB_id',
            'Nr Procesu',
            'Lp. procedury do procesu',
            'Nazwa uczestnika',
            'Skrót nazwy',
            'Nazwa zadania realizowanego przez uczestnika',
            'Zakres niezbędny do realizacji działań na minimalnym poziomie\
, wymienionych w kolumnach po prawej',
        )
        participants = tablib.Dataset(
            title='Uczestnicy',
            headers=participants_headers,
        )
        participants.append(['', 'Priority', '', '', '', '', ''])

        external_suppliers_headers = (
            'DB_id',
            'Nr Procesu',
            'Lp. procedury do procesu',
            'Nazwa uczestnika',
            'Nazwa zadania realizowanego przez uczestnika',
            'Zakres niezbędny do realizacji działań na minimalnym poziomie\
, wymienionych w kolumnach po prawej',
        )
        external_suppliers = tablib.Dataset(
            title='Uczestnicy zewn.',
            headers=external_suppliers_headers,
        )
        external_suppliers.append(['', 'Priority', '', '', '', ''])

        personnel_headers = (
            'Nr Procesu',
            'Nazwa komórki org.',
            'Skrót komórki org.',
            'Nazwa stanowiska',
            'Wymagane uprawnienia, kompetencje (uwagi)',
            'Liczba osób',
        )
        personnel = tablib.Dataset(
            title='Pracownicy',
            headers=personnel_headers,
        )
        personnel.append(['Priority', '', '', '', '', ''])

        locations_headers = (
            'Nr Procesu',
            'Nazwa komórki org. zarządzającej lokalizacją',
            'Skrót komórki org.',
            'Nazwa lokalizacji',
            'Dodatkowe wymagania/uwagi dot. lokalizacji',
            'Liczba lokalizacji',
            'Adres (dla lokalizacji pojedynczych)',
        )
        locations = tablib.Dataset(
            title='Lokalizacje',
            headers=locations_headers,
        )
        locations.append(['Priority', '', '', '', '', '', ''])

        office_equipment_headers = (
            'Nr Procesu',
            'Nazwa urządzenia',
            'Model, typ (jeśli ma znaczenie)',
            'Dodatkowe wymagania/uwagi dot. urządzenia',
            'Ilość',
            'Nazwy stanowiska użytkownika (dla urządzeń przyporządkowanych \
konkretnej osobie)',
        )
        office_equipment = tablib.Dataset(
            title='Infr. biurowa i peryferia',
            headers=office_equipment_headers,
        )
        office_equipment.append(['Priority', '', '', '', '', ''])

        supplies_headers = (
            'Nr Procesu',
            'Nazwa materiału',
            'Model, typ (jeśli ma znaczenie)',
            'Dodatkowe wymagania/uwagi',
            'Minimalny zapas',
            'Nazwy stanowiska użytkownika (dla urządzeń przyporządkowanych \
konkretnej osobie)',
        )
        supplies = tablib.Dataset(
            title='Materiały eksploat.',
            headers=supplies_headers,
        )
        supplies.append(['Priority', '', '', '', '', ''])

        applications_headers = (
            'DB_id',
            'Nr Procesu',
            'Lp. procedury do procesu',
            'Nazwa zadania realizowanego z wykorzystaniem aplikacji',
            'Skrót nazwy aplikacji',
            'Dodatkowe uwagi dot. aplikacji',
            'Liczba licencji',
            'Nazwy stanowiska użytkownika (dla licencji pojedynczych)',
        )
        applications = tablib.Dataset(
            title='Aplikacje',
            headers=applications_headers,
        )
        applications.append(['', 'Priority', '', '', '', '', '', ''])

        documentation_headers = (
            'DB_id',
            'Nr Procesu',
            'Lp. procedury do procesu',
            'Opis zadania',
            'Nazwa dokumentu/informacji',
            'Wejście / wyjście',
            'Opis, uwagi dodatkowe',
            'Miejsce przechowywania/ścieżka dostępu/źródło zewnetrzne',
            'Klasa bezpieczeństwa',
        )
        documentation = tablib.Dataset(title='Dokumentacja',
                                       headers=documentation_headers)
        documentation.append(['', 'Priority', '', '', '', '', '', '', ''])

        app_list = tablib.Dataset(title='Lista aplikacji')
        app_list.dict = models.BusinessApp.objects.\
            order_by('name').all().values()

        org_list = tablib.Dataset(title='Lista jednostek')
        org_list.dict = models.OrgUnit.objects.order_by(
            'org_unit_type', 'name').all().values()

        print('START OF PROCESSING')
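        # Debug prints below use Polish labels: 'USUWAM' = 'removing',
        # 'DODAJĘ' = 'adding'.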

        for activity in queryset.order_by('MTPD'):
            print('ACTIVITY: %s' % (activity))
            act_col = []
            act_col.append(activity.MTPD)
            while len(participants) > len(act_col):
                act_col.append('')
            participants.append_col(act_col, header=activity.name)

            act_col = []
            act_col.append(activity.MTPD)
            while len(external_suppliers) > len(act_col):
                act_col.append('')
            external_suppliers.append_col(act_col, header=activity.name)

            act_col = []
            act_col.append(activity.MTPD)
            while len(personnel) > len(act_col):
                act_col.append('')
            personnel.append_col(act_col, header=activity.name)

            act_col = []
            act_col.append(activity.MTPD)
            while len(locations) > len(act_col):
                act_col.append('')
            locations.append_col(act_col, header=activity.name)

            act_col = []
            act_col.append(activity.MTPD)
            while len(office_equipment) > len(act_col):
                act_col.append('')
            office_equipment.append_col(act_col, header=activity.name)

            act_col = []
            act_col.append(activity.MTPD)
            while len(supplies) > len(act_col):
                act_col.append('')
            supplies.append_col(act_col, header=activity.name)

            act_col = []
            act_col.append(activity.MTPD)
            while len(applications) > len(act_col):
                act_col.append('')
            applications.append_col(act_col, header=activity.name)

            act_col = []
            act_col.append(activity.MTPD)
            while len(documentation) > len(act_col):
                act_col.append('')
            documentation.append_col(act_col, header=activity.name)

            print('PREPARED FOR WORK TASKS')

            bcm_process = activity.process
            print('ACTIONS:')
            io_data = tablib.Dataset()
            for act in bcm_process.procedure.action_set.all():

                if act.trigger_type != 1 and act.effects_type != 1:
                    continue
                existing = False
                iter = 0
                for r in documentation:
                    iter = iter + 1
                    if r[0] != act.id or \
                        r[1] != act.procedure.process.proc_id or \
                        r[2] != act.in_process_step:
                        continue
                    if (r[5] == 'Wejścia do działania' and r[4] == act.trigger)\
                         or (r[5] == 'Wyjście z działania' and \
                         r[4] == act.effects):
                        existing = True
                        doc_row = []
                        for idx in range(len(r) - 1):
                            doc_row.append(r[idx])
                        print('FOUND:  %s' % (doc_row, ))
                        print('USUWAM: %s' % (documentation[iter - 1], ))
                        del documentation[iter - 1]

                        while (documentation.width - len(doc_row)) > 1:
                            doc_row.append('')
                        doc_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (doc_row, ))
                        documentation.append(doc_row)

                if existing:
                    continue
                else:
                    if act.trigger_type == 1:
                        doc_row = []
                        doc_row.append(act.id)
                        doc_row.append(act.procedure.process.proc_id)
                        doc_row.append(act.in_process_step)
                        doc_row.append(act.name)
                        doc_row.append(act.trigger)
                        doc_row.append('Wejścia do działania')
                        doc_row.append('')
                        doc_row.append('')
                        doc_row.append('')

                        while (documentation.width - len(doc_row)) > 1:
                            doc_row.append('')
                        doc_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (doc_row, ))
                        documentation.append(doc_row)

                    if act.effects_type == 1:
                        doc_row = []
                        doc_row.append(act.id)
                        doc_row.append(act.procedure.process.proc_id)
                        doc_row.append(act.in_process_step)
                        doc_row.append(act.name)
                        doc_row.append(act.effects)
                        doc_row.append('Wyjście z działania')
                        doc_row.append('')
                        doc_row.append('')
                        doc_row.append('')

                        while (documentation.width - len(doc_row)) > 1:
                            doc_row.append('')
                        doc_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (doc_row, ))
                        documentation.append(doc_row)

                # print('MERGING DOCUMENTATION DATASETS')
                # documentation = documentation.stack(io_data)
                print(documentation)
                io_data.wipe()

            for wt in activity.tasks.order_by(
                    'action__procedure__process__proc_id',
                    'action__in_process_step',
            ).all():

                for eu in wt.executive_units.all():

                    existing = False
                    iter = 0
                    for r in participants:
                        iter = iter + 1
                        if r[0] != eu.id:
                            continue
                        if r[1] != activity.process.proc_id:
                            continue
                        if r[2] != wt.action.in_process_step:
                            continue
                        if r[5] != wt.task:
                            continue
                        existing = True
                        eu_row = []
                        for idx in range(len(r) - 1):
                            eu_row.append(r[idx])
                        print('FOUND:  %s' % (eu_row, ))
                        print('USUWAM: %s' % (participants[iter - 1], ))
                        del participants[iter - 1]

                        while (participants.width - len(eu_row)) > 1:
                            eu_row.append('')
                        eu_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (eu_row, ))
                        participants.append(eu_row)
                        break

                    if existing:
                        continue
                    else:
                        eu_row = []
                        eu_row.append(eu.id)
                        eu_row.append(activity.process.proc_id)
                        eu_row.append(wt.action.in_process_step)
                        eu_row.append(eu.name)
                        eu_row.append(eu.acronym)
                        eu_row.append(wt.task)
                        eu_row.append(
                            'do uzupełnienia na podstawie minimalnego \
wymaganego poziomu odtworzenia działania')

                        while (participants.width - len(eu_row)) > 1:
                            eu_row.append('')
                        eu_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (eu_row, ))
                        participants.append(eu_row)

                for cu in wt.cooperative_units.all():

                    existing = False
                    iter = 0
                    for r in participants:
                        iter = iter + 1
                        if r[0] != cu.id:
                            continue
                        if r[1] != activity.process.proc_id:
                            continue
                        if r[2] != wt.action.in_process_step:
                            continue
                        if r[5] != wt.task:
                            continue
                        existing = True
                        cu_row = []
                        for idx in range(len(r) - 1):
                            cu_row.append(r[idx])
                        print('FOUND:  %s' % (cu_row, ))
                        print('USUWAM: %s' % (participants[iter - 1], ))
                        del participants[iter - 1]

                        while (participants.width - len(cu_row)) > 1:
                            cu_row.append('')
                        cu_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (cu_row, ))
                        participants.append(cu_row)
                        break

                    if existing:
                        continue
                    else:
                        cu_row = []
                        cu_row.append(cu.id)
                        cu_row.append(activity.process.proc_id)
                        cu_row.append(wt.action.in_process_step)
                        cu_row.append(cu.name)
                        cu_row.append(cu.acronym)
                        cu_row.append(wt.task)
                        cu_row.append(
                            'do uzupełnienia na podstawie minimalnego \
wymaganego poziomu odtworzenia działania')

                        while (participants.width - len(cu_row)) > 1:
                            cu_row.append('')
                        cu_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (cu_row, ))
                        participants.append(cu_row)

                for st in wt.stakeholders.filter(type__exact=False):
                    existing = False
                    iter = 0
                    for r in external_suppliers:
                        iter = iter + 1
                        if r[0] != st.id:
                            continue
                        if r[1] != activity.process.proc_id:
                            continue
                        if r[2] != wt.action.in_process_step:
                            continue
                        if r[4] != wt.task:
                            continue
                        existing = True
                        st_row = []
                        for idx in range(len(r) - 1):
                            st_row.append(r[idx])
                        print('FOUND:  %s' % (st_row, ))
                        print('USUWAM: %s' % (external_suppliers[iter - 1], ))
                        del external_suppliers[iter - 1]

                        while (external_suppliers.width - len(st_row)) > 1:
                            st_row.append('')
                        st_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (st_row, ))
                        external_suppliers.append(st_row)
                        break

                    if existing:
                        continue
                    else:
                        st_row = []
                        st_row.append(st.id)
                        st_row.append(activity.process.proc_id)
                        st_row.append(wt.action.in_process_step)
                        st_row.append(st.name)
                        st_row.append(wt.task)
                        st_row.append(
                            'do uzupełnienia na podstawie minimalnego \
    wymaganego poziomu odtworzenia działania')

                        while (external_suppliers.width - len(st_row)) > 1:
                            st_row.append('')
                        st_row.append(activity.MTPD)
                        external_suppliers.append(st_row)

                for ba in wt.business_apps.all():

                    existing = False
                    iter = 0
                    for r in applications:
                        iter = iter + 1
                        if r[0] != ba.id:
                            continue
                        if r[1] != activity.process.proc_id:
                            continue
                        if r[2] != wt.action.in_process_step:
                            continue
                        if r[3] != wt.task:
                            continue
                        existing = True
                        ba_row = []
                        for idx in range(len(r) - 1):
                            ba_row.append(r[idx])
                        print('FOUND:  %s' % (ba_row, ))
                        print('USUWAM: %s' % (applications[iter - 1], ))
                        del applications[iter - 1]

                        while (applications.width - len(ba_row)) > 1:
                            ba_row.append('')
                        ba_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (ba_row, ))
                        applications.append(ba_row)
                        break

                    if existing:
                        continue
                    else:
                        ba_row = []
                        ba_row.append(ba.id)
                        ba_row.append(activity.process.proc_id)
                        ba_row.append(wt.action.in_process_step)
                        ba_row.append(wt.task)
                        ba_row.append(ba.name)
                        ba_row.append(ba.description)
                        ba_row.append('')
                        ba_row.append('')

                        while (applications.width - len(ba_row)) > 1:
                            ba_row.append('')
                        ba_row.append(activity.MTPD)
                        print('DODAJĘ: %s' % (ba_row, ))
                        applications.append(ba_row)
        """
        except Exception as e:
            return HttpResponse(
                _(
                    u"<h1>%s encountered while</h1>" \
                    % (type(e).__name__,)
                )
            )
        else:
            print('NO EXCEPTION RAISED')
        finally:

        count_ps = 0
        count_es = 0
        count_apps = 0
        for a in queryset:
            for w in a.tasks.all():
                count_ps = count_ps + w.executive_units.count()
                count_ps = count_ps + w.cooperative_units.count()
                count_es = count_es + w.stakeholders.count()
                count_apps = count_apps + w.business_apps.count()
        print('PARTICIPANTS: %s | %s' % (count_ps, len(participants),))
        print('EXTERNAL SRV: %s | %s' % (count_es, len(external_suppliers),))
        print('APPLICATIONS: %s | %s' % (count_apps, len(applications),))"""

        # participants = participants.sort(col=2)
        # participants = participants.sort(col=1)
        asset_workbook.add_sheet(participants)
        asset_workbook.add_sheet(external_suppliers)
        asset_workbook.add_sheet(personnel)
        asset_workbook.add_sheet(locations)
        asset_workbook.add_sheet(office_equipment)
        asset_workbook.add_sheet(supplies)
        asset_workbook.add_sheet(applications)
        asset_workbook.add_sheet(documentation)
        asset_workbook.add_sheet(app_list)
        asset_workbook.add_sheet(org_list)

        try:
            template_file_name = '/app/core/export/template_assets.xlsx'
            print('TEMPLATE: %s' % (template_file_name, ))
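            # NOTE: 'wb' overwrites the template file with the exported
            # workbook instead of reading it.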
            with open(template_file_name, 'wb') as template:
                print('IT IS OPEN')
                print('BEFORE_EXPORT')
                template.write(asset_workbook.export('xlsx'))

        except Exception as e:
            return HttpResponse(
                _(
                    u"<h1>%s encountered while trying to read Databook</h1>" \
                    % (type(e).__name__,)
                )
            )
Example #7
import tablib

headers = ('id', 'name')

data = [('test_id_1', 'name_1'), ('test_id_2', 'name_2'),
        ('test_id_3', 'name_3')]

data = tablib.Dataset(*data, headers=headers)
print(data.export('csv'))
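
For contrast, a minimal sketch of the reverse direction — loading CSV text back into a Dataset. This assumes a tablib version that provides Dataset.load (present in recent releases):

import tablib

csv_text = "id,name\r\ntest_id_1,name_1\r\ntest_id_2,name_2\r\n"

# load() parses the string in the given format and fills the dataset in place
imported = tablib.Dataset().load(csv_text, format='csv')
print(imported.headers)  # ['id', 'name']
print(imported[0])       # ('test_id_1', 'name_1')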
Example #8
def service_page():
    serviceset = tablib.Dataset()
    with open(os.path.join(os.path.dirname(__file__),
                           'serviceAccounts.csv')) as f:
        serviceset.csv = f.read()
    return serviceset.html
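
A note on this pattern: assigning a CSV string to Dataset.csv replaces the dataset's contents in one step, and reading the .html property then renders the same rows as an HTML table, so no manual parsing is needed.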
Example #9
import tablib

headers = ('first_name', 'last_name')

data = [('John', 'Adams'), ('George', 'Washington')]

data = tablib.Dataset(*data, headers=headers)

#print(data)

data.append(('Henry', 'Ford'))

#print(data)

data.append_col((90, 67, 83), header='age')

#print("\n")
#print(data)

# Exporting to JSON
#print(data.export('json'))

# Exporting to YAML
print(data.export('yaml'))

# Exporting to CSV
csv_data = data.export('csv')
print(csv_data)

with open("people.csv", "w") as fd:
    fd.write(data.export('csv'))
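
One detail worth noting: export('csv'), export('json') and export('yaml') return text, so the plain "w" mode above is fine, whereas binary formats such as export('xlsx') return bytes and require opening the file in "wb" mode (as Example #3 does).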
Example #10
def view_page():
    iamset = tablib.Dataset()
    with open(os.path.join(os.path.dirname(__file__),
                           'projects_iam.csv')) as f:
        iamset.csv = f.read()
    return iamset.html
Example #11
def bucket_page():
    bucketset = tablib.Dataset()
    with open(os.path.join(os.path.dirname(__file__), 'buckets_iam.csv')) as f:
        bucketset.csv = f.read()
    return bucketset.html
Example #12
import tablib

# Create a dataset, approach 1:
# dataset1 = tablib.Dataset()
# dataset1.headers = ['id','name','age','sex']
# dataset1.append(['1','朱宇','21','male'])
# dataset1.append(['2','张昊','22','male'])
# Create a dataset, approach 2:
header2 = ['id','name','password']
data = [
    ['1','杨鹏','123'],
    ['2','代松柏','567'],
]
dataset2 = tablib.Dataset(*data, headers=header2)
# Add
# Append a row (using dataset2 as the example)
dataset2.append(['3','朱宇','123'])
# Append a column: an "age" column on top of the existing data
dataset2.append_col((20, 21, 22), header='age')
# # When there are many rows, a callable can generate the column:
# import random
# def get_score(row):
#     return random.randint(60, 100)
# dataset2.append_col(get_score, header='score')
# # Read
#     # Look at a row, e.g. the first row
# dataset2[0]
#     # Get a column, e.g. all values of the 'score' column
# dataset2['score']
#     # Columns can also be fetched by index, like rows above
# dataset2.get_col(0)
Example #13
    def get_export_data(self, qs, columns):
        export_data = tablib.Dataset(headers=[label for key, label in columns] + ['_instance'])
        for instance in qs:
            export_data.append(self.get_export_data_row(instance, columns) + [instance])
        return export_data
Example #14
    def export_xls(cls, proj, ver):
        """
        导出图片打分信息为xls文件
        :return:
        """
        total_imgs = Image.objects.filter(project=proj, version=ver)
        resolutions = set(
            [r.get('resolution') for r in total_imgs.values("resolution")])
        data_set = set()
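        # Chinese strings below are report labels: '名称' = name; '对焦' = focus,
        # '清晰' = sharpness, '曝光' = exposure, '颜值' = looks, '纹理' = texture;
        # '振铃效应' = ringing, '块效应' = blocking, '噪声' = noise,
        # '模糊or失真' = blur/distortion; '改进空间' = room for improvement.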
        total_headers = ['名称']
        total_stats = tablib.Dataset()
        total_stats.title = 'Total Average'
        for resolution in resolutions:
            imgs = Image.objects.filter(project=proj,
                                        version=ver,
                                        resolution=resolution)
            grade_times = [img.get_grade_times() for img in imgs]
            width = max(grade_times)
            stats = tablib.Dataset()
            aver_data = ['Average']
            headers = ['名称']
            new_col = []

            if proj == 'Mark':
                stats.title = ver
                getter = itemgetter('dem1', 'dem2', 'dem3', 'dem4')
                dem_list = ['对焦', '清晰', '曝光', '颜值']
            elif proj == 'Zhaidai_Project':
                stats.title = resolution
                getter = itemgetter('dem1', 'dem2', 'dem3', 'dem4', 'dem5')
                dem_list = ['对焦', '清晰', '曝光', '颜值', '纹理']
            elif proj == 'AI-case':
                stats.title = resolution
                getter = itemgetter('dem1', 'dem2', 'dem3', 'dem4')
                dem_list = ['振铃效应', '块效应', '噪声', '模糊or失真']
            else:
                stats.title = resolution
                getter = itemgetter('dem1')
                dem_list = ['改进空间']

            for img in imgs:
                data = getter(img.get_stats(width=width))
                if isinstance(data, list):
                    data = (data, )
                dct = set(data[0])
                if dct == {''}:
                    continue
                row = reduce(add, data)
                row.insert(0, img.name)
                stats.append(row)

            for dem in dem_list:
                h = [dem + str(i) for i in (list(range(width)) + ['avg'])]
                headers.extend(h)
            stats.headers = headers
            for header in headers[1:]:
                if header.endswith('avg'):
                    h_list = list(filter(lambda x: x != '', stats[header]))
                    try:
                        aver_data.append(
                            str(round(sum(h_list) / len(h_list), 2)))
                    except ZeroDivisionError:
                        aver_data.append(0)
                else:
                    aver_data.append('\\')
            stats.append(aver_data)

            for s in stats:
                data_col = []
                for index in range(len(stats.headers)):
                    if stats.headers[index].endswith('avg'):
                        data_col.append(float(s[index]))
                try:
                    new_col.append(str(round(sum(data_col) / len(data_col),
                                             2)))
                except ZeroDivisionError:
                    new_col.append(0)
            stats.append_col(new_col, header='Average')
            dem_list.append('Average')

            data_set.add(stats)
            last_data = list(stats[-1])
            last_data[0] = resolution
            filter_aver_data = list(filter(lambda x: x != '\\', last_data))
            total_stats.append(filter_aver_data)

        total_headers.extend(dem_list)
        total_stats.headers = total_headers
        total_average = ['Total Average']
        for dem in dem_list:
            h_list = list(filter(lambda x: x != '', total_stats[dem]))
            total_list = [float(x) for x in h_list]
            try:
                total_average.append(
                    str(round(sum(total_list) / len(total_list), 2)))
            except ZeroDivisionError:
                total_average.append(0)
        total_stats.append(total_average)
        data_set.add(total_stats)
        book = tablib.Databook(data_set)
        return book.export('xls')
Example #15
def synthetic_time_budget(pj: dict, selected_observations: list,
                          parameters_obs: dict):
    """
    create a synthetic time budget

    Args:
        pj (dict): project dictionary
        selected_observations (list): list of observations to include in time budget
        parameters_obs (dict):

    Returns:
        bool: True if everything OK
        str: message
        tablib.Dataset: dataset containing synthetic time budget data
    """
    try:
        selected_subjects = parameters_obs[SELECTED_SUBJECTS]
        selected_behaviors = parameters_obs[SELECTED_BEHAVIORS]
        include_modifiers = parameters_obs[INCLUDE_MODIFIERS]
        interval = parameters_obs["time"]
        start_time = parameters_obs["start time"]
        end_time = parameters_obs["end time"]

        parameters = [
            ["duration", "Total duration"],
            ["number", "Number of occurrences"],
            ["duration mean", "Duration mean"],
            ["duration stdev", "Duration std dev"],
            ["proportion of time", "Proportion of time"],
        ]

        data_report = tablib.Dataset()
        data_report.title = "Synthetic time budget"

        ok, msg, db_connector = db_functions.load_aggregated_events_in_db(
            pj, selected_subjects, selected_observations, selected_behaviors)

        if not ok:
            return False, msg, None

        db_connector.create_aggregate("stdev", 1, StdevFunc)
        cursor = db_connector.cursor()

        # modifiers
        if include_modifiers:
            cursor.execute(
                "SELECT distinct behavior, modifiers FROM aggregated_events")
            distinct_behav_modif = [[rows["behavior"], rows["modifiers"]]
                                    for rows in cursor.fetchall()]
        else:
            cursor.execute("SELECT distinct behavior FROM aggregated_events")
            distinct_behav_modif = [[rows["behavior"], ""]
                                    for rows in cursor.fetchall()]

        # add selected behaviors that are not observed
        for behav in selected_behaviors:
            if [x for x in distinct_behav_modif if x[0] == behav] == []:
                distinct_behav_modif.append([behav, ""])

        behaviors = init_behav_modif(pj[ETHOGRAM], selected_subjects,
                                     distinct_behav_modif, include_modifiers,
                                     parameters)

        param_header = ["", "Total length (s)"]
        subj_header = [""] * len(param_header)
        behav_header = [""] * len(param_header)
        modif_header = [""] * len(param_header)

        for subj in selected_subjects:
            for behavior_modifiers in distinct_behav_modif:
                behavior, modifiers = behavior_modifiers
                behavior_modifiers_str = "|".join(
                    behavior_modifiers) if modifiers else behavior
                for param in parameters:
                    subj_header.append(subj)
                    behav_header.append(behavior)
                    modif_header.append(modifiers)
                    param_header.append(param[1])
        '''
        if parameters_obs["group observations"]:
            cursor.execute("UPDATE aggregated_events SET observation = 'all' " )
            #selected_observations = ["all"]
        '''

        data_report.append(subj_header)
        data_report.append(behav_header)
        if include_modifiers:
            data_report.append(modif_header)
        data_report.append(param_header)

        # select time interval
        for obs_id in selected_observations:

            ok, msg, db_connector = db_functions.load_aggregated_events_in_db(
                pj, selected_subjects, [obs_id], selected_behaviors)

            if not ok:
                return False, msg, None

            db_connector.create_aggregate("stdev", 1, StdevFunc)
            cursor = db_connector.cursor()

            # if modifiers not to be included set modifiers to ""
            if not include_modifiers:
                cursor.execute("UPDATE aggregated_events SET modifiers = ''")

            # time
            obs_length = project_functions.observation_total_length(
                pj[OBSERVATIONS][obs_id])
            if obs_length == -1:
                obs_length = 0

            if interval == TIME_FULL_OBS:
                min_time = float(0)
                max_time = float(obs_length)

            if interval == TIME_EVENTS:
                try:
                    min_time = float(pj[OBSERVATIONS][obs_id][EVENTS][0][0])
                except Exception:
                    min_time = float(0)
                try:
                    max_time = float(pj[OBSERVATIONS][obs_id][EVENTS][-1][0])
                except Exception:
                    max_time = float(obs_length)

            if interval == TIME_ARBITRARY_INTERVAL:
                min_time = float(start_time)
                max_time = float(end_time)

            # adapt start and stop to the selected time interval
            cursor.execute(
                "UPDATE aggregated_events SET start = ? WHERE observation = ? AND start < ? AND stop BETWEEN ? AND ?",
                (
                    min_time,
                    obs_id,
                    min_time,
                    min_time,
                    max_time,
                ))
            cursor.execute(
                "UPDATE aggregated_events SET stop = ? WHERE observation = ? AND stop > ? AND start BETWEEN ? AND ?",
                (
                    max_time,
                    obs_id,
                    max_time,
                    min_time,
                    max_time,
                ))

            cursor.execute(
                "UPDATE aggregated_events SET start = ?, stop = ? WHERE observation = ? AND start < ? AND stop > ?",
                (
                    min_time,
                    max_time,
                    obs_id,
                    min_time,
                    max_time,
                ))

            cursor.execute(
                "DELETE FROM aggregated_events WHERE observation = ? AND (start < ? AND stop < ?) OR (start > ? AND stop > ?)",
                (
                    obs_id,
                    min_time,
                    min_time,
                    max_time,
                    max_time,
                ))

            for subject in selected_subjects:

                # check if behaviors are to exclude from total time
                time_to_subtract = 0
                if EXCLUDED_BEHAVIORS in parameters_obs:
                    for excluded_behav in parameters_obs[EXCLUDED_BEHAVIORS]:
                        cursor.execute((
                            "SELECT SUM(stop-start) "
                            "FROM aggregated_events "
                            "WHERE observation = ? AND subject = ? AND behavior = ? "
                        ), (
                            obs_id,
                            subject,
                            excluded_behav,
                        ))
                        for row in cursor.fetchall():
                            if row[0] is not None:
                                time_to_subtract += row[0]

                for behavior_modifiers in distinct_behav_modif:
                    behavior, modifiers = behavior_modifiers
                    behavior_modifiers_str = "|".join(
                        behavior_modifiers) if modifiers else behavior

                    cursor.execute((
                        "SELECT SUM(stop-start), COUNT(*), AVG(stop-start), stdev(stop-start) "
                        "FROM aggregated_events "
                        "WHERE observation = ? AND subject = ? AND behavior = ? AND modifiers = ? "
                    ), (
                        obs_id,
                        subject,
                        behavior,
                        modifiers,
                    ))

                    for row in cursor.fetchall():
                        behaviors[subject][behavior_modifiers_str][
                            "duration"] = (0 if row[0] is None else
                                           f"{row[0]:.3f}")

                        behaviors[subject][behavior_modifiers_str][
                            "number"] = 0 if row[1] is None else row[1]
                        behaviors[subject][behavior_modifiers_str][
                            "duration mean"] = (0 if row[2] is None else
                                                f"{row[2]:.3f}")
                        behaviors[subject][behavior_modifiers_str][
                            "duration stdev"] = (0 if row[3] is None else
                                                 f"{row[3]:.3f}")

                        if behavior not in parameters_obs[EXCLUDED_BEHAVIORS]:
                            try:
                                behaviors[subject][behavior_modifiers_str][
                                    "proportion of time"] = (
                                        0 if row[0] is None else
                                        f"{row[0] / ((max_time - min_time) - time_to_subtract):.3f}"
                                    )
                            except ZeroDivisionError:
                                behaviors[subject][behavior_modifiers_str][
                                    "proportion of time"] = "-"
                        else:
                            # behavior subtracted
                            behaviors[subject][behavior_modifiers_str][
                                "proportion of time"] = (
                                    0 if row[0] is None else
                                    f"{row[0] / (max_time - min_time):.3f}")

            columns = [obs_id, f"{max_time - min_time:0.3f}"]
            for subj in selected_subjects:
                for behavior_modifiers in distinct_behav_modif:
                    behavior, modifiers = behavior_modifiers
                    behavior_modifiers_str = "|".join(
                        behavior_modifiers) if modifiers else behavior

                    for param in parameters:
                        columns.append(
                            behaviors[subj][behavior_modifiers_str][param[0]])

            data_report.append(columns)
    except Exception:

        error_type, error_file_name, error_lineno = utilities.error_info(
            sys.exc_info())
        msg = (f"Error type: {error_type}\n"
               f"Error file name: {error_file_name}\n"
               f"Error line number: {error_lineno}")
        logging.critical(msg)

        return (False, msg, tablib.Dataset())

    return True, "", data_report
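
Note that SQLite has no built-in stdev aggregate, so the stdev(stop-start) call in the query above presumably relies on a user-defined aggregate registered on the connection. A minimal sketch of such a registration (the class name and placement are assumptions):

import math
import sqlite3

class Stdev:
    """Sample standard deviation aggregate for SQLite (a sketch)."""

    def __init__(self):
        self.values = []

    def step(self, value):
        # ignore NULLs, like built-in SQL aggregates do
        if value is not None:
            self.values.append(value)

    def finalize(self):
        if len(self.values) < 2:
            return None
        mean = sum(self.values) / len(self.values)
        var = sum((v - mean) ** 2 for v in self.values) / (len(self.values) - 1)
        return math.sqrt(var)

# registering it makes stdev(...) usable in SQL:
# cursor.connection.create_aggregate("stdev", 1, Stdev)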
Example #16
0
    def take_action(self, parsed_args):
        self.log.debug('take_action({a})'.format(a=parsed_args))

        args = {}
        args_id = {}
        args_followers = {}

        if parsed_args.limit:
            args['limit'] = parsed_args.limit

        if parsed_args.status:
            args['status'] = parsed_args.status

        if parsed_args.kind:
            args['kind'] = parsed_args.kind

        if parsed_args.priority:
            args['priority'] = parsed_args.priority

        if parsed_args.reported_by:
            args['reported_by'] = parsed_args.reported_by

        if parsed_args.is_spam:
            args['is_spam'] = parsed_args.is_spam

        if parsed_args.search:
            args['search'] = parsed_args.search

        if parsed_args.id:
            args_id['id'] = parsed_args.id

        if parsed_args.followers:
            args_followers['followers'] = parsed_args.followers

        issuelist_url = {}
        issuedetail_url = {}
        issuefilter_url = {}
        issuefollowers_url = {}

        if all([not(args),
                not(args_id),
                not(args_followers)]):
            url = ("https://bitbucket.org/api/1.0/"
                   "repositories/{a.account}/{a.reponame}/"
                   "issues/?").format(a=parsed_args)
            issuelist_url['url'] = url
        elif args == {} and args_followers == {} and args_id != {}:
            url = ("https://bitbucket.org/api/1.0/"
                   "repositories/{a.account}/{a.reponame}/"
                   "issues/{a.id}").format(a=parsed_args)
            issuedetail_url['url'] = url
        elif args_id == {} and args_followers == {} and args != {}:
            primaryurl = ("https://bitbucket.org/api/1.0/"
                          "repositories/{a.account}/{a.reponame}/"
                          "issues/?").format(a=parsed_args)
            params = urllib.parse.urlencode(args)
            url = primaryurl + params
            issuefilter_url['url'] = url
        elif args == {} and args_id != {} and args_followers != {}:
            url = ("https://bitbucket.org/api/1.0/"
                   "repositories/{a.account}/{a.reponame}/"
                   "issues/{a.id}/"
                   "followers").format(a=parsed_args)
            issuefollowers_url['url'] = url
        else:
            self.app.stdout.write('\nInvalid argument supplied.\n')
            sys.exit(1)
        user, passwd = read_creds()
        r = requests.get(url, auth=(user, passwd))

        try:
            data = json.loads(r.text)
        except ValueError:
            print("\n Error: 404, no issues found, or invalid argument supplied.\n")
            sys.exit(1)

        if all([issuelist_url,
                not(issuedetail_url),
                not(issuefilter_url),
                not(issuefollowers_url)]):
            if parsed_args.export:
                csvdata = tablib.Dataset()
                csvdata.headers = [
                    "ID",
                    "Status",
                    "Title",
                    "Kind",
                    "Priority",
                    "Version",
                    "Component",
                    "Milestone",
                    "Reported By",
                    "Created On",
                    "Last Updated",
                    "Responsible",
                    "Comment Count",
                    "is_spam",
                    "Followers Count"]

                with open('issues.csv', 'w') as f:
                    for i in data['issues']:
                        metadata = i.get('metadata', {})
                        row = [
                            i.get('local_id', 'None'),
                            i.get('status', 'None'),
                            i.get('title', 'None'),
                            metadata.get('kind', 'None'),
                            i.get('priority', 'None'),
                            metadata.get('version', 'None'),
                            metadata.get('component', 'None'),
                            metadata.get('milestone', 'None'),
                            i['reported_by']['username']
                            if 'reported_by' in i else 'None',
                            i.get('utc_created_on', 'None'),
                            i.get('utc_last_updated', 'None'),
                            i['responsible']['username']
                            if 'responsible' in i else 'None',
                            i.get('comment_count', 'None'),
                            i.get('is_spam', 'None'),
                            i.get('follower_count', 'None'),
                        ]
                        csvdata.append(row)
                    f.write(csvdata.csv)
                    print("\n CSV file created.\n")
                    sys.exit(0)
            else:
                print("\nTotal Issues: {d[count]}\n".format(d=data))

                loopmsg = """Issue_ID: {i[local_id]}
Issue_Status: {i[status]}
Issue_Title: {i[title]}
======================================================================="""

                for i in data['issues']:
                    print(loopmsg.format(i=i))
                sys.exit(0)
        elif all([issuedetail_url,
                  not(issuelist_url),
                  not(issuefilter_url),
                  not(issuefollowers_url)]):
            newdata = {}
            newdata['issue id'] = data['local_id']
            newdata['status'] = data['status']
            newdata['kind'] = data['metadata']['kind']
            newdata['priority'] = data['priority']
            newdata['version'] = data['metadata']['version']
            newdata['component'] = data['metadata']['component']
            newdata['milestone'] = data['metadata']['milestone']
            newdata['reported by'] = data['reported_by']['username']
            newdata['utc_created_on'] = data['utc_created_on']
            newdata['utc_last_updated'] = data['utc_last_updated']
            newdata['responsible'] = data['responsible']['username']
            newdata['created on'] = data['created_on']
            newdata['comment_count'] = data['comment_count']
            newdata['is_spam'] = data['is_spam']
            newdata['follower_count'] = data['follower_count']
            columns = newdata.keys()
            columndata = newdata.values()
            print("\nTitle: %s\n" % (data['title']))
            print("Content: %s\n" % (data['content']))
            return (columns, columndata)
        elif all([issuefilter_url,
                  not(issuelist_url),
                  not(issuedetail_url),
                  not(issuefollowers_url)]):
            if parsed_args.export:
                csvdata = tablib.Dataset()
                csvdata.headers = [
                    "ID",
                    "Status",
                    "Title",
                    "Kind",
                    "Priority",
                    "Version",
                    "Component",
                    "Milestone",
                    "Reported By",
                    "Created On",
                    "Last Updated",
                    "Responsible",
                    "Title",
                    "Content",
                    "Comment Count",
                    "is_spam",
                    "Followers Count"]

                with open('issues.csv', 'w') as f:
                    for i in data['issues']:
                        metadata = i.get('metadata', {})
                        row = [
                            i.get('local_id', 'None'),
                            i.get('status', 'None'),
                            i.get('title', 'None'),
                            metadata.get('kind', 'None'),
                            i.get('priority', 'None'),
                            metadata.get('version', 'None'),
                            metadata.get('component', 'None'),
                            metadata.get('milestone', 'None'),
                            i['reported_by']['username']
                            if 'reported_by' in i else 'None',
                            i.get('utc_created_on', 'None'),
                            i.get('utc_last_updated', 'None'),
                            i['responsible']['username']
                            if 'responsible' in i else 'None',
                            i.get('title', 'None'),
                            i.get('content', 'None'),
                            i.get('comment_count', 'None'),
                            i.get('is_spam', 'None'),
                            i.get('follower_count', 'None'),
                        ]
                        csvdata.append(row)
                    f.write(csvdata.csv)
                    print("\n CSV file created.\n")
                sys.exit(0)
            else:
                print("\nTotal Issues: {d[count]}\n".format(d=data))

                loopmsg = """Issue_ID: {i[local_id]}
Issue_Status: {i[status]}
Issue_Title: {i[title]}
======================================================================="""
                for i in data['issues']:
                    print(loopmsg.format(i=i))
                sys.exit(0)
        elif all([issuefollowers_url,
                  not(issuelist_url),
                  not(issuedetail_url),
                  not(issuefilter_url)]):
            print("\nFollowers Count: %s\n" % (data['count']))
            for i in data['followers']:
                print("Followers Name: %s" % (i['username']))
            print("\n")
            sys.exit(0)
        else:
            print("Invalid Request no data received.")
            sys.exit(1)
Example #17
0
def create_behavior_binary_table(pj: dict,
                                 selected_observations: list,
                                 parameters_obs: dict,
                                 time_interval: float) -> dict:
    """
    create behavior binary table

    Args:
        pj (dict): project dictionary
        selected_observations (list): list of selected observations
        parameters_obs (dict): dict of parameters
        time_interval (float): time interval (in seconds)

    Returns:
        dict: dictionary of tablib dataset

    """

    results_df = {}

    state_behavior_codes = [x for x in utilities.state_behavior_codes(pj[ETHOGRAM]) if x in parameters_obs[SELECTED_BEHAVIORS]]
    point_behavior_codes = [x for x in utilities.point_behavior_codes(pj[ETHOGRAM]) if x in parameters_obs[SELECTED_BEHAVIORS]]
    if not state_behavior_codes and not point_behavior_codes:
        return {"error": True, "msg": "No state events selected"}

    for obs_id in selected_observations:

        if obs_id not in results_df:
            results_df[obs_id] = {}

        for subject in parameters_obs[SELECTED_SUBJECTS]:

            # extract tuple (behavior, modifier)
            behav_modif_list = [
                (idx[2], idx[3])
                for idx in pj[OBSERVATIONS][obs_id][EVENTS]
                if idx[1] == (subject if subject != NO_FOCAL_SUBJECT else "")
                and idx[2] in parameters_obs[SELECTED_BEHAVIORS]
            ]

            # extract observed subjects NOT USED at the moment
            observed_subjects = [event[EVENT_SUBJECT_FIELD_IDX] for event in pj[OBSERVATIONS][obs_id][EVENTS]]

            # add selected behavior if not found in (behavior, modifier)
            if not parameters_obs[EXCLUDE_BEHAVIORS]:
                for behav in parameters_obs[SELECTED_BEHAVIORS]:
                    if behav not in [x[0] for x in behav_modif_list]:
                        behav_modif_list.append((behav, ""))

            behav_modif_set = set(behav_modif_list)
            observed_behav = [(x[0], x[1]) for x in sorted(behav_modif_set)]
            if parameters_obs[INCLUDE_MODIFIERS]:
                results_df[obs_id][subject] = tablib.Dataset(headers=["time"] + [f"{x[0]}" + f" ({x[1]})" * (x[1] != "")
                                                                                 for x in sorted(behav_modif_set)])
            else:
                results_df[obs_id][subject] = tablib.Dataset(headers=["time"] + [x[0] for x in sorted(behav_modif_set)])

            if subject == NO_FOCAL_SUBJECT:
                sel_subject_dict = {"": {SUBJECT_NAME: ""}}
            else:
                sel_subject_dict = dict([(idx, pj[SUBJECTS][idx]) for idx in pj[SUBJECTS] if pj[SUBJECTS][idx][SUBJECT_NAME] == subject])

            row_idx = 0
            t = parameters_obs[START_TIME]
            while t < parameters_obs[END_TIME]:

                # state events
                current_states = utilities.get_current_states_modifiers_by_subject_2(
                    state_behavior_codes,
                    pj[OBSERVATIONS][obs_id][EVENTS],
                    sel_subject_dict,
                    t)

                # point events
                current_point = utilities.get_current_points_by_subject(point_behavior_codes,
                                                                        pj[OBSERVATIONS][obs_id][EVENTS],
                                                                        sel_subject_dict,
                                                                        t,
                                                                        time_interval
                                                                        )

                cols = [float(t)]  # time

                for behav in observed_behav:
                    if behav[0] in state_behavior_codes:
                        cols.append(int(behav in current_states[list(current_states.keys())[0]]))

                    if behav[0] in point_behavior_codes:
                        cols.append(current_point[list(current_point.keys())[0]].count(behav))

                results_df[obs_id][subject].append(cols)

                t += time_interval
                row_idx += 1

    return results_df
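
Since create_behavior_binary_table returns a plain dict of tablib datasets keyed by observation id and subject, a caller can export each table directly. A hedged usage sketch (the project structures and file naming are illustrative):

results = create_behavior_binary_table(pj, selected_observations,
                                       parameters_obs, time_interval=1.0)
if not results.get("error"):
    for obs_id, subjects in results.items():
        for subject, dataset in subjects.items():
            with open(f"{obs_id}_{subject or 'no_focal'}.tsv", "w") as f:
                f.write(dataset.tsv)  # tablib renders the dataset as TSV text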
Example #18
0
    def test_latex_export_empty_dataset(self):
        self.assertTrue(tablib.Dataset().latex is not None)
Example #19
0
    def handle(self, *args, **options):
        src = options["src"]
        from_index = options.pop("from_index")
        to_index = options.pop("to_index")
        site_id = options.pop("site_id", None)
        permament = options.pop("permanent")
        dry_run = options.pop("dry_run")
        format_ = options.pop("format", None)
        ask = options.pop("ask")

        errors = []
        successes = 0
        skipped = 0
        total = 0
        site = None

        if site_id:
            site = Site.objects.get(id=site_id)

        if not os.path.exists(src):
            raise Exception("Missing file '{0}'".format(src))

        if not os.path.getsize(src) > 0:
            raise Exception("File '{0}' is empty".format(src))

        _, extension = os.path.splitext(src)

        if extension in [".xls", ".xlsx"]:
            mode = "rb"
        else:
            mode = "r"

        if not format_:
            format_ = extension.lstrip(".")

        with open(src, mode) as fh:
            imported_data = tablib.Dataset().load(fh.read(), format=format_)

            sample_data = tablib.Dataset(
                *imported_data[:min(len(imported_data), 4)],
                headers=imported_data.headers)

            try:
                self.stdout.write("Sample data:")
                self.stdout.write(str(sample_data))
            except Exception:
                self.stdout.write("Warning: Cannot display sample data")

            self.stdout.write("--------------")

            if site:
                self.stdout.write("Using site: {}".format(site.hostname))

            self.stdout.write("Importing redirects:")
            for row in imported_data:
                total += 1

                from_link = row[from_index]
                to_link = row[to_index]

                data = {
                    "old_path": from_link,
                    "redirect_link": to_link,
                    "is_permanent": permament,
                }

                if site:
                    data["site"] = site.pk

                form = RedirectForm(data)
                if not form.is_valid():
                    error = form.errors.as_text().replace("\n", "")
                    self.stdout.write(
                        "{}. Error: {} -> {} (Reason: {})".format(
                            total,
                            from_link,
                            to_link,
                            error,
                        ))
                    errors.append(error)
                    continue

                if ask:
                    answer = input("{}. Found {} -> {} Create? Y/n: ".format(
                        total,
                        from_link,
                        to_link,
                    ))

                    if answer != "Y":
                        skipped += 1
                        continue
                else:
                    self.stdout.write("{}. {} -> {}".format(
                        total,
                        from_link,
                        to_link,
                    ))

                if dry_run:
                    successes += 1
                    continue

                form.save()
                successes += 1

        self.stdout.write("\n")
        self.stdout.write("Found: {}".format(total))
        self.stdout.write("Created: {}".format(successes))
        self.stdout.write("Skipped : {}".format(skipped))
        self.stdout.write("Errors: {}".format(len(errors)))
Example #20
0
    def test_latex_export_no_headers(self):
        d = tablib.Dataset()
        d.append(('one', 'two', 'three'))
        self.assertTrue('one' in d.latex)
Example #21
0
import tablib as tl


def format_to_df(data, columns=None):
    """Convert raw rows into a tablib Dataset."""
    return tl.Dataset(*data, headers=columns)
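
A quick usage sketch for format_to_df (the rows and column names are made up for illustration):

rows = [("alice", 30), ("bob", 25)]
ds = format_to_df(rows, columns=["name", "age"])
print(ds.csv)  # name,age / alice,30 / bob,25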
Example #22
0
    def test_latex_export_none_values(self):
        headers = ['foo', None, 'bar']
        d = tablib.Dataset(['foo', None, 'bar'], headers=headers)
        output = d.latex
        self.assertTrue('foo' in output)
        self.assertFalse('None' in output)
Example #23
0
    def test_error_no_amount(self):
        dataset = tablib.Dataset(["15/6/2016", "Example payment"],
                                 headers=["date", "description"])
        result = self.makeResource().import_data(dataset)
        self.assertEqual(len(result.row_errors()), 1)
        self.assertIn("No amount", str(result.row_errors()[0][1][0].error))
Example #24
0
    def test_str_no_columns(self):
        d = tablib.Dataset(['a', 1], ['b', 2], ['c', 3])
        output = '%s' % d

        self.assertEqual(output.splitlines(), ['a|1', 'b|2', 'c|3'])
Example #25
0
def export_events(parameters, obsId, observation, ethogram, file_name,
                  output_format):
    """
    export events

    Args:
        parameters (dict): subjects, behaviors
        obsId (str): observation id
        observation (dict): observation
        ethogram (dict): ethogram of project
        file_name (str): file name for exporting events
        output_format (str): output for exporting events

    Returns:
        bool: result: True if OK else False
        str: error message
    """

    total_length = "{0:.3f}".format(
        project_functions.observation_total_length(observation))

    eventsWithStatus = project_functions.events_start_stop(
        ethogram, observation[EVENTS])

    # check max number of modifiers
    max_modifiers = 0
    for event in eventsWithStatus:
        for c in pj_events_fields:
            if c == "modifier" and event[pj_obs_fields[c]]:
                max_modifiers = max(max_modifiers,
                                    len(event[pj_obs_fields[c]].split("|")))

    # media file number
    mediaNb = 0
    if observation["type"] in [MEDIA]:
        for idx in observation[FILE]:
            for media in observation[FILE][idx]:
                mediaNb += 1

    rows = []

    # observation id
    rows.append(["Observation id", obsId])
    rows.append([""])

    # media file name
    if observation["type"] in [MEDIA]:
        rows.append(["Media file(s)"])
    else:
        rows.append(["Live observation"])
    rows.append([""])

    if observation[TYPE] in [MEDIA]:

        for idx in observation[FILE]:
            for media in observation[FILE][idx]:
                rows.append(["Player #{0}".format(idx), media])
    rows.append([""])

    # date
    if "date" in observation:
        rows.append(
            ["Observation date", observation["date"].replace("T", " ")])
    rows.append([""])

    # description
    if "description" in observation:
        rows.append(
            ["Description",
             utilities.eol2space(observation["description"])])
    rows.append([""])

    # time offset
    if "time offset" in observation:
        rows.append(["Time offset (s)", observation["time offset"]])
    rows.append([""])

    # independent variables
    if INDEPENDENT_VARIABLES in observation:
        rows.extend([["independent variables"], ["variable", "value"]])

        for variable in observation[INDEPENDENT_VARIABLES]:
            rows.append(
                [variable, observation[INDEPENDENT_VARIABLES][variable]])

    rows.append([""])

    # write table header
    header = ["Time"]
    header.extend(["Media file path", "Total length", "FPS"])

    header.extend(["Subject", "Behavior"])
    for x in range(1, max_modifiers + 1):
        header.append("Modifier {}".format(x))
    header.extend(["Comment", "Status"])

    rows.append(header)

    duration1 = []  # in seconds
    if observation["type"] in [MEDIA]:
        try:
            for mediaFile in observation[FILE][PLAYER1]:
                duration1.append(
                    observation["media_info"]["length"][mediaFile])
        except Exception:
            # media_info length may be missing for some files
            pass

    for event in eventsWithStatus:

        if (((event[SUBJECT_EVENT_FIELD] in parameters["selected subjects"]) or
             (event[SUBJECT_EVENT_FIELD] == ""
              and NO_FOCAL_SUBJECT in parameters["selected subjects"])) and
            (event[BEHAVIOR_EVENT_FIELD] in parameters["selected behaviors"])):

            fields = []
            fields.append(
                utilities.intfloatstr(str(event[EVENT_TIME_FIELD_IDX])))

            if observation["type"] in [MEDIA]:

                time_ = event[EVENT_TIME_FIELD_IDX] - observation[TIME_OFFSET]
                if time_ < 0:
                    time_ = 0

                mediaFileIdx = [
                    idx1 for idx1, x in enumerate(duration1)
                    if time_ >= sum(duration1[0:idx1])
                ][-1]
                fields.append(
                    utilities.intfloatstr(
                        str(observation[FILE][PLAYER1][mediaFileIdx])))
                fields.append(total_length)
                fields.append(observation["media_info"]["fps"][
                    observation[FILE][PLAYER1][mediaFileIdx]])  # fps

            if observation["type"] in [LIVE]:
                fields.append(LIVE)  # media
                fields.append(total_length)  # total length
                fields.append("NA")  # FPS

            fields.append(event[EVENT_SUBJECT_FIELD_IDX])
            fields.append(event[EVENT_BEHAVIOR_FIELD_IDX])

            modifiers = event[EVENT_MODIFIER_FIELD_IDX].split("|")
            while len(modifiers) < max_modifiers:
                modifiers.append("")

            for m in modifiers:
                fields.append(m)
            fields.append(event[EVENT_COMMENT_FIELD_IDX].replace(
                os.linesep, " "))
            # status
            fields.append(event[-1])

            rows.append(fields)

    maxLen = max([len(r) for r in rows])
    data = tablib.Dataset()

    data.title = obsId
    # replace characters that are forbidden in worksheet names
    if output_format in ["xls", "xlsx"]:
        for forbidden_char in r"\/*[]:?":
            data.title = data.title.replace(forbidden_char, " ")

    if output_format in ["xls"]:
        # XLS worksheet names are limited to 31 characters
        if len(data.title) > 31:
            data.title = data.title[0:31]

    for row in rows:
        data.append(utilities.complete(row, maxLen))

    r, msg = dataset_write(data, file_name, output_format)

    return r, msg
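
tablib rejects an appended row whose width does not match the dataset headers, which is why every row is passed through utilities.complete before data.append. A minimal sketch of what such a padding helper might look like (the actual implementation may differ):

def complete(row: list, length: int) -> list:
    # pad a row with empty strings so all rows share the same width
    return row + [""] * (length - len(row))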
Example #26
0
def get_files(
    ctx,
    workflow,
    _format,
    filters,
    output_format,
    filename,
    access_token,
    page,
    size,
    human_readable_or_raw,
):  # noqa: D301
    """List workspace files.

    The ``ls`` command lists workspace files of a workflow specified by the
    environment variable REANA_WORKON or provided as a command-line flag
    ``--workflow`` or ``-w``. The SOURCE argument is optional and specifies a
    pattern matching files and directories.

    Examples: \n
    \t $ reana-client ls --workflow myanalysis.42 \n
    \t $ reana-client ls --workflow myanalysis.42 --human-readable \n
    \t $ reana-client ls --workflow myanalysis.42 'data/*root*' \n
    \t $ reana-client ls --workflow myanalysis.42 --filter name=hello
    """  # noqa: W605
    import tablib
    from reana_client.api.client import current_rs_api_client, list_files

    logging.debug("command: {}".format(ctx.command_path.replace(" ", ".")))
    for p in ctx.params:
        logging.debug("{param}: {value}".format(param=p, value=ctx.params[p]))

    search_filter = None
    headers = ["name", "size", "last-modified"]
    if filters:
        _, search_filter = parse_filter_parameters(filters, headers)
    if _format:
        parsed_format_filters = parse_format_parameters(_format)
    if workflow:
        logging.info('Workflow "{}" selected'.format(workflow))
        try:
            response = list_files(workflow, access_token, filename, page, size,
                                  search_filter)
            data = []
            file_path = get_path_from_operation_id(
                current_rs_api_client.swagger_spec.spec_dict["paths"],
                "download_file")
            urls = []
            for file_ in response:
                if not file_["name"].startswith(FILES_BLACKLIST):
                    data.append(
                        list(
                            map(
                                str,
                                [
                                    file_["name"],
                                    file_["size"][human_readable_or_raw],
                                    file_["last-modified"],
                                ],
                            )))
                    urls.append(ctx.obj.reana_server_url + file_path.format(
                        workflow_id_or_name=workflow, file_name=file_["name"]))
            tablib_data = tablib.Dataset()
            tablib_data.headers = headers
            for row in data:
                tablib_data.append(row)
            if output_format == URL:
                display_message("\n".join(urls))
            elif _format:
                tablib_data, filtered_headers = format_data(
                    parsed_format_filters, headers, tablib_data)
                if output_format == JSON:
                    display_message(json.dumps(tablib_data))
                else:
                    tablib_data = [list(item.values()) for item in tablib_data]
                    click_table_printer(filtered_headers, filtered_headers,
                                        tablib_data)
            else:
                if output_format == JSON:
                    display_message(tablib_data.export(output_format))
                else:
                    click_table_printer(headers, _format, data)

        except Exception as e:
            logging.debug(traceback.format_exc())
            logging.debug(str(e))

            display_message(
                "Something went wrong while retrieving file list"
                " for workflow {0}:\n{1}".format(workflow, str(e)),
                msg_type="error",
            )
Example #27
0
    def execute(self, avatar=None):
        if not avatar:
            avatar = Avatar.get(self.user, self.block_id - 1)
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'data/monsters.csv')
        with open(filename) as f:
            monsters = tablib.Dataset().load(f.read()).dict
        randoms = self.get_randoms()
        monster = monsters[randoms.pop() % len(monsters)]
        battle_status = []

        for key in ('hp', 'piercing', 'armor'):
            monster[key] = int(monster[key])

        def get_item(ticker_name):
            items = get_related_items(Item)
            for item in items:
                if item.ticker_name == ticker_name:
                    return item
            return None

        weapon = armor = food = None
        if 'weapon' in self.details:
            weapon = get_item(self.details['weapon'])
        if 'armor' in self.details:
            armor = get_item(self.details['armor'])
        if 'food' in self.details:
            food = get_item(self.details['food'])

        while True:
            try:
                if (avatar.hp <= avatar.max_hp * 0.5 and food
                        and food.ticker_name in avatar.items
                        and avatar.items[food.ticker_name] > 0):
                    avatar, status = food().execute(avatar)
                    battle_status.append(status)
                    avatar.items[food.ticker_name] -= 1
                    food = None

                if (avatar.hp <= avatar.max_hp * 0.2 and 'BNDG' in avatar.items
                        and avatar.items['BNDG'] > 0):
                    rolled = self.roll(randoms, '2d6')
                    if rolled >= 7:
                        avatar.hp += 4
                        avatar.items['BNDG'] -= 1
                        battle_status.append({
                            'type': 'item_use',
                            'item': 'BNDG',
                            'status_change': 'HP +4'
                        })
                    else:
                        avatar.items['BNDG'] -= 1
                        battle_status.append({
                            'type': 'item_use_fail',
                            'item': 'BNDG',
                            'status_change': ''
                        })

                rolled = (self.roll(randoms, '2d6') +
                          avatar.modifier('strength'))
                if rolled >= 7:
                    damage = max(
                        self.roll(randoms, avatar.damage) - monster['armor'],
                        0)
                    if weapon:
                        damage += weapon.attack_modifier(avatar, monster)
                    battle_status.append({
                        'type': 'attack_monster',
                        'damage': damage,
                        'monster': monster.copy(),
                    })
                    monster['hp'] -= damage

                elif rolled in (2, 3, 4, 5, 6, 7, 8, 9):
                    monster_damage = self.roll(randoms, monster['damage'])
                    if armor:
                        monster_damage -= armor.armor_modifier(avatar, monster)
                    battle_status.append({
                        'type': 'attacked_by_monster',
                        'damage': monster_damage,
                        'monster': monster.copy(),
                    })
                    avatar.hp -= monster_damage
                    if rolled <= 6:
                        battle_status.append({
                            'type': 'get_xp',
                        })
                        avatar.xp += 1

                if monster['hp'] <= 0:
                    battle_status.append({
                        'type': 'kill_monster',
                        'monster': monster.copy(),
                    })
                    reward_code = self.roll(randoms, '1d10')
                    reward = monster[f'reward{reward_code}']
                    if reward:
                        avatar.get_item(reward)
                        battle_status.append({
                            'type': 'get_item',
                            'item': reward,
                        })
                    return (avatar,
                            dict(
                                type='hack_and_slash',
                                result='win',
                                battle_status=battle_status,
                            ))

                if avatar.hp <= 0:
                    battle_status.append({
                        'type': 'killed_by_monster',
                        'monster': monster.copy(),
                    })
                    return (avatar,
                            dict(
                                type='hack_and_slash',
                                result='lose',
                                battle_status=battle_status,
                            ))

            except OutOfRandomError:
                battle_status.append({
                    'type': 'run',
                    'monster': monster.copy(),
                })
                return (avatar,
                        dict(
                            type='hack_and_slash',
                            result='finish',
                            battle_status=battle_status,
                        ))
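
The battle loop draws all randomness from the pre-generated randoms list through self.roll, using dice notation such as '2d6' and '1d10'. A minimal sketch of what such a helper could look like (the notation parsing is an assumption based on the call sites; the real method may differ):

def roll(randoms: list, notation: str) -> int:
    # "2d6" -> two six-sided dice; consume one pre-drawn int per die
    count, sides = (int(x) for x in notation.split("d"))
    return sum(randoms.pop() % sides + 1 for _ in range(count))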
Example #28
0
from flask import Flask, render_template
import tablib
import os

app = Flask(__name__)
dataset = tablib.Dataset()
with open('result.csv') as f:
    dataset.csv = f.read()


@app.route("/")
def index():
    return dataset.html


if __name__ == "__main__":
    app.run()
Example #29
0
    def create_dataset(self, in_stream, **kwargs):
        data = tablib.Dataset()
        self.get_format().import_set(data, in_stream, **kwargs)
        return data
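
A self-contained sketch of a format object satisfying the interface above (CsvFormat and CsvResource are hypothetical names, not a real library API; the .csv import property is the same one used in the Flask example):

import tablib

class CsvFormat:
    def import_set(self, dataset, in_stream, **kwargs):
        # populate an existing Dataset from CSV text
        dataset.csv = in_stream

class CsvResource:
    def get_format(self):
        return CsvFormat()

    def create_dataset(self, in_stream, **kwargs):
        data = tablib.Dataset()
        self.get_format().import_set(data, in_stream, **kwargs)
        return data

ds = CsvResource().create_dataset("name,age\nalice,30\n")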
Example #30
0
    def get_all(self,
                output_format='xlsx',
                type=None,
                status=None,
                start_time=None,
                end_time=None,
                limit=None,
                offset=None,
                region_id=None,
                project_id=None,
                user_id=None,
                owed=None):
        """Get queried orders
        If start_time and end_time is not None, will get orders that have bills
        during start_time and end_time, or return all orders directly.
        """
        limit_user_id = acl.get_limited_to_user(request.headers,
                                                'export_orders')

        if limit and limit < 0:
            raise exception.InvalidParameterValue(err="Invalid limit")
        if offset and offset < 0:
            raise exception.InvalidParameterValue(err="Invalid offset")

        if limit_user_id:  # normal user
            user_id = None
            projects = keystone.get_projects_by_user(limit_user_id)
            _project_ids = [project['id'] for project in projects]
            if project_id and project_id in _project_ids:
                project_ids = [project_id]
            else:
                project_ids = _project_ids
        else:  # accountant
            if project_id:  # look up specified project
                project_ids = [project_id]
            else:  # look up all projects
                project_ids = []

        if project_ids:
            project_ids = list(set(project_ids) - set(cfg.CONF.ignore_tenants))

        users = {}
        projects = {}

        def _get_user(user_id):
            user = users.get(user_id)
            if user:
                return user
            contact = kunkka.get_uos_user(user_id)
            user_name = contact['name'] if contact else None
            users[user_id] = models.User(user_id=user_id, user_name=user_name)
            return users[user_id]

        def _get_project(project_id):
            project = projects.get(project_id)
            if project:
                return project
            try:
                project = keystone.get_project(project_id)
                project_name = project.name if project else None
                projects[project_id] = models.SimpleProject(
                    project_id=project_id, project_name=project_name)
                return projects[project_id]
            except Exception as e:
                # Note(chengkun): some projects were deleted from keystone,
                # but their orders still exist in gringotts, so fetching such
                # an order raises a 404 "project not found" error.
                LOG.error('Failed to get project: %s' % e)
                return None

        MAP = [
            {
                "running": u"运行中",
                "stopped": u"暂停中",
                "deleted": u"被删除"
            },
            {
                "instance": u"虚拟机",
                "image": u"镜像",
                "snapshot": u"硬盘快照",
                "volume": u"云硬盘",
                "share": u"共享文件",
                "floatingip": u"公网IP",
                "listener": u"负载均衡监听器",
                "router": u"路由器",
                "alarm": u"监控报警"
            },
        ]

        headers = (u"资源ID", u"资源名称", u"资源类型", u"资源状态", u"单价(元/小时)", u"金额(元)",
                   u"区域", u"用户ID", u"用户名称", u"项目ID", u"项目名称", u"创建时间")
        data = []

        adata = (
            u"过滤条件: 资源类型: %s, 资源状态: %s,用户ID: %s, 项目ID: %s, 区域: %s, 起始时间: %s,  结束时间: %s"
            % (type, status, user_id, project_id, region_id, start_time,
               end_time), "", "", "", "", "", "", "", "", "", "", "")
        data.append(adata)

        conn = pecan.request.db_conn
        orders_db, total_count = conn.get_orders(request.context,
                                                 type=type,
                                                 status=status,
                                                 start_time=start_time,
                                                 end_time=end_time,
                                                 owed=owed,
                                                 limit=limit,
                                                 offset=offset,
                                                 with_count=True,
                                                 region_id=region_id,
                                                 user_id=user_id,
                                                 project_ids=project_ids)
        for order in orders_db:
            price = self._get_order_price(order,
                                          start_time=start_time,
                                          end_time=end_time)
            user = _get_user(order.user_id)
            project = _get_project(order.project_id)
            if project is None:
                continue
            order.created_at += datetime.timedelta(hours=8)
            created_at = \
                timeutils.strtime(order.created_at, fmt=OUTPUT_TIME_FORMAT)
            adata = (order.resource_id, order.resource_name,
                     MAP[1][order.type], MAP[0][order.status],
                     order.unit_price, price, order.region_id, user.user_id,
                     user.user_name, project.project_id, project.project_name,
                     created_at)
            data.append(adata)

        data = tablib.Dataset(*data, headers=headers)

        response.content_type = "application/binary; charset=UTF-8"
        response.content_disposition = \
            "attachment; filename=orders.%s" % output_format
        content = getattr(data, output_format)
        if output_format == 'csv':
            content = content.decode("utf-8").encode("gb2312")
        response.write(content)
        return response
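
The export step works because tablib exposes every registered format as an attribute of Dataset, which is what getattr(data, output_format) exploits. A standalone illustration (assuming the xlsx format dependency is installed):

import tablib

data = tablib.Dataset([1, "alice"], [2, "bob"], headers=["id", "name"])
print(data.csv)          # equivalent to getattr(data, "csv")
xlsx_bytes = data.xlsx   # binary spreadsheet content for the response body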