Example 1
def encode_plot(P,
                pad=None,
                pad_inches=0.1,
                bbox_inches=None,
                remove_axes=False,
                transparent=False,
                axes_pad=None):
    """
    Convert a plot object to base64-encoded png format.

    pad is passed down to matplotlib's tight_layout; pad_inches and bbox_inches to savefig.

    The resulting object is a base64-encoded version of the png
    formatted plot, which can be displayed in web pages with no
    further intervention.
    """
    from io import BytesIO as IO
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    from base64 import b64encode
    from urllib.parse import quote

    virtual_file = IO()
    fig = P.matplotlib(axes_pad=axes_pad)
    fig.set_canvas(FigureCanvasAgg(fig))
    if remove_axes:
        for a in fig.axes:
            a.axis('off')
    if pad is not None:
        fig.tight_layout(pad=pad)
    fig.savefig(virtual_file,
                format='png',
                pad_inches=pad_inches,
                bbox_inches=bbox_inches,
                transparent=transparent)
    virtual_file.seek(0)
    buf = virtual_file.getbuffer()
    return "data:image/png;base64," + quote(b64encode(buf))
Example 2
        def zipper(response):
            accept_encoding = request.headers.get('Accept-Encoding', '')

            if 'gzip' not in accept_encoding.lower():
                return response

            response.direct_passthrough = False

            if (response.status_code < 200 or
                response.status_code >= 300 or
                'Content-Encoding' in response.headers):
                return response
            gzip_buffer = IO()
            gzip_file = gzip.GzipFile(mode='wb',
                                      fileobj=gzip_buffer)
            gzip_file.write(response.data)
            gzip_file.close()

            response.data = gzip_buffer.getvalue()
            response.headers['Content-Encoding'] = 'gzip'
            response.headers['Vary'] = 'Accept-Encoding'
            response.headers['Content-Length'] = len(response.data)

            return response
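The heart of the hook is compressing response.data through an in-memory buffer. A minimal standalone sketch of just that step (the gzip_bytes name is mine):

import gzip
from io import BytesIO

def gzip_bytes(payload):
    # compress through an in-memory buffer, mirroring the
    # GzipFile-over-BytesIO step in the middleware above
    buf = BytesIO()
    with gzip.GzipFile(mode='wb', fileobj=buf) as gz:
        gz.write(payload)
    return buf.getvalue()

assert gzip.decompress(gzip_bytes(b"hello")) == b"hello"  # round-trip check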
Example 3
def export_submit(request):
    if request.BODY['type'] == 'excel':
        byte_io = IO()

        excel_file = pd.DataFrame(request.BODY['inventory'])
        excel_file = excel_file[request.BODY['columns']]

        writer = pd.ExcelWriter(byte_io, engine='xlsxwriter')
        excel_file.to_excel(writer, sheet_name='Inventory')
        writer.close()  # close() saves the workbook; ExcelWriter.save() was removed in pandas 2.0

        byte_io.seek(0)

        response = HttpResponse(
            byte_io.read(),
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )
        response['Content-Disposition'] = 'attachment; filename=inventory.xlsx'

        return response
    else:
        return JsonResponse({}, safe=False)
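The buffer-to-workbook step recurs throughout these examples. A distilled, self-contained sketch (the helper name is mine; it assumes the xlsxwriter engine is installed):

import pandas as pd
from io import BytesIO

def dataframe_to_xlsx_bytes(df):
    # write the frame to an in-memory workbook and return the raw bytes;
    # the context manager closes (and thereby saves) the writer
    buffer = BytesIO()
    with pd.ExcelWriter(buffer, engine='xlsxwriter') as writer:
        df.to_excel(writer, sheet_name='Inventory', index=False)
    buffer.seek(0)
    return buffer.read()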
Example 4
    def get(self, request, **kwargs):
        try:
            data = Data.objects.filter(name='analytics_data')[0]

            # Let's do the analytics
            df = pd.read_excel(data.file, sheet_name='Raw Data')
            # normalize column names: drop '(min)', trim, snake_case, lowercase
            df.columns = [
                col.replace('(min)', '').strip().replace(' ', '_').lower()
                for col in df.columns
            ]
            df = df.assign(retention_time_roundoff=df.retention_time)

            df.retention_time_roundoff = df.retention_time_roundoff.astype(
                float).round().astype(int)
            retention_time_roundoff = df['retention_time_roundoff']

            excel_file = IO()
            xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')
            df.to_excel(xlwriter, 'raw_data_1')
            retention_time_roundoff.to_excel(xlwriter, 'raw_data_2')
            xlwriter.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
            excel_file.seek(0)

            response = HttpResponse(
                excel_file.read(),
                content_type=
                'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
            )
            response[
                'Content-Disposition'] = 'attachment; filename=output2.xlsx'
            return response
        except DatabaseError as e:
            # note: 304 Not Modified is an odd status for a database error;
            # 500 would be more conventional
            return Response({"Error": str(e)},
                            status=status.HTTP_304_NOT_MODIFIED)
Example 5
def stats_to_csv(stats):
    if sys.version_info[0] >= 3:
        from io import StringIO as IO  # pragma: no cover
    else:
        from cStringIO import StringIO as IO  # pragma: no cover

    import csv

    csv_fh = IO()

    keys = set()
    for stat in stats:
        for key in list(stat.keys()):
            keys.add(key)

    fieldnames = sorted(list(keys), key=str)

    csvwriter = csv.DictWriter(csv_fh, delimiter=str(","), fieldnames=fieldnames)
    csvwriter.writeheader()
    for row in stats:
        csvwriter.writerow(row)
    contents = csv_fh.getvalue()
    csv_fh.close()
    return contents
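A small usage sketch; rows may carry different key sets, and DictWriter fills the gaps with empty strings:

stats = [{"name": "a", "hits": 3}, {"name": "b", "misses": 1}]
print(stats_to_csv(stats))
# hits,misses,name
# 3,,a
# ,1,b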
Example 6
def download(request):
    samples_list = samples.objects.none()
    df1 = convert_to_dataframe2(samples_list,
                                fields=[
                                    'transaction_GlCode', 'transaction_date',
                                    'transaction_number', 'transaction_value',
                                    'remarks', 'action', 'Area',
                                    'Financial_Year', 'Client'
                                ])
    frames = [df1]
    result = pd.concat(frames)
    excel_file = IO()

    xlwriter = pd.ExcelWriter(excel_file, engine='openpyxl')
    result.to_excel(xlwriter, 'samples')
    xlwriter.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
    excel_file.seek(0)
    response = HttpResponse(
        excel_file.read(),
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=sample_format.xlsx'
    return response
Example 7
def Advance_Amount_year_action(request):
    if request.method == "POST":
        #monthvarnum = request.GET.get('sel')
        #capturing values from user input

        yearvarnum = request.POST.get('year')
        yearvarnum = int(yearvarnum)

        # show full column contents in the dataframe
        pd.set_option('display.max_colwidth', None)

        from io import BytesIO as IO  # io.BytesIO exists on every supported Python; the StringIO fallback was dead code

        # copy the Advance_amount table, filtered to the selected year

        copy1 = Advance_amount.objects.all().values_list(
            'Employee_name',
            'Advance_Amount_Taken', 'year', 'Cleared_or_Notcleared').filter(
                year=yearvarnum)
        dframe00 = pd.DataFrame(copy1,
                                columns=[
                                    'Employee_name',
                                    'Total_Advance_Amount_Taken', 'year',
                                    'Advance_amount_Cleared?'
                                ])

        # group by employee and sum the advance amounts
        dframe1 = dframe00.groupby([
            'Employee_name',
        ]).agg({
            'Total_Advance_Amount_Taken': 'sum',
        }).reset_index()

        # add the selected year as a constant column; a scalar assignment
        # broadcasts to every row, so no itertools.repeat list is needed
        dframe1['Year'] = yearvarnum

        # reduce the cleared flags to one value per employee
        dframe00 = dframe00.drop([
            'Total_Advance_Amount_Taken',
            'year',
        ], axis=1)  # drop columns no longer needed

        # prod() over the boolean column acts as a per-group logical AND:
        # the result is truthy only if every advance is cleared
        dframe00 = dframe00.groupby('Employee_name').prod()

        for col in dframe00.columns[dframe00.dtypes == 'bool']:
            dframe00[col] = dframe00[col].map({True: 'Yes', False: 'No'})

        # outer-join the cleared flags with the summed amounts on Employee_name
        dframe2 = pd.merge(dframe00, dframe1, on='Employee_name', how='outer')

        #add column Whole_Or_Partial_Amount_Paid_in_Middle into dframe2

        copy2 = Advance_amount.objects.all().values_list(
            'Employee_name', 'Whole_Or_Partial_Amount_Paid_in_Middle').filter(
                year=yearvarnum)

        dframetemp = pd.DataFrame(copy2,
                                  columns=[
                                      'Employee_name',
                                      'Whole_Or_Partial_Amount_Paid_in_Middle',
                                  ])

        dframetemp = dframetemp.groupby([
            'Employee_name',
        ]).agg({
            'Whole_Or_Partial_Amount_Paid_in_Middle': 'sum',
        }).reset_index()

        # outer-join the repayments onto the summary on Employee_name
        dframe3 = pd.merge(dframe2, dframetemp, on='Employee_name', how='outer')

        dframe3[
            'Remaining_Amt'] = dframe3['Total_Advance_Amount_Taken'] - dframe3[
                'Whole_Or_Partial_Amount_Paid_in_Middle']

        # rearranging dataframe columns
        dframe3 = dframe3[[
            'Employee_name',
            'Year',
            'Total_Advance_Amount_Taken',
            'Whole_Or_Partial_Amount_Paid_in_Middle',
            'Remaining_Amt',
            'Advance_amount_Cleared?',
        ]]

        # my "Excel" file, which is an in-memory output file (buffer)
        # for the new workbook
        excel_file = IO()

        xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')

        dframe3.to_excel(xlwriter, 'Advanced_Amount_Statistics')

        workbook = xlwriter.book
        worksheet = xlwriter.sheets['Advanced_Amount_Statistics']

        worksheet.set_column('B:B', 25)

        worksheet.set_column('C:C', 6)
        worksheet.set_column('D:E', 30)
        worksheet.set_column('E:E', 40)

        worksheet.set_column('F:G', 28)

        xlwriter.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
        # important step, rewind the buffer or when it is read() you'll get nothing
        # but an error message when you try to open your zero length file in Excel
        excel_file.seek(0)

        response = HttpResponse(
            excel_file.read(),
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )

        # set the file name in the Content-Disposition header
        response[
            'Content-Disposition'] = 'attachment; filename=Yearwise_Statistics_Of_Advanced_Amount.xlsx'

        return response
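One non-obvious trick above is groupby(...).prod() on the boolean cleared column, which acts as a per-group logical AND; .all() expresses the same intent directly. A sketch with made-up data:

import pandas as pd

df = pd.DataFrame({
    "Employee_name": ["a", "a", "b"],
    "Cleared": [True, False, True],
})
# True only if every advance taken by the employee is cleared
cleared = df.groupby("Employee_name")["Cleared"].all()
print(cleared.map({True: "Yes", False: "No"}))  # a -> No, b -> Yes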
Example 8
def Export_Database(request):
    names = [
        'id',
        'id_SAP',
        'id_user__username',
        'id_checker__username',
        'id_rechecker__username',
        'id_retitle__username',
        'status',
        'division_status',
        'created_date',
        'modified_date',
        'comment',
        'time_tracking',
        'check_time_tracking',
    ]
    workdata = Work_data.objects.all().values_list(*names)
    workdata_column_names = [
        field.verbose_name for field in Work_data._meta.fields
    ]

    extractsap = ExtractSAP.objects.all().values_list()
    extractsap_column_names = [
        field.verbose_name for field in ExtractSAP._meta.fields
    ]
    extractsap_column_names[0] = "id SAP"

    df_work = pd.DataFrame.from_records(workdata,
                                        columns=workdata_column_names)
    df_sap = pd.DataFrame.from_records(extractsap,
                                       columns=extractsap_column_names)
    df_sap = df_sap.set_index('id SAP')

    df = df_work.join(df_sap, on='id SAP', rsuffix='_sap')
    df = df.set_index('ID')
    df.sort_values(by=['id SAP'], inplace=True)

    # print(df_work)
    # print(df_sap)
    # print(df)

    Extractsap_filter = [
        'ID', 'Site', 'Div.', 'Ancien numéro Ordre', 'Ordre', 'typ',
        'Intitulé du type de document', 'Num.', 'Folio', 'Rev', 'ID Document',
        'Dernière version', 'titre du projet', 'Transfert vers ordre',
        'Lien vers le serveur', 'Titre du document',
        'Division section sous section client',
        'Division section sous section AUSY', 'Catégorie de document',
        'Date d émission', 'Provenance', 'Auteur', 'Vérificateur',
        'Approbateur', 'Validateur', 'Entrepreneur/Fournisseur',
        'Référence externe', 'Ancien numéro de plan', 'Numéro Cadastre ENG',
        'Révision Cadastre Eng', 'TAG', 'Poste technique #1',
        'Libellé Poste technique #1', 'Poste technique #2',
        'Libellé Poste technique #2', 'Poste technique #3',
        'Libellé Poste technique #3', 'Poste technique #4',
        'Libellé  Poste technique #4', 'Poste technique #5',
        'Libellé Poste technique #5', 'Poste technique #6',
        'Libellé Poste technique #6', 'Poste technique',
        'Libellé Poste Technique', 'Remarque', 'N° d imputation',
        'N° de bon de travail', 'Existance fichier tif/pdf/dwg', 'id SAP',
        'id user', 'id checker', 'id rechecker', 'id retitle', 'created date',
        'modified date', 'comment', 'time tracking', 'check time tracking',
        'id SAP_sap', 'status', 'status_sap', 'division status'
    ]

    # my "Excel" file, which is an in-memory output file (buffer)
    # for the new workbook
    excel_file = IO()

    df = df.filter(items=Extractsap_filter)
    # df = df.style.format({"Date d émission": lambda t: t.strftime("%d/%m/%Y")})
    writer = pd.ExcelWriter(excel_file,
                            engine='xlsxwriter',
                            date_format='dd/mm/yyyy')
    df.to_excel(writer, 'Extract_SAP')
    writer.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
    # important step, rewind the buffer or when it is read() you'll get nothing
    # but an error message when you try to open your zero length file in Excel
    excel_file.seek(0)

    response = HttpResponse(
        excel_file.read(),
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename="Export_SAP.xlsx"'
    return response
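The join above matches df_work's 'id SAP' column against df_sap's index. A tiny sketch of that pattern with toy frames:

import pandas as pd

left = pd.DataFrame({"ID": [1, 2], "id SAP": [10, 11]}).set_index("ID")
right = pd.DataFrame({"id SAP": [10, 11], "status": ["ok", "ko"]}).set_index("id SAP")

# join() matches left's 'id SAP' column against right's index;
# rsuffix would disambiguate any overlapping column names
joined = left.join(right, on="id SAP", rsuffix="_sap")
print(joined)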
Example 9
    def deserialize(self, value):
        audio = value[value.find(",") + 1:]  # strip the "data:...;base64," prefix
        audio = base64.decodebytes(audio.encode('utf8'))  # decodestring was removed in Python 3.9
        buffer = IO(audio)
        data, sr = soundfile.read(buffer)
        return data, sr
Example 10
    def parse_string(self, string):
        """Supply file-like object containing the string."""
        file = IO(string.encode('utf-8'))
        return self.run(file=file)
Example 11
    body2 = """
--spam
Content-Disposition: form-data; name="param1"; filename=blob
Content-Type: binary/octet-stream

value1
--spam--
"""
    body = body_file  # body_file: presumably a raw multipart payload defined elsewhere
    headers = {
        'content-type': 'multipart/form-data; boundary=spam;',
        'content-length': len(body)
    }
    headers2 = {
        'content-type':
        'multipart/form-data; boundary=---------------------------265001916915724',
        'content-length': len(body)
    }
    headers3 = {
        'content-type':
        'multipart/form-data; boundary=---------------------------146043902153',
        'content-length': len(body)
    }
    environ = {'REQUEST_METHOD': 'POST'}

    input_headers = [('Host', '127.0.0.1:8192'), ('User-Agent', 'Mozilla/5.0')]

    # note: the cgi module was deprecated in Python 3.11 and removed in 3.13
    parsed = cgi.FieldStorage(IO(body.encode('utf-8')),
                              headers=headers3,
                              environ=environ)
    # parsed2 = cgi.parse_multipart(IO(body.encode('utf-8')), pdict = headers2)
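An untested, self-consistent variant of the parse for Python <= 3.12: body2 and its headers share the 'spam' boundary, so FieldStorage can actually locate the part:

import cgi
from io import BytesIO as IO

environ = {'REQUEST_METHOD': 'POST'}
headers = {
    'content-type': 'multipart/form-data; boundary=spam',
    'content-length': len(body2),
}
parsed = cgi.FieldStorage(IO(body2.encode('utf-8')),
                          headers=headers,
                          environ=environ)
print(parsed['param1'].value)  # the uploaded blob's bytes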
Example 12
def exportPandas(
        request, report,
        cloud):  # exports the data of the selected report to Excel

    from io import BytesIO as IO
    if report == '1':
        output = [{
            'Tenant Name': x.name,
            'Parent Name': x.parentName,
            'Products': productsToString(x.products),
            'Amount of Users': x.users.count(),
            'Amount of Apps': x.apps.count()
        } for x in Tenant.objects.all()]
        df = pd.DataFrame(output)
        df = df[[
            'Parent Name', 'Tenant Name', 'Products', 'Amount of Users',
            'Amount of Apps'
        ]]

        excel_file = IO()

        xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')
        df.to_excel(xlwriter, 'sheetname', index=False)

        xlwriter.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
        # important step, rewind the buffer or when it is read() you'll get nothing
        # but an error message when you try to open your zero length file in Excel
        excel_file.seek(0)
        # set the mime type so that the browser knows what to do with the file
        response = HttpResponse(
            excel_file.read(),
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )

        # set the file name in the Content-Disposition header
        response['Content-Disposition'] = 'attachment; filename=' + str(
            cloud) + '_report_' + str(report) + '.xlsx'
        return response
    elif report == '2':
        context = getContext2(cloud)
        output = [{
            'Product Name': x['name'],
            'Active': x['active'],
            'Inactive': x['inactive'],
            'All': str(int(x['active']) + int(x['inactive']))
        } for x in context['products']]
        df = pd.DataFrame(output)
        df = df[['Product Name', 'Active', 'Inactive', 'All']]

        excel_file = IO()
        xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')
        df.to_excel(xlwriter, 'sheetname', index=False)
        xlwriter.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
        # important step, rewind the buffer or when it is read() you'll get nothing
        # but an error message when you try to open your zero length file in Excel
        excel_file.seek(0)
        # set the mime type so that the browser knows what to do with the file
        response = HttpResponse(
            excel_file.read(),
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )

        # set the file name in the Content-Disposition header
        response['Content-Disposition'] = 'attachment; filename=' + str(
            cloud) + '_report_' + str(report) + '.xlsx'
        return response

    elif report == '3':
        context = getContext3(cloud)
        excel_file = IO()
        xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')
        for product in context['products']:
            newContext = []  # special format for excel
            for tenant in product['active']:
                dictToExcel = {
                    'tenantName': tenant,
                    'status': 'active',
                }
                newContext.append(dictToExcel)
            for tenant in product['inactive']:
                dictToExcel = {
                    'tenantName': tenant,
                    'status': 'inactive',
                }
                newContext.append(dictToExcel)
            output = [{
                'Tenant Name': x['tenantName'].name,
                'Status': x['status']
            } for x in newContext]
            df = pd.DataFrame(output)
            df = df[['Tenant Name', 'Status']]
            df.to_excel(xlwriter,
                        str(product['name'].replace('/', '_')),
                        index=False)
        xlwriter.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
        excel_file.seek(0)
        response = HttpResponse(
            excel_file.read(),
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )

        # set the file name in the Content-Disposition header
        response['Content-Disposition'] = 'attachment; filename=' + str(
            cloud) + '_report_' + str(report) + '.xlsx'
        return response

    elif report == '4':
        context = getContext4(cloud)
        newContext = []
        for x in context['tenants']:
            if str(x['serviceBox']).upper() != 'None'.upper():
                newContext.append(x)
        output = [{
            'Tenant Name': x['name'],
            'Parent Name': x['parentName'],
            'Demo': x['demoTenant'],
            'Service Box': x['serviceBox'],
            'Installed Version': x['installedVersion'],
            'Last Updated': x['lastUpdate'],
            'Products': productsToString(x['products'])
        } for x in newContext]
        df = pd.DataFrame(output)
        df = df[[
            'Parent Name', 'Tenant Name', 'Demo', 'Service Box',
            'Installed Version', 'Last Updated', 'Products'
        ]]

        excel_file = IO()
        xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')
        df.to_excel(xlwriter, 'sheetname', index=False)

        xlwriter.close()  # close() saves; ExcelWriter.save() was removed in pandas 2.0
        # important step, rewind the buffer or when it is read() you'll get nothing
        # but an error message when you try to open your zero length file in Excel
        excel_file.seek(0)
        # set the mime type so that the browser knows what to do with the file
        response = HttpResponse(
            excel_file.read(),
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )

        # set the file name in the Content-Disposition header
        response['Content-Disposition'] = 'attachment; filename=' + str(
            cloud) + '_report_' + str(report) + '.xlsx'
        return response
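All four branches end with the same buffer-to-HttpResponse block. A sketch of a helper that would factor it out (name and signature are mine):

from django.http import HttpResponse

XLSX_MIME = ('application/vnd.openxmlformats-officedocument'
             '.spreadsheetml.sheet')

def xlsx_response(excel_file, filename):
    # rewind the buffer, set the MIME type, and name the attachment
    excel_file.seek(0)
    response = HttpResponse(excel_file.read(), content_type=XLSX_MIME)
    response['Content-Disposition'] = 'attachment; filename=' + filename
    return response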
Example 13
def heat_mpl(df, id_prop="Compound_Id", cmap="bwr",
             show=True, colorbar=True, biosim=False, chemsim=False, method="dist_corr",
             sort_parm=False, parm_dict=None,
             plot_cache=None):
    # try to load heatmap from cache:
    if plot_cache is not None and op.isfile(plot_cache):
        with open(plot_cache) as f:
            return f.read()
    if "dist" in method.lower():
        profile_sim = cpt.profile_sim_dist_corr
    else:
        profile_sim = cpt.profile_sim_tanimoto
    df_len = len(df)
    img_size = 15 if show else 17
    plt.style.use("seaborn-white")
    plt.style.use("seaborn-pastel")
    plt.style.use("seaborn-talk")
    # plt.rcParams['legend.fontsize'] = 20

    plt.rcParams['figure.figsize'] = (img_size, 1.1 + 0.47 * (df_len - 1))
    plt.rcParams['axes.labelsize'] = 25
    plt.rcParams['ytick.labelsize'] = 20
    plt.rcParams['xtick.labelsize'] = 15
    fs_text = 18

    y_labels = []
    fp_list = []
    max_val = 3                 # using a fixed color range now
    min_val = -3
    ylabel_templ = "{}{}{}"
    ylabel_cs = ""
    ylabel_bs = ""
    id_prop_list = []
    for ctr, (_, rec) in enumerate(df.iterrows()):
        if sort_parm:
            if ctr == 0:
                compartments = ["Median_Cells", "Median_Cytoplasm", "Median_Nuclei"]
                parm_list = []
                for comp in compartments:
                    parm_comp = [x for x in ACT_PROF_PARAMETERS if x.startswith(comp)]
                    val_list = [rec[x] for x in parm_comp]
                    parm_sorted = [x for _, x in sorted(zip(val_list, parm_comp))]
                    parm_list.extend(parm_sorted)
        else:
            parm_list = ACT_PROF_PARAMETERS
        fp = [rec[x] for x in ACT_PROF_PARAMETERS]
        fp_view = [rec[x] for x in parm_list]
        fp_list.append(fp_view)
        id_prop_list.append(rec[id_prop])
        if chemsim:
            if ctr == 0:
                mol = mol_from_smiles(rec.get("Smiles", "*"))
                if len(mol.GetAtoms()) > 1:
                    ylabel_cs = "Chem | "
                    mol_fp = Chem.GetMorganFingerprint(mol, 2)  # ECFC4
                else:  # no Smiles present in the DataFrame
                    ylabel_cs = ""
                    chemsim = False
            else:
                q = rec.get("Smiles", "*")
                if len(q) < 2:
                    ylabel_cs = "     | "
                else:
                    sim = cpt.chem_sim(mol_fp, q) * 100
                    ylabel_cs = "{:3.0f}% | ".format(sim)
        if biosim:
            if ctr == 0:
                prof_ref = fp
                ylabel_bs = "  Bio  |  "
            else:
                sim = profile_sim(prof_ref, fp) * 100
                ylabel_bs = "{:3.0f}% |  ".format(sim)

        ylabel = ylabel_templ.format(ylabel_cs, ylabel_bs, rec[id_prop])
        y_labels.append(ylabel)


        # m_val = max(fp)       # this was the calculation of the color range
        # if m_val > max_val:
        #     max_val = m_val
        # m_val = min(fp)
        # if m_val < min_val:
        #     min_val = m_val

    if isinstance(parm_dict, dict):
        parm_dict["Parameter"] = parm_list
        for i in range(len(id_prop_list)):
            parm_dict[str(id_prop_list[i])] = fp_list[i].copy()
    # calc the colorbar range
    max_val = max(abs(min_val), max_val)
    # invert y axis:
    y_labels = y_labels[::-1]
    fp_list = fp_list[::-1]
    Z = np.asarray(fp_list)
    plt.xticks(XTICKS)
    plt.yticks(np.arange(df_len) + 0.5, y_labels)
    plt.pcolor(Z, vmin=-max_val, vmax=max_val, cmap=cmap)
    plt.text(XTICKS[1] // 2, -1.1, "Cells",
             horizontalalignment='center', fontsize=fs_text)
    plt.text(XTICKS[1] + ((XTICKS[2] - XTICKS[1]) // 2), -1.1,
             "Cytoplasm", horizontalalignment='center', fontsize=fs_text)
    plt.text(XTICKS[2] + ((XTICKS[3] - XTICKS[2]) // 2), -1.1,
             "Nuclei", horizontalalignment='center', fontsize=fs_text)
    if colorbar and len(df) > 3:
        plt.colorbar()
    plt.tight_layout()
    if show:
        plt.show()
    else:
        img_file = IO()
        plt.savefig(img_file, bbox_inches='tight', format="jpg")
        result = img_tag(img_file, format="jpg",
                         options='style="width: 900px;"')
        img_file.close()
        # important, otherwise the plots will accumulate and fill up memory:
        plt.clf()
        plt.close()
        gc.collect()
        if plot_cache is not None:  # cache the plot
            with open(plot_cache, "w") as f:
                f.write(result)
        return result
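img_tag is an external helper; a hypothetical stand-in consistent with how it is called above (buffer, format, extra attribute string):

import base64

def img_tag(buffer, format="jpg", options=""):
    # hypothetical implementation: inline the buffer as a base64 data URI
    b64 = base64.b64encode(buffer.getvalue()).decode("ascii")
    return '<img src="data:image/{};base64,{}" {}/>'.format(format, b64, options)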
Example 14
    def makefile(self, *args, **kwargs):
        return IO(bytes(self.path, 'utf-8'))
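This makefile override is the classic hook for replaying canned HTTP traffic, since http.client reads responses through sock.makefile(). A sketch of such a fake socket in use (here the path attribute evidently holds raw response text):

from http.client import HTTPResponse
from io import BytesIO as IO

class FakeSocket:
    def __init__(self, raw):
        self.path = raw  # raw response text, as in the snippet above

    def makefile(self, *args, **kwargs):
        return IO(bytes(self.path, 'utf-8'))

resp = HTTPResponse(FakeSocket("HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"))
resp.begin()
print(resp.status, resp.read())  # 200 b'hi'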
Example 15
    def display_mol(self):
        highlight_lipinski = self.check_lipinski.isChecked()

        if self.curr_sdf_mol_index == 0:
            self.btn_prev.setEnabled(False)
        else:
            self.btn_prev.setEnabled(True)

        if self.curr_sdf_mol_index == self.curr_sdf_num_of_mols - 1:
            self.btn_next.setEnabled(False)
        else:
            self.btn_next.setEnabled(True)

        img_file = IO()  # for structure depiction
        img = sdft.autocrop(
            Draw.MolToImage(self.curr_sdf[self.curr_sdf_mol_index]), "white")
        img.save(img_file, format='PNG')
        # qimg = QtGui.QImage.fromData(img_file.getvalue())
        self.qpixmap = QtGui.QPixmap()
        self.qpixmap.loadFromData(img_file.getvalue(), "PNG")
        self.label_molimage.setPixmap(self.qpixmap)
        self.le_recnumber.setText("{} of {}".format(
            self.curr_sdf_mol_index + 1, self.curr_sdf_num_of_mols))

        if self.SDF_CHANGED:
            # self.SDF_CHANGED = False
            # self.selected_fields = self.get_selected_fields()
            self.table_props.clear()
            self.table_props.setHorizontalHeaderLabels(["prop", "value"])

        for row, prop in enumerate(self.curr_sdf_fields):
            tbl_item = QtGui.QTableWidgetItem(prop[2:])
            # tbl_item.setFlags(QtCore.Qt.ItemIsEnabled)
            self.table_props.setItem(row, 0, tbl_item)

            if self.SDF_CHANGED:
                if self.selected_fields and prop in self.selected_fields:
                    self.table_props.setItemSelected(tbl_item, True)

            if prop in self.curr_sdf[self.curr_sdf_mol_index].GetPropNames():
                value = self.curr_sdf[self.curr_sdf_mol_index].GetProp(prop)
                tbl_item = QtGui.QTableWidgetItem(value)
                # QtCore.Qt.ItemIsEditable is required to edit the cells
                # tbl_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable)
                tbl_item.setFlags(QtCore.Qt.ItemIsEnabled
                                  | QtCore.Qt.ItemIsEditable)
                if highlight_lipinski and prop in self.highlight_dict:
                    if float(value) > self.highlight_dict[prop]:
                        tbl_item.setBackgroundColor(QtGui.QColor(255, 0, 0))
                    else:
                        tbl_item.setBackgroundColor(QtGui.QColor(0, 255, 0))
                self.table_props.setItem(row, 1, tbl_item)
            else:
                tbl_item = QtGui.QTableWidgetItem("n.d.")
                # see above for flag QtCore.Qt.ItemIsEditable
                tbl_item.setFlags(QtCore.Qt.ItemIsEnabled
                                  | QtCore.Qt.ItemIsEditable)
                self.table_props.setItem(row, 1, tbl_item)

            self.table_props.setRowHeight(row, 18)

        self.SDF_CHANGED = False

        if self.curr_sdf_mol_index in self.selected_recs:
            self.check_rec_selected.setChecked(True)
        else:
            self.check_rec_selected.setChecked(False)
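The depiction step distilled: render a PIL image into an in-memory PNG and hand the bytes to a QPixmap (a sketch assuming PyQt4 and Pillow; QPixmap needs a live QApplication):

import sys
from io import BytesIO
from PIL import Image
from PyQt4 import QtGui

app = QtGui.QApplication(sys.argv)  # QPixmap requires a QApplication

img_file = BytesIO()
Image.new("RGB", (100, 100), "white").save(img_file, format="PNG")

pixmap = QtGui.QPixmap()
pixmap.loadFromData(img_file.getvalue(), "PNG")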
Example 16
def main(request):
    if request.method != 'POST':
        return render(request, "main.html")
    if request.method == 'POST':
        d = dict(request.POST)
        # getting all the data
        field_name = d['fieldName']
        field_type = d['fieldType']
        percentage = d['percentage']
        nrows = d['fname']
        fileExport = d['fileExport']
        ending = d['l_ending_style']
        chkempty = d['Empty']
        button_value = d["button"]
        valempty = d['valEmpty']

        rows = []
        chk_index = 0
        chk_arr = []
        formula = d['formulas']
        while chk_index < len(chkempty):
            if chkempty[chk_index] == 'Yes':
                chk_arr.append('on')
                chk_index += 2
            else:
                chk_arr.append('off')
                chk_index += 1
        chkempty = chk_arr
        valindex = 0
        chk_index = 0
        val_arr = []
        while valindex < len(valempty) and chk_index < len(chkempty):
            if chkempty[chk_index] == "on":
                val_arr.append(valempty[valindex])
                valindex += 2
            else:
                val_arr.append("0")
                valindex += 1
            chk_index += 1

        valempty = val_arr

        # generating the data using functions above
        field_type.pop()
        field_name.pop()
        for i in field_type:
            # GUID Generation
            if i == 'GUID':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(id())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # Row Number Generation
            elif i == 'Row Number':
                tempdata = []
                for j in range(1, int(nrows[0]) + 1):
                    data = str(j)
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string:
                            print('yes it is there')
                            newValue = enforce_function(string, data, 3)
                        else:
                            newValue = enforce_function(string, data, 2)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # First Name Generation
            elif i == 'First Name':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = fname()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # last Name Generation
            elif i == 'Last Name':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = lname()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # full Name Generation
            elif i == 'Full Name':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = fullname()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # First Name male Generation
            elif i == 'First Name(male)':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = f_fname()  # note: f_fname here vs m_fname in the female branch look swapped
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # First Name female Generation
            elif i == 'First Name(female)':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = m_fname()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # Email Generation
            elif i == 'Email':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = Email()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # username Generation
            elif i == 'Username':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = username()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # Company Name Generation
            elif i == 'Comapny Name':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = comapny()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # IP address v4 Generation
            elif i == 'Ip address v4':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(ipv4())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # IP address v6 Generation
            elif i == 'Ip address v6':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(ipv6())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # job title Generation
            elif i == 'Job Title':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = job()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # Language Generation
            elif i == 'Language':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = lang()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # programming Language Generation
            elif i == 'Programming Language':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = planguage()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # Currency
            elif i == 'Currency':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = Currency()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # Currency symbol
            elif i == 'Currency Symbol':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(symbol())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # gender generation
            elif i == 'Gender':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = gender()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # random number generation
            elif i == 'Number':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = number()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string:
                            print('yes it is there')
                            newValue = enforce_function(string, data, 3)
                        else:
                            newValue = enforce_function(string, data, 2)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # phone number generation
            elif i == 'Phone Number':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(phnumber())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # domain name generation
            elif i == 'Domain Name':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(domain())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # date
            elif i == 'Date':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(date_())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # date with time
            elif i == 'Date with time':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(datetime())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # time zone generation
            elif i == 'Time Zone':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = timezone()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # boolean generation
            elif i == 'Boolean':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = boolean()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # money generation
            elif i == 'Money':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(money())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # mac address generation
            elif i == 'MAC Address':
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append(mac())
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # name title generation
            elif i == 'Title':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = nametitle()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # gender (unreachable: 'Gender' is already handled above)
            elif i == 'Gender':
                tempdata = []
                for j in range(int(nrows[0])):
                    data = gender()
                    string = formula[field_type.index(i)]
                    if 'this' in string:
                        if "if" in string and "else" in string:
                            newValue = enforce_function(string, data, 3)
                        else:
                            datas = data
                            newValue = ''
                            for data in datas.split():
                                newValue = newValue + ' ' + enforce_function(
                                    string, data, 1)
                        tempdata.append(newValue)
                    else:
                        tempdata.append(data)
                if chkempty[field_type.index(i)] == 'on':
                    df = pd.DataFrame(tempdata)
                    df = empty(df, valempty[field_type.index(i)])
                    tempdata = df['result'].values.tolist()

            # if no match to input
            else:
                tempdata = []
                for j in range(int(nrows[0])):
                    tempdata.append('Not valid Field Type')

            rows.append(tempdata)

        df = pd.DataFrame((list(zip(*rows))), columns=field_name, dtype=str)
        r, c = df.shape
        table_content = df.to_html(index=False,
                                   justify='center',
                                   classes="table")
        if button_value[0] == 'Preview':
            if c == 0:
                return render(request, "main.html")
            else:
                context = {'table_content': table_content}
                return JsonResponse(context)
        elif button_value[0] == '':
            if c == 0:
                return render(request, "main.html")
            else:
                # csv file exporter

                if fileExport[0] == 'CSV':

                    filename = "download/random_data.csv"
                    if ending[0] == 'Unix':
                        df.to_csv(filename,
                                  index=False,
                                  encoding="ascii",
                                  line_terminator="\n")  # renamed to lineterminator in pandas 2.0
                    else:
                        df.to_csv(filename,
                                  index=False,
                                  encoding="ascii",
                                  line_terminator="\r\n")
                    path_to_file = os.path.realpath(filename)
                    f = open(path_to_file, 'rb')
                    myfile = File(f)
                    response = HttpResponse(myfile, content_type='text/csv')
                    response[
                        'Content-Disposition'] = 'attachment; filename=' + 'random_data.csv'
                    return response

                # JSON file exporter
                elif fileExport[0] == 'JSON':
                    filename = "download/random_data.json"
                    df.to_json(filename, orient='records', lines=True)
                    path_to_file = os.path.realpath(filename)
                    f = open(path_to_file, 'r')
                    myfile = File(f)
                    response = HttpResponse(myfile,
                                            content_type='application/json')
                    response[
                        'Content-Disposition'] = 'attachment; filename="result.json"'
                    return response

                # TSV file exporter
                elif fileExport[0] == 'TSV':
                    filename = "download/random_data.tsv"
                    if ending[0] == 'Unix':
                        df.to_csv(filename,
                                  sep='\t',
                                  index=False,
                                  line_terminator="\n")
                    else:
                        df.to_csv(filename,
                                  sep='\t',
                                  index=False,
                                  line_terminator="\r\n")
                    path_to_file = os.path.realpath(filename)
                    f = open(path_to_file, 'r')
                    myfile = File(f)
                    response = HttpResponse(
                        myfile, content_type='text/tab-separated-values')
                    response[
                        'Content-Disposition'] = 'attachment; filename=' + 'random_data.tsv'
                    return response

                # Excel file exporter
                elif fileExport[0] == 'Excel':
                    from io import BytesIO as IO
                    excel_file = IO()
                    xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')
                    df.to_excel(xlwriter, 'sheetname')
                    xlwriter.save()
                    xlwriter.close()
                    excel_file.seek(0)
                    response = HttpResponse(
                        excel_file.read(),
                        content_type=
                        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
                    )
                    response[
                        'Content-Disposition'] = 'attachment; filename=random_data.xlsx'
                    return response

                # XML file exporter
                elif fileExport[0] == 'XML':
                    filename = "download/random_data.xml"
                    pd.DataFrame.to_xml = to_xml
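                    # note: pandas >= 1.3 ships a native DataFrame.to_xml, so this
                    # monkey-patch is only needed on older pandas versions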
                    to_xml(df, filename)
                    path_to_file = os.path.realpath(filename)
                    f = open(path_to_file, 'r')
                    myfile = File(f)
                    response = HttpResponse(myfile, content_type="text/xml")
                    response[
                        'Content-Disposition'] = 'attachment; filename=' + 'random_data.xml'
                    return response

                elif fileExport[0] == 'SQL':
                    r, c = df.shape
                    open("download/randomdata.txt", "w")
                    with open("download/randomdata.txt", "a") as file_prime:
                        for num in range(r):
                            file_prime.write(
                                "insert into MOCK_DATA (" +
                                ", ".join(df.columns.values.tolist()) +
                                ") values ('" + str("', '".join(
                                    str(x)
                                    for x in df.iloc[[num]].values.tolist()
                                    [0])) + "');" + '\n')

                    path_to_file = os.path.realpath("download/randomdata.txt")
                    f = open(path_to_file, 'r')
                    with open("download/random_data.sql", 'w') as file:
                        file.write("".join(f))
                    path_to_file = os.path.realpath("download/random_data.sql")
                    f = open(path_to_file, 'r')
                    myfile = File(f)
                    response = HttpResponse(myfile, content_type="text/sql")
                    response[
                        'Content-Disposition'] = 'attachment; filename=' + 'random_data.sql'
                    return response
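All of the text exporters above round-trip through files under download/ before serving them. A minimal in-memory variant (a sketch against the same df and Django HttpResponse, not the code above) streams the CSV from a StringIO buffer instead:

from io import StringIO

buf = StringIO()
df.to_csv(buf, index=False, line_terminator="\n")
response = HttpResponse(buf.getvalue(), content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=random_data.csv'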
Esempio n. 17
0
def capture(command, *args, **kwargs):
    out, sys.stdout = sys.stdout, IO()
    command(*args, **kwargs)
    sys.stdout.seek(0)
    yield sys.stdout.read()
    sys.stdout = out
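capture is a generator, so the redirected output is pulled with next(); exhausting the generator afterwards runs the final line and restores sys.stdout. A usage sketch (greet is a hypothetical stand-in for any printing function):

def greet():
    print('hello')

gen = capture(greet)
output = next(gen)   # 'hello\n'
next(gen, None)      # exhaust the generator so sys.stdout is restored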
Esempio n. 18
0
    def test_request_logs(self):
        test_cases = [
            [
                '',
                [
                    'Invalid URL', 'No schema supplied', 'getaddrinfo failed',
                    [
                        'nodename nor servname provided, or not known',
                        'Name or service not known'
                    ]
                ]
            ],
            [
                'typo://plotly.acme.com',
                [
                    'No connection adapters were found', 'getaddrinfo failed',
                    [
                        'nodename nor servname provided, or not known',
                        'No connection adapters were found'
                    ]
                ]
            ],
            [
                'https://doesntexist.plotly.systems',
                [
                    'Failed to establish a new connection',
                    'getaddrinfo failed',
                    [
                        'nodename nor servname provided, or not known',
                        'Name or service not known'
                    ]
                ]
            ],
            [
                'https://expired.badssl.com',
                [
                    'Caused by SSLError(SSLError("bad handshake: Error([(',
                    'SSL routines', 'tls_process_server_certificate',
                    'certificate verify failed', 'getaddrinfo: ',
                    "'104.154.89.105', 443"
                ]
            ],
            [
                'https://self-signed.badssl.com',
                [
                    'Caused by SSLError(SSLError("bad handshake: Error([(',
                    'SSL routines', 'tls_process_server_certificate',
                    'certificate verify failed', 'getaddrinfo',
                    "'104.154.89.105', 443"
                ]
            ]
        ]

        for url, expected_messages in test_cases:
            os.environ['plotly_api_domain'] = url
            f = IO()
            with captured_output(f) as out:
                try:
                    api_requests.post('/dash-apps')
                except Exception:
                    pass

            for expected_message in [url] + expected_messages:
                stdout = out.getvalue()
                if isinstance(expected_message, six.string_types):
                    self.assertTrue(
                        expected_message in stdout,
                        'url "{}"\nExpected "{}" to be in:\n{}\n'.format(
                            url, expected_message, stdout))
                else:
                    self.assertTrue(
                        (expected_message[0] in stdout)
                        or (expected_message[1] in stdout),
                        'url "{}"\nExpected\n"{}"\nor "{}"\nto be in:\n{}\n'.
                        format(url, expected_message[0], expected_message[1],
                               stdout))
Esempio n. 19
0
 def makefile(self, *args, **kwargs):
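     # fake-socket stub: anything that calls connection.makefile() gets this canned request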
     return IO(b"GET /")
Esempio n. 20
0
 def setup(self):
     super().setup()
     self.wfile = IO()
Esempio n. 21
0
 def __init__(self, headers, body):
     parsed = cgi.FieldStorage(IO(body.encode('utf-8')),
                               headers=headers,
                               environ=environ)
Esempio n. 22
0
 def makefile(self, *args, **kwargs):
     return IO(b"GET %s HTTP/1.0" % self._path)
Esempio n. 23
0
def train_once(
    sess,
    step,
    ops,
    names=None,
    gen_feed_dict_fn=None,
    deal_results_fn=None,
    interval_steps=100,
    eval_ops=None,
    eval_names=None,
    gen_eval_feed_dict_fn=None,
    deal_eval_results_fn=melt.print_results,
    valid_interval_steps=100,
    print_time=True,
    print_avg_loss=True,
    model_dir=None,
    log_dir=None,
    is_start=False,
    num_steps_per_epoch=None,
    metric_eval_fn=None,
    metric_eval_interval_steps=0,
    summary_excls=None,
    fixed_step=None,  # for epoch only, incase you change batch size
    eval_loops=1,
    learning_rate=None,
    learning_rate_patience=None,
    learning_rate_decay_factor=None,
    num_epochs=None,
    model_path=None,
    use_horovod=False,
):
    use_horovod = 'OMPI_COMM_WORLD_RANK' in os.environ

    #is_start = False # force not to evaluate at first step
    #print('-----------------global_step', sess.run(tf.train.get_or_create_global_step()))
    timer = gezi.Timer()
    if print_time:
        if not hasattr(train_once, 'timer'):
            train_once.timer = Timer()
            train_once.eval_timer = Timer()
            train_once.metric_eval_timer = Timer()

    melt.set_global('step', step)
    epoch = (fixed_step
             or step) / num_steps_per_epoch if num_steps_per_epoch else -1
    if not num_epochs:
        epoch_str = 'epoch:%.3f' % (epoch) if num_steps_per_epoch else ''
    else:
        epoch_str = 'epoch:%.3f/%d' % (
            epoch, num_epochs) if num_steps_per_epoch else ''
    melt.set_global('epoch', '%.2f' % (epoch))

    info = IO()
    stop = False

    if eval_names is None:
        if names:
            eval_names = ['eval/' + x for x in names]
    elif eval_names:
        eval_names = ['eval/' + x for x in eval_names]

    if names:
        names = ['train/' + x for x in names]

    is_eval_step = is_start or valid_interval_steps and step % valid_interval_steps == 0
    summary_str = []

    eval_str = ''
    if is_eval_step:
        # deal with summary
        if log_dir:
            if not hasattr(train_once, 'summary_op'):
                #melt.print_summary_ops()
                if summary_excls is None:
                    train_once.summary_op = tf.summary.merge_all()
                else:
                    summary_ops = []
                    for op in tf.get_collection(tf.GraphKeys.SUMMARIES):
                        for summary_excl in summary_excls:
                            if not summary_excl in op.name:
                                summary_ops.append(op)
                    print('filtered summary_ops:')
                    for op in summary_ops:
                        print(op)
                    train_once.summary_op = tf.summary.merge(summary_ops)

                #train_once.summary_train_op = tf.summary.merge_all(key=melt.MonitorKeys.TRAIN)
                train_once.summary_writer = tf.summary.FileWriter(
                    log_dir, sess.graph)

                tf.contrib.tensorboard.plugins.projector.visualize_embeddings(
                    train_once.summary_writer, projector_config)

        # if eval ops are present we should have been rank 0

        if eval_ops:
            #if deal_eval_results_fn is None and eval_names is not None:
            #  deal_eval_results_fn = lambda x: melt.print_results(x, eval_names)
            for i in range(eval_loops):
                eval_feed_dict = {} if gen_eval_feed_dict_fn is None else gen_eval_feed_dict_fn(
                )
                #eval_feed_dict.update(feed_dict)

                # if using horovod let each rank use the same sess.run!
                if not log_dir or train_once.summary_op is None or gezi.env_has(
                        'EVAL_NO_SUMMARY') or use_horovod:
                    #if not log_dir or train_once.summary_op is None:
                    eval_results = sess.run(eval_ops, feed_dict=eval_feed_dict)
                else:
                    eval_results = sess.run(eval_ops + [train_once.summary_op],
                                            feed_dict=eval_feed_dict)
                    summary_str = eval_results[-1]
                    eval_results = eval_results[:-1]
                eval_loss = gezi.get_singles(eval_results)
                #timer_.print()
                eval_stop = False
                if use_horovod:
                    sess.run(hvd.allreduce(tf.constant(0)))

                #if not use_horovod or  hvd.local_rank() == 0:
                # @TODO user print should also use logging as a must ?
                #print(gezi.now_time(), epoch_str, 'eval_step: %d'%step, 'eval_metrics:', end='')
                eval_names_ = melt.adjust_names(eval_loss, eval_names)
                #if not use_horovod or hvd.rank() == 0:
                #  logging.info2('{} eval_step:{} eval_metrics:{}'.format(epoch_str, step, melt.parse_results(eval_loss, eval_names_)))
                eval_str = 'valid:{}'.format(
                    melt.parse_results(eval_loss, eval_names_))

                # if deal_eval_results_fn is not None:
                #   eval_stop = deal_eval_results_fn(eval_results)

                assert len(eval_loss) > 0
                if eval_stop is True:
                    stop = True
                eval_names_ = melt.adjust_names(eval_loss, eval_names)
                if not use_horovod or hvd.rank() == 0:
                    melt.set_global('eval_loss',
                                    melt.parse_results(eval_loss, eval_names_))

        elif interval_steps != valid_interval_steps:
            #print()
            pass

    metric_evaluate = False

    # if metric_eval_fn is not None \
    #   and (is_start \
    #     or (num_steps_per_epoch and step % num_steps_per_epoch == 0) \
    #          or (metric_eval_interval_steps \
    #              and step % metric_eval_interval_steps == 0)):
    #  metric_evaluate = True

    if metric_eval_fn is not None \
      and ((is_start or metric_eval_interval_steps \
           and step % metric_eval_interval_steps == 0) or model_path):
        metric_evaluate = True

    if 'EVFIRST' in os.environ:
        if os.environ['EVFIRST'] == '0':
            if is_start:
                metric_evaluate = False
        else:
            if is_start:
                metric_evaluate = True

    if step == 0 or 'QUICK' in os.environ:
        metric_evaluate = False

    #print('------------1step', step, 'pre metric_evaluate', metric_evaluate, hvd.rank())
    if metric_evaluate:
        if use_horovod:
            print('------------metric evaluate step', step, model_path,
                  hvd.rank())
        if not model_path or 'model_path' not in inspect.getargspec(
                metric_eval_fn).args:
            metric_eval_fn_ = metric_eval_fn
        else:
            metric_eval_fn_ = lambda: metric_eval_fn(model_path=model_path)

        try:
            l = metric_eval_fn_()
            if isinstance(l, tuple):
                num_returns = len(l)
                if num_returns == 2:
                    evaluate_results, evaluate_names = l
                    evaluate_summaries = None
                else:
                    assert num_returns == 3, 'return 1, 2 or 3 values; more is not supported'
                    evaluate_results, evaluate_names, evaluate_summaries = l
            else:  # returned a dict
                evaluate_results, evaluate_names = tuple(zip(*l.items()))
                evaluate_summaries = None
        except Exception:
            logging.info('Do nothing for metric eval fn with exception:\n',
                         traceback.format_exc())

        if not use_horovod or hvd.rank() == 0:
            #logging.info2('{} valid_step:{} {}:{}'.format(epoch_str, step, 'valid_metrics' if model_path is None else 'epoch_valid_metrics', melt.parse_results(evaluate_results, evaluate_names)))
            logging.info2('{} valid_step:{} {}:{}'.format(
                epoch_str, step, 'valid_metrics',
                melt.parse_results(evaluate_results, evaluate_names)))

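        # patience-based LR decay: if the leading valid metric has not improved
        # for `learning_rate_patience` consecutive evaluations, scale the
        # learning rate by `learning_rate_decay_factor` and reset the counter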
        if learning_rate is not None and (learning_rate_patience
                                          and learning_rate_patience > 0):
            assert learning_rate_decay_factor > 0 and learning_rate_decay_factor < 1
            valid_loss = evaluate_results[0]
            if not hasattr(train_once, 'min_valid_loss'):
                train_once.min_valid_loss = valid_loss
                train_once.decay_steps = []
                train_once.patience = 0
            else:
                if valid_loss < train_once.min_valid_loss:
                    train_once.min_valid_loss = valid_loss
                    train_once.patience = 0
                else:
                    train_once.patience += 1
                    logging.info2('{} valid_step:{} patience:{}'.format(
                        epoch_str, step, train_once.patience))

            if learning_rate_patience and train_once.patience >= learning_rate_patience:
                lr_op = ops[1]
                lr = sess.run(lr_op) * learning_rate_decay_factor
                train_once.decay_steps.append(step)
                logging.info2(
                    '{} valid_step:{} learning_rate_decay by *{}, learning_rate_decay_steps={}'
                    .format(epoch_str, step, learning_rate_decay_factor,
                            ','.join(map(str, train_once.decay_steps))))
                sess.run(tf.assign(lr_op, tf.constant(lr, dtype=tf.float32)))
                train_once.patience = 0
                train_once.min_valid_loss = valid_loss

    if ops is not None:
        #if deal_results_fn is None and names is not None:
        #  deal_results_fn = lambda x: melt.print_results(x, names)

        feed_dict = {} if gen_feed_dict_fn is None else gen_feed_dict_fn()
        # NOTICE ops[2] should be scalar otherwise wrong!! loss should be scalar
        #print('---------------ops', ops)
        if eval_ops is not None or not log_dir or not hasattr(
                train_once,
                'summary_op') or train_once.summary_op is None or use_horovod:
            feed_dict[K.learning_phase()] = 1
            results = sess.run(ops, feed_dict=feed_dict)
        else:
            ## TODO why below ?
            #try:
            feed_dict[K.learning_phase()] = 1
            results = sess.run(ops + [train_once.summary_op],
                               feed_dict=feed_dict)
            summary_str = results[-1]
            results = results[:-1]
            # except Exception:
            #   logging.info('sess.run(ops + [train_once.summary_op], feed_dict=feed_dict) fail')
            #   results = sess.run(ops, feed_dict=feed_dict)

        #print('------------results', results)
        # #--------trace debug
        # if step == 210:
        #   run_metadata = tf.RunMetadata()
        #   results = sess.run(
        #         ops,
        #         feed_dict=feed_dict,
        #         options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
        #         run_metadata=run_metadata)
        #   from tensorflow.python.client import timeline
        #   trace = timeline.Timeline(step_stats=run_metadata.step_stats)

        #   trace_file = open('timeline.ctf.json', 'w')
        #   trace_file.write(trace.generate_chrome_trace_format())

        # results[0] is assumed to be train_op, results[1] the learning_rate
        learning_rate = results[1]
        results = results[2:]

        #@TODO should support aver loss and other avg evaluations like test..
        if print_avg_loss:
            if not hasattr(train_once, 'avg_loss'):
                train_once.avg_loss = AvgScore()
            #assume results[0] as train_op return, results[1] as loss
            loss = gezi.get_singles(results)
            train_once.avg_loss.add(loss)

        steps_per_second = None
        instances_per_second = None
        hours_per_epoch = None
        #step += 1
        #if is_start or interval_steps and step % interval_steps == 0:
        interval_ok = not use_horovod or hvd.local_rank() == 0
        if interval_steps and step % interval_steps == 0 and interval_ok:
            train_average_loss = train_once.avg_loss.avg_score()
            if print_time:
                duration = timer.elapsed()
                duration_str = 'duration:{:.2f} '.format(duration)
                melt.set_global('duration', '%.2f' % duration)
                #info.write(duration_str)
                elapsed = train_once.timer.elapsed()
                steps_per_second = interval_steps / elapsed
                batch_size = melt.batch_size()
                num_gpus = melt.num_gpus()
                instances_per_second = interval_steps * batch_size / elapsed
                gpu_info = '' if num_gpus <= 1 else ' gpus:[{}]'.format(
                    num_gpus)
                if num_steps_per_epoch is None:
                    epoch_time_info = ''
                else:
                    hours_per_epoch = num_steps_per_epoch / interval_steps * elapsed / 3600
                    epoch_time_info = '1epoch:[{:.2f}h]'.format(
                        hours_per_epoch)
                info.write(
                    'elapsed:[{:.2f}] batch_size:[{}]{} batches/s:[{:.2f}] insts/s:[{:.2f}] {} lr:[{:.6f}]'
                    .format(elapsed, batch_size, gpu_info, steps_per_second,
                            instances_per_second, epoch_time_info,
                            learning_rate))

            if print_avg_loss:
                #info.write('train_avg_metrics:{} '.format(melt.value_name_list_str(train_average_loss, names)))
                names_ = melt.adjust_names(train_average_loss, names)
                #info.write('train_avg_metric:{} '.format(melt.parse_results(train_average_loss, names_)))
                info.write(' train:{} '.format(
                    melt.parse_results(train_average_loss, names_)))
                #info.write('train_avg_loss: {} '.format(train_average_loss))
            info.write(eval_str)
            #print(gezi.now_time(), epoch_str, 'train_step:%d'%step, info.getvalue(), end=' ')
            logging.info2('{} {} {}'.format(epoch_str, 'step:%d' % step,
                                            info.getvalue()))

            if deal_results_fn is not None:
                stop = deal_results_fn(results)

    summary_strs = gezi.to_list(summary_str)
    if metric_evaluate:
        if evaluate_summaries is not None:
            summary_strs += evaluate_summaries

    if step > 1:
        if is_eval_step:
            # deal with summary
            if log_dir:
                summary = tf.Summary()
                if eval_ops is None:
                    if train_once.summary_op is not None:
                        for summary_str in summary_strs:
                            train_once.summary_writer.add_summary(
                                summary_str, step)
                else:
                    for summary_str in summary_strs:
                        train_once.summary_writer.add_summary(
                            summary_str, step)
                    suffix = 'valid' if not eval_names else ''
                    # loss/valid
                    melt.add_summarys(summary,
                                      eval_results,
                                      eval_names_,
                                      suffix=suffix)

                if ops is not None:
                    try:
                        # loss/train_avg
                        melt.add_summarys(summary,
                                          train_average_loss,
                                          names_,
                                          suffix='train_avg')
                    except Exception:
                        pass
                    ##optimizer has done this also
                    melt.add_summary(summary, learning_rate, 'learning_rate')
                    melt.add_summary(summary,
                                     melt.batch_size(),
                                     'batch_size',
                                     prefix='other')
                    melt.add_summary(summary,
                                     melt.epoch(),
                                     'epoch',
                                     prefix='other')
                    if steps_per_second:
                        melt.add_summary(summary,
                                         steps_per_second,
                                         'steps_per_second',
                                         prefix='perf')
                    if instances_per_second:
                        melt.add_summary(summary,
                                         instances_per_second,
                                         'instances_per_second',
                                         prefix='perf')
                    if hours_per_epoch:
                        melt.add_summary(summary,
                                         hours_per_epoch,
                                         'hours_per_epoch',
                                         prefix='perf')

                if metric_evaluate:
                    #melt.add_summarys(summary, evaluate_results, evaluate_names, prefix='eval')
                    prefix = 'step_eval'
                    if model_path:
                        prefix = 'eval'
                        if not hasattr(train_once, 'epoch_step'):
                            train_once.epoch_step = 1
                        else:
                            train_once.epoch_step += 1
                        step = train_once.epoch_step
                    # eval/loss eval/auc ..
                    melt.add_summarys(summary,
                                      evaluate_results,
                                      evaluate_names,
                                      prefix=prefix)

                train_once.summary_writer.add_summary(summary, step)
                train_once.summary_writer.flush()
            return stop
        elif metric_evaluate and log_dir:
            summary = tf.Summary()
            for summary_str in summary_strs:
                train_once.summary_writer.add_summary(summary_str, step)
            #summary.ParseFromString(evaluate_summaries)
            summary_writer = train_once.summary_writer
            prefix = 'step_eval'
            if model_path:
                prefix = 'eval'
                if not hasattr(train_once, 'epoch_step'):
                    ## TODO.. restart will get 1 again..
                    #epoch_step = tf.Variable(0, trainable=False, name='epoch_step')
                    #epoch_step += 1
                    #train_once.epoch_step = sess.run(epoch_step)
                    valid_interval_epochs = 1.
                    try:
                        valid_interval_epochs = FLAGS.valid_interval_epochs
                    except Exception:
                        pass
                    train_once.epoch_step = 1 if melt.epoch() <= 1 else int(
                        int(melt.epoch() * 10) /
                        int(valid_interval_epochs * 10))
                    logging.info('train_once epoch start step is',
                                 train_once.epoch_step)
                else:
                    #epoch_step += 1
                    train_once.epoch_step += 1
                step = train_once.epoch_step
            #melt.add_summarys(summary, evaluate_results, evaluate_names, prefix='eval')
            melt.add_summarys(summary,
                              evaluate_results,
                              evaluate_names,
                              prefix=prefix)
            summary_writer.add_summary(summary, step)
            summary_writer.flush()
Esempio n. 24
0
 def makefile(self, *args, **kwargs):
     return IO(b"PUT %s HTTP/1.0\nContent-Length: %d\n\n%s" %
               (self._path, len(self._data), self._data))
Esempio n. 25
0
 def serialize(self, value):
     data, sr = value
     buffer = IO()
     soundfile.write(buffer, data, sr, format='WAV', subtype='PCM_16')
     body = base64.b64encode(buffer.getvalue()).decode('utf8')
     return 'data:audio/wav;base64,{body}'.format(body=body)
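Decoding is the mirror image; a round-trip sketch (assuming the same soundfile, base64 and IO imports, with encoded standing in for the data URI returned above):

header, body = encoded.split(',', 1)
data, sr = soundfile.read(IO(base64.b64decode(body)))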
Esempio n. 26
0
def attendance_Of_Any_month1(request):
    if request.method == "POST":
        #monthvarnum = request.GET.get('sel')
        # capture values from the user input
        monthvarnum = request.POST.get('Select')
        monthvarnum = int(monthvarnum)

        yearvarnum = request.POST.get('year')
        yearvarnum = int(yearvarnum)

        # show full column contents in the dataframe
        pd.set_option('display.max_colwidth', -1)

        try:
            from io import BytesIO as IO  # for modern python

        except ImportError:
            from io import StringIO as IO  # for legacy python


        # copy the Attendance table and filter for days marked present

        copy1 = Attendance.objects.all().values_list(
            'Employee_name',
            'Attnd_status',
            'year',
            'month',
        ).filter(
            Q(Attnd_status=1) & Q(year=yearvarnum)
            & Q(month=monthvarnum))  # Attnd_status=1 marks a present day
        dframe1 = pd.DataFrame(copy1,
                               columns=[
                                   'Employee_name',
                                   'Present_Days',
                                   'year',
                                   'month',
                               ])

        # group by employee and sum the present days
        dframe1 = dframe1.groupby([
            'Employee_name',
        ]).agg({
            'Present_Days': 'sum',
        }).reset_index()

        # copy the Attendance table and filter for days marked absent

        copy2 = Attendance.objects.all().values_list(
            'Employee_name',
            'Attnd_status',
            'year',
            'month',
        ).filter(
            Q(Attnd_status=0) & Q(year=yearvarnum)
            & Q(month=monthvarnum))  # Attnd_status=0 marks an absent day
        dframe2 = pd.DataFrame(copy2,
                               columns=[
                                   'Employee_name',
                                   'Absent_Days',
                                   'year',
                                   'month',
                               ])

        dframe2['Absent_Days'] = dframe2['Absent_Days'].replace([
            0,
        ], 1)

        # group by employee and sum the absent days
        dframe2 = dframe2.groupby([
            'Employee_name',
        ]).agg({
            'Absent_Days': 'sum',
        }).reset_index()

        # outer-join the two dataframes on Employee_name
        dframe3 = reduce(
            lambda x, y: pd.merge(x, y, on=[
                'Employee_name',
            ], how='outer'), [dframe2, dframe1])

        # number of days in the selected month
        This_month_days = calendar.monthrange(yearvarnum, monthvarnum)[1]

        repeat_var = list(
            itertools.repeat(This_month_days, dframe3.Employee_name.count())
        )  # one entry per employee, each equal to the day count of the month

        dframe3['Total_Days_Of_This_Month'] = repeat_var

        # fill missing entries with 0

        dframe3 = dframe3.fillna(0)

        # rearranging dataframe columns
        dframe3 = dframe3[[
            'Employee_name', 'Total_Days_Of_This_Month', 'Present_Days',
            'Absent_Days'
        ]]

        # my "Excel" file, which is an in-memory output file (buffer)
        # for the new workbook
        excel_file = IO()

        xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')

        dframe3.to_excel(xlwriter, 'This_month_Attendance')

        workbook = xlwriter.book
        worksheet = xlwriter.sheets['This_month_Attendance']

        worksheet.set_column('B:C', 25)

        worksheet.set_column('D:E', 25)

        xlwriter.save()
        xlwriter.close()
        # important step, rewind the buffer or when it is read() you'll get nothing
        # but an error message when you try to open your zero length file in Excel
        excel_file.seek(0)

        response = HttpResponse(
            excel_file.read(),
            content_type=
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        )

        # set the file name in the Content-Disposition header
        response[
            'Content-Disposition'] = 'attachment; filename=Custom_Attendance_Month_Salary.xlsx'


    return response
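If rewinding the buffer feels easy to forget, BytesIO.getvalue() returns the full contents regardless of the current position, so the seek(0)/read() pair can collapse into one call:

response = HttpResponse(
    excel_file.getvalue(),  # no seek(0) needed; getvalue() ignores the cursor
    content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')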
Esempio n. 27
0
def export(request, mode, tender_ids=None):
    if tender_ids is None:
        tender_objs = Tender.objects.all()
        bid_objs = Bid.objects.all()
        volume_objs = Volume.objects.all()
        company_objs = Company.objects.all()
    else:
        list_tender_ids = tender_ids.split("|")
        tender_objs = Tender.objects.filter(pk__in=list_tender_ids).distinct()
        bid_objs = Bid.objects.filter(tender__in=tender_objs).distinct()
        volume_objs = Volume.objects.filter(tender__in=tender_objs).distinct()
        company_objs = Company.objects.filter(bids__in=bid_objs).distinct()

    if mode == "volume":
        tenders = pd.DataFrame(list(tender_objs.values()))
        bids = pd.DataFrame(list(bid_objs.values()))
        volumes = pd.DataFrame(list(volume_objs.values()))
        companies = pd.DataFrame(list(company_objs.values()))

        tenders.rename(columns={"id": "tender_id"},
                       inplace=True)  # rename the id column so later merges can match on it (same below)
        df = pd.merge(volumes, tenders, how="left",
                      on="tender_id")  # merge tenders onto volumes

        bids.rename(columns={"id": "winner_id"}, inplace=True)
        df = pd.merge(df, bids, how="left",
                      on="winner_id")  # merge bids onto volume+tender

        companies.rename(columns={"id": "bidder_id"}, inplace=True)
        df = pd.merge(df, companies, how="left",
                      on="bidder_id")  # merge companies onto volume+tender+bid

        # df["proc_percentage"] = df.apply(
        #     lambda x: Tender.objects.get(pk=x["tender_id"]).proc_percentage, axis=1
        # )  # 添加列:集采比例
        df["amount_contract"] = df.apply(
            lambda x: Volume.objects.get(pk=x["id"]).amount_contract(),
            axis=1)  # 添加列:实际合同量

        df = df[[
            "vol",
            "target",
            "spec",
            "tender_begin",
            "ceiling_price",
            "region",
            "amount_reported",
            # "proc_percentage",
            "amount_contract",
            "full_name",
            "abbr_name",
            "mnc_or_local",
            "origin",
            "bid_price",
            "original_price",
        ]]

        df.columns = [
            "批次",
            "标的",
            "剂型剂量",
            "标期开始时间",
            "最高有效申报价",
            "地区",
            "区域报量",
            # "集采比例",
            "实际合同量",
            "竞标公司全称",
            "竞标公司简称",
            "是否跨国公司",
            "是否此标的原研",
            "竞标价",
            "集采前价格",
        ]
    elif mode == "tender":
        tenders = pd.DataFrame(list(tender_objs.values()))
        bids = pd.DataFrame(list(bid_objs.values()))
        companies = pd.DataFrame(list(company_objs.values()))

        companies.rename(columns={"id": "bidder_id"}, inplace=True)
        df = pd.merge(bids, companies, how="left",
                      on="bidder_id")  # merge companies onto bids

        tenders.rename(columns={"id": "tender_id"}, inplace=True)
        df = pd.merge(df, tenders, how="left",
                      on="tender_id")  # merge tenders onto bid+company

        df["is_winner"] = df.apply(
            lambda x: Bid.objects.get(pk=x["id"]).is_winner(),
            axis=1)  # add column: is_winner flag
        df["specs"] = df.apply(
            lambda x: ",".join(
                list(Tender.objects.get(pk=x["tender_id"]).get_specs())),
            axis=1,
        )  # add column: specs
        df["total_std_volume_reported"] = df.apply(
            lambda x: Tender.objects.get(pk=x["tender_id"]).
            total_std_volume_reported(),
            axis=1,
        )  # add column: total reported volume for the tender
        df["total_std_volume_contract"] = df.apply(
            lambda x: Tender.objects.get(pk=x["tender_id"]).
            total_std_volume_contract(),
            axis=1,
        )  # add column: total contract volume for the tender
        df["total_value_contract"] = df.apply(
            lambda x: Tender.objects.get(pk=x["tender_id"]).
            total_value_contract(),
            axis=1,
        )  # add column: total contract value for the tender
        df["specs"] = df.apply(
            lambda x: ",".join(
                list(Tender.objects.get(pk=x["tender_id"]).get_specs())),
            axis=1,
        )  # 添加列:剂型剂量
        df["regions_win"] = df.apply(
            lambda x: ",".join(list(Bid.objects.get(pk=x["id"]).regions_win())
                               ),
            axis=1,
        )  # add column: regions won
        df["std_volume_win"] = df.apply(
            lambda x: Bid.objects.get(pk=x["id"]).std_volume_win(),
            axis=1)  # add column: contract volume won by this bidder
        df["value_win"] = df.apply(
            lambda x: Bid.objects.get(pk=x["id"]).value_win(),
            axis=1)  # add column: contract value won by this bidder
        df["tender_period"] = df.apply(
            lambda x: Tender.objects.get(pk=x["tender_id"]).tender_period,
            axis=1)  # add column: tender period
        df["proc_percentage"] = df.apply(
            lambda x: Tender.objects.get(pk=x["tender_id"]).proc_percentage,
            axis=1)  # add column: procurement percentage

        df = df[[
            "vol",
            "target",
            "specs",
            "tender_begin",
            "tender_period",
            "ceiling_price",
            "total_std_volume_reported",
            "proc_percentage",
            "total_std_volume_contract",
            "total_value_contract",
            "full_name",
            "abbr_name",
            "mnc_or_local",
            "origin",
            "bid_price",
            "original_price",
            "is_winner",
            "std_volume_win",
            "value_win",
            "regions_win",
        ]]

        df.columns = [
            "批次",
            "标的",
            "标的剂型剂量",
            "标期开始时间",
            "标期",
            "最高有效申报价",
            "标的官方报量",
            "标的带量比例",
            "标的实际合同量",
            "标的实际合同金额",
            "竞标公司全称",
            "竞标公司简称",
            "是否跨国公司",
            "是否此标的原研",
            "竞标价",
            "集采前价格",
            "是否中标",
            "竞标者赢得实际合同量",
            "竞标者赢得实际合同金额",
            "中标区域",
        ]
    excel_file = IO()

    xlwriter = pd.ExcelWriter(excel_file, engine="xlsxwriter")

    df.to_excel(xlwriter, "data", index=False)

    xlwriter.save()
    xlwriter.close()

    excel_file.seek(0)

    # set the response MIME type for the browser
    response = HttpResponse(
        excel_file.read(),
        content_type=
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )

    # set the file name in the Content-Disposition header
    now = datetime.datetime.now().strftime(
        "%Y%m%d%H%M%S")  # a precise timestamp will not repeat, suitable for naming the default export
    response["Content-Disposition"] = "attachment; filename=" + now + ".xlsx"
    return response
Esempio n. 28
0
    def __init__(self, *components, **kwargs):
        """
        """
        # Check input components
        if len(components) < 2:
            raise ValueError(
                '\n\n +++ CliMT.federation: you must give me more than 1 component to federate!\n\n'
            )
        else:
            for component in components:
                assert type(component) is InstanceType, \
                '\n\n +++CliMT.federation: Input item %s is not an instance.\n\n' % str(component)

        # Re-order components: diagnostic, semi-implicit, explicit, implicit
        components = list(components)
        """
        for i in range(len(components)):
            if len(components[i].Prognostic) > 0:
                components.append(components.pop(i))
        for scheme in ['semi-implicit', 'explicit', 'implicit']:
            for i in range(len(components)):
                if components[i].SteppingScheme == scheme:
                    components.append(components.pop(i))
        """
        self.components = components

        # Federation's Required is union of all components' Required;
        # same for Prognostic and Diagnostic
        self.Required = []
        self.Prognostic = []
        self.Diagnostic = []
        for component in components:
            self.Required = list(set(self.Required).union(component.Required))
            self.Prognostic = list(
                set(self.Prognostic).union(component.Prognostic))
            self.Diagnostic = list(
                set(self.Diagnostic).union(component.Diagnostic))

        # Other attributes
        self.Name = 'federation'
        self.Extension = None

        # Set LevType to None if all components are None, else p
        self.LevType = None
        for component in components:
            if component.LevType == 'p': self.LevType = 'p'

        # Initialize self.Fixed (subset of self.Prognostic which will NOT be time-marched)
        if 'Fixed' in kwargs: self.Fixed = kwargs.pop('Fixed')
        else: self.Fixed = []

        # Instantiate I/O
        self.Io = IO(self, **kwargs)

        # Get values from restart file, if available
        if 'RestartFile' in kwargs:
            ParamNames = Parameters().value.keys()
            FieldNames = self.Required
            kwargs = self.Io.readRestart(FieldNames, ParamNames, kwargs)

        # Initialize scalar parameters
        self.Params = Parameters(**kwargs)

        # Initialize State
        self.State = State(self, **kwargs)
        self.Grid = self.State.Grid

        # Set some redundant attributes (mainly for backward compatibility)
        self.nlon = self.Grid['nlon']
        self.nlat = self.Grid['nlat']
        self.nlev = self.Grid['nlev']
        try:
            self.o3 = self.State['o3']
        except:
            pass

        # Check if components enforce axis dimensions, ensure consistency
        for component in self.components:
            for AxisName in ['lev', 'lat', 'lon']:
                exec('n_fed = self.n%s' % AxisName)
                try:
                    exec('n_com = component.Extension.get_n%s()' % AxisName)
                except:
                    n_com = n_fed
                assert n_com == n_fed, \
                '\n\n ++++ CliMT.federation.init: recompile with %i %ss to run this federation\n'\
                % (n_fed,AxisName)

        # Dictionary to hold increments on prognostic fields
        self.Inc = {}

        # Adjust components' attributes
        for component in self.components:
            component.Monitoring = False
            component.Io.OutputFreq = self.Io.OutputFreq
            component.Fixed.extend(self.Fixed)
            if component.UpdateFreq == component['dt']:
                component.UpdateFreq = self['dt']
            component.Params = self.Params
            component.Grid = self.State.Grid
            component.State = self.State
            component.Inc = {}
            # insolation component gets special treatment because
            # of need to set orb params in common block (yes, this is ugly)
            try:
                component.setOrbParams(**kwargs)
            except:
                pass
        self.compute(ForcedCompute=True)

        # Create output file
        self.Io.createOutputFile(self.State, self.Params.value)

        # Write out initial state
        if not self.Io.Appending: self.write()

        # Initialize plotting facilities
        self.Plot = Plot()

        # Initialize runtime monitor
        self.Monitor = Monitor(self, **kwargs)

        # Notify user of unused input quantities
        self._checkUnused(kwargs)
Esempio n. 29
0
 def __init__(self, filename):
     self.filename = filename
     self.excel_file = IO()
     self.xlwriter = pd.ExcelWriter(self.excel_file, engine='xlsxwriter')
Esempio n. 30
0
 def makefile(self, *args, **kwargs):
     return IO(self.data)