Example #1
    def export(self, name, file_type):
        # Build the output path in the user's home directory.
        file_path = os.path.join(os.path.expanduser("~"), name)

        if file_type == "csv":
            self.all_crimes.to_csv(file_path)
        elif file_type == "sav":
            # Work on a copy so the original frame's column names stay intact;
            # SPSS variable names may not contain spaces.
            temp = self.all_crimes.copy()
            temp.columns = [col.replace(" ", "_") for col in temp.columns]
            write_sav(temp, file_path)
Example #2
def export_spss(request, selected_attributes):
    studyid = request.session['study_select']
    tweet_resource = resources.create_TweetResource(get_attributes_to_export(selected_attributes))
    dataset = tweet_resource.export(Study.objects.get(study_id=studyid).tweets.all())
    # Flatten the exported JSON into a DataFrame and write it out as a .sav file.
    df = pd.json_normalize(json.loads(dataset.json))
    pyreadstat.write_sav(df, 'Data/SPSS/template.sav')
    f = default_storage.open('Data/SPSS/template.sav').read()
    response = HttpResponse(f, content_type='application/x-spss-sav')
    # str(datetime.now()) contains ':' characters, which are invalid in file
    # names on some systems; use a formatted timestamp instead.
    timestamp = datetime.now().strftime('%Y-%m-%d %H%M%S')
    response['Content-Disposition'] = 'attachment; filename="' + studyid + ' - ' + timestamp + '.sav"'
    return response
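The view above writes every export to the same fixed path, Data/SPSS/template.sav, so concurrent requests can overwrite each other's output. A hedged alternative sketch using a per-request temporary file; the helper name and parameters are illustrative:

import os
import tempfile

import pyreadstat
from django.http import HttpResponse


def export_spss_response(df, filename):
    # Write to a per-request temporary file; write_sav needs a real path.
    with tempfile.NamedTemporaryFile(suffix='.sav', delete=False) as tmp:
        tmp_path = tmp.name
    try:
        pyreadstat.write_sav(df, tmp_path)
        with open(tmp_path, 'rb') as f:
            payload = f.read()
    finally:
        os.remove(tmp_path)
    response = HttpResponse(payload, content_type='application/x-spss-sav')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    return response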
Example #3
from argparse import ArgumentParser

import pandas
import pyreadstat


def main():
    """Reads a CSV file and outputs an SPSS .sav file that contains the same data."""
    parser = ArgumentParser(
        description='Converts some ResponseRecords into an SPSS .sav file')
    parser.add_argument('csv_name',
                        nargs=1,
                        type=str,
                        help='The name of the input file')
    parser.add_argument('output_name',
                        nargs=1,
                        type=str,
                        help='The name of the output file')

    args = parser.parse_args()
    csv_name = args.csv_name[0]
    output_name = args.output_name[0]

    records_df = pandas.read_csv(csv_name, engine='c')
    records_df.columns = [
        column.replace(' ', '_') for column in records_df.columns
    ]
    pyreadstat.write_sav(records_df, output_name)
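A hedged usage note: assuming the function above is saved as csv_to_sav.py with a standard if __name__ == '__main__': main() guard (not shown in the snippet), an invocation would look like:

python csv_to_sav.py responses.csv responses.sav

where responses.csv and responses.sav are illustrative file names.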
Example #4
def from_dataframe(df: DataFrame,
                   target: Optional[Union['PathLike[Any]', BytesIO]] = None,
                   metadata: Optional[Metadata] = None,
                   compress: bool = False):
    """Create an SPSS dataset from a `Pandas <https://pandas.pydata.org/>`_
    :class:`DataFrame <pandas:DataFrame>`.

    :param df: The :class:`DataFrame` to serialize to an SPSS dataset.
    :type df: :class:`pandas.DataFrame <pandas:DataFrame>`

    :param target: The target to which the SPSS dataset should be written. Accepts either
      a filename/path, a :class:`BytesIO <python:io.BytesIO>` object, or
      :obj:`None <python:None>`. If :obj:`None <python:None>` will return a
      :class:`BytesIO <python:io.BytesIO>` object containing the SPSS dataset. Defaults to
      :obj:`None <python:None>`.
    :type target: Path-like / :class:`BytesIO <python:io.BytesIO>` /
      :obj:`None <python:None>`

    :param metadata: The :class:`Metadata` associated with the dataset. If
      :obj:`None <python:None>`, will attempt to derive it from ``df``. Defaults to
      :obj:`None <python:None>`.
    :type metadata: :class:`Metadata` / :obj:`None <python:None>`

    :param compress: If ``True``, will return data in the compressed ZSAV format. If
      ``False``, will return data in the standard SAV format. Defaults to ``False``.
    :type compress: :class:`bool <python:bool>`

    :returns: A :class:`BytesIO <python:io.BytesIO>` object containing the SPSS data if
      ``target`` is :obj:`None <python:None>` or not a filename, otherwise
      :obj:`None <python:None>`
    :rtype: :class:`BytesIO <python:io.BytesIO>` or :obj:`None <python:None>`

    :raises ValueError: if ``df`` is not a :class:`pandas.DataFrame <pandas:DataFrame>`
    :raises ValueError: if ``metadata`` is not a :class:`Metadata`

    """
    if not checkers.is_type(df, 'DataFrame'):
        raise ValueError(
            f'df must be a pandas.DataFrame. Was: {df.__class__.__name__}')
    if metadata and not checkers.is_type(
            metadata, ('Metadata', 'metadata_container', 'dict')):
        raise ValueError(
            f'metadata must be a Metadata instance or compatible object. '
            f'Was: {metadata.__class__.__name__}')
    elif metadata and checkers.is_type(metadata, 'metadata_container'):
        metadata = Metadata.from_pyreadstat(metadata)
    elif metadata and checkers.is_type(metadata, 'dict'):
        metadata = Metadata.from_dict(metadata)

    is_file = False
    if target and checkers.is_pathlike(target):
        is_file = True
    elif target:
        target = validators.bytesIO(target, allow_empty=False)

    if metadata:
        as_pyreadstat = metadata.to_pyreadstat()
    else:
        as_pyreadstat = None

    if target and is_file:
        # pyreadstat.write_sav expects a file path (not an open file object),
        # so pass the target path straight through.
        if as_pyreadstat:
            pyreadstat.write_sav(
                df=df,
                dst_path=target,
                file_label=as_pyreadstat.file_label,
                column_labels=as_pyreadstat.column_labels,
                compress=compress,
                note=as_pyreadstat.notes,
                variable_value_labels=as_pyreadstat.variable_value_labels,
                missing_ranges=as_pyreadstat.missing_ranges,
                variable_display_width=as_pyreadstat.variable_display_width,
                variable_measure=as_pyreadstat.variable_measure)
        else:
            pyreadstat.write_sav(df=df,
                                 dst_path=target,
                                 compress=compress)

    else:
        with tempfile.NamedTemporaryFile() as temp_file:
            if as_pyreadstat:
                pyreadstat.write_sav(
                    df=df,
                    dst_path=temp_file.name,
                    file_label=as_pyreadstat.file_label,
                    column_labels=as_pyreadstat.column_labels,
                    compress=compress,
                    note=as_pyreadstat.notes,
                    variable_value_labels=as_pyreadstat.variable_value_labels,
                    missing_ranges=as_pyreadstat.missing_ranges,
                    variable_display_width=as_pyreadstat.variable_display_width,
                    variable_measure=as_pyreadstat.variable_measure)
            else:
                pyreadstat.write_sav(df=df,
                                     dst_path=temp_file.name,
                                     compress=compress)

            if target:
                target.write(temp_file.read())
            else:
                target = BytesIO(temp_file.read())

            return target
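A minimal usage sketch for from_dataframe above; the DataFrame contents and file name are illustrative assumptions:

import pandas as pd

df = pd.DataFrame({'age': [34, 29], 'score': [88.5, 91.0]})

# Write straight to a .sav file on disk (returns None for a file target)...
from_dataframe(df, target='example.sav')

# ...or, with no target, get the dataset back as an in-memory BytesIO object.
buffer = from_dataframe(df, compress=True)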
Example #5
# input

df96, meta96 = pyreadstat.read_sav('./data/1996/tscs1996q2.sav',
                                   apply_value_formats=False,
                                   formats_as_category=False
                                   )

# output
pyreadstat.write_sav(aa,
                     "data1xxxx.sav",
                     column_labels=var_lt, # labels
                     variable_value_labels=val_dict,
                     compress=True
                     )
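The names aa, var_lt, and val_dict are not defined in the snippet above. A hedged sketch of how they might be built from the metadata object that read_sav returns (column_labels and variable_value_labels are pyreadstat metadata attributes; treating aa as a copy of the input frame is an assumption):

aa = df96.copy()                          # assumed: the (possibly processed) frame to write
var_lt = meta96.column_labels             # list of column labels from the source file
val_dict = meta96.variable_value_labels   # {variable: {value: label}} mapping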
Example #6
#Run the file through checks against our master dictionary
inputFILE_3 = masterschool_compare(inputFILE_2, logVECTOR)

#Last dataframe before write-out; be sure it is the one you want
finalFILE = inputFILE_3

#Make output file name
log.info('Writing File........')
finalFILEstr = os.path.join(os.getcwd(), 'dQ_' + inputFILE)

print(finalFILEstr)

#Output new modified file
if ftype == ".csv":
    finalFILE.to_csv(finalFILEstr)
elif ftype == ".sav":
    pyreadstat.write_sav(finalFILE, finalFILEstr)
elif ftype == ".xlsx":
    # Use ExcelWriter as a context manager; writer.save() was removed in pandas 2.0.
    with pd.ExcelWriter(finalFILEstr, engine='xlsxwriter') as writer:
        finalFILE.to_excel(writer, sheet_name='Sheet1')

#FUTURE GOALS
#Add student data reading and processing
#Student id handling
#add on minority or other demographic issues (?)

#For students, compare DOB against grade (what if students fail)
#For schools, compare against grades applicable

log.info('Procedure Complete')
Example #7
#####

# MAIN ROUTINE
if compile_trimmed_files == 'yes':
    print('Compiling files:')
    dfs = []
    files = sorted(glob.glob("*_trimmed.csv"))
    if len(files) == 0:
        print('ERROR: No trimmed files to compile...')
    else:
        for file in files:
            print('    Adding file', file, '...')
            # rstrip() removes a *set of characters*, not a suffix, and can eat
            # trailing letters of the ID; slice the suffix off instead.
            ID = file[:-len('_trimmed.csv')]
            df = pd.read_csv(file)
            df['ID'] = ID
            dfs.append(df)
        compiled_df = pd.concat(dfs, axis=0, ignore_index=True, sort=False)
        cols = compiled_df.columns.tolist()
        cols = cols[-1:] + cols[:-1]
        compiled_df = compiled_df[cols]
        compiled_df.columns = compiled_df.columns.str.replace(' ', '_')
        compiled_df.columns = compiled_df.columns.str.replace('-', '_')
        compiled_df.columns = compiled_df.columns.str.replace('/', '_')
        print('Writing SPSS output...')
        compiled_df = compiled_df.astype(str)
        pyreadstat.write_sav(compiled_df, compiled_file)

print('SCRIPT RUN COMPLETE')
Example #8
        print("it's empty after")
        emptyStates.append(fullFileName[-6:-4])
        os.remove(fullFileName)  # DELETE FILE
        continue

    # Reset indices - Important
    df.reset_index(level=0, drop=True, inplace=True)
    print("it worked")

    # Append each to a master dataframe (DataFrame.append was removed in
    # pandas 2.0; pd.concat replaces it)
    if populated == 1:
        dfMaster = pd.concat([dfMaster, df], ignore_index=True)
    else:
        dfMaster = df
        populated = 1

    # Write the new SPSS changes back to the same file
    pyreadstat.write_sav(df, fullFileName)

# Write master df to a final SPSS file
pyreadstat.write_sav(dfMaster, "./finalFilteredSPSS.sav")

# Prints empty states
# print("The states that did not respond yet are: ", emptyStates)

# Write master df to a json file
# json_file = dfMaster.to_json()
# print(json_file)
# with open("./finalFilteredSPSSjson.txt", 'w') as outfile:
#     json.dump(json_file, outfile)