def main(file_path, year, output_path):
    # Build the school-census (sc) aggregate tables for a single year and
    # write each one as a bz2-compressed TSV under <output_path>/<year>.
    # NOTE(review): Python 2 code; relies on module-level helpers pre_check,
    # to_df, aggregate and add_column_length defined elsewhere in this module.
    pre_check()
    output_path = os.path.join(output_path, str(year))
    
    if not os.path.exists(output_path): os.makedirs(output_path)
    print "Output Path=", output_path
    # d = pd.HDFStore(os.path.abspath(os.path.join(output_path,'sc_data.h5')))
    print; print '''STEP 1: \nImport file to pandas dataframe'''
    
    # Cache the parsed dataframe as HDF5 so re-runs skip the raw-file parse.
    hdf_filepath = output_path + "/store_df.h5"

    print "LOOKING for HDF file at location ", hdf_filepath

    if os.path.exists(hdf_filepath):
        print "READING HDF"
        df = pd.read_hdf(hdf_filepath, 'table')
    else:
        print "No HDF file. Need to create DF"
        df = to_df(file_path, False)
        print "SAVING HDF to", hdf_filepath
        df.to_hdf(hdf_filepath, 'table')

    print; print "Step 2: aggregate"

    # NOTE(review): agg_rules is not referenced below — presumably imported
    # for its side effects or used indirectly by aggregate(); verify.
    from _aggregate import agg_rules


    # Each letter of a table name maps to the primary-key column it stands for.
    pk_lookup = {"y": "year", "d": "d_id", "b": "bra_id", "c": "course_sc_id", "s": "school_id"}
    
    tables_list = ["yb", "ybd", "yd", "ybs", "yc", "ybc", "ybcd"]

    for table_name in tables_list:
        pk = [pk_lookup[l] for l in table_name]
        print "working on", table_name
        
        # Tables with a demographic dimension ("d") are aggregated once per
        # demographic; the others are aggregated a single time ('' = none).
        dems = ['gender', 'color', 'loc', 'school_type'] if "d" in table_name else ['']
        
        for dem in dems:
            print '''\nSTEP 2: Aggregate {0}'''.format(dem)
            tbl = aggregate(table_name, pk, df, dem)
            
            if "c" in table_name:
                # Re-aggregate with the course id truncated to 2 characters
                # and append, so the output holds both full-depth and
                # 2-digit course rows.
                pk2 = [x for x in pk]
                pk2[pk2.index("course_sc_id")] = df.course_sc_id.str.slice(0, 2)
                tbl_course2 = aggregate(table_name, pk2, df, dem, course_flag=True)

                tbl = pd.concat([tbl, tbl_course2])
            
            tbl = add_column_length(table_name, tbl)
            # tbl.rename(columns={"student_id": "students"}, inplace=True)   
            file_name = table_name + "_" + dem + ".tsv.bz2" if "d" in table_name else table_name + ".tsv.bz2"
            print '''Save {0} to output path'''.format(file_name)
            new_file_path = os.path.abspath(os.path.join(output_path, file_name))
            tbl.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)
# Example #2
def main(file_path, year, output_path, prev_path, prev5_path):
    # Build the higher-education (HEDU) aggregate tables for one year and
    # write each as a bz2-compressed TSV under <output_path>/<year>.
    # NOTE(review): Python 2 code; relies on module-level helpers pre_check,
    # to_df, aggregate, add_column_length, open_prev_df, calc_growth, calc_rca.
    print "\nHEDU YEAR: {0}\n".format(year)
    pre_check()
    output_path = os.path.join(output_path, str(year))

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # Cache the parsed dataframe in an HDF5 store so re-runs skip the parse.
    hdf_store = pd.HDFStore(os.path.abspath(os.path.join(output_path, 'hedu_data.h5')))

    print '''\nImport file to pandas dataframe'''

    if "hedu_df" in hdf_store:
        hedu_df = hdf_store['hedu_df']
    else:
        hedu_df = to_df(file_path, year)
        try:
            hdf_store['hedu_df'] = hedu_df
        except OverflowError:
            # Store could not hold the frame: drop the partial file and
            # continue with the in-memory dataframe only.
            print "WARNING: Unable to save dataframe, Overflow Error."
            hdf_store.close()
            os.remove(os.path.join(output_path, 'hedu_data.h5'))

    tables_list = ["yb", "yu", "yc", "ybc", "ybu", "yuc", "ybuc"]
    # Each letter of a table name maps to the index column it stands for.
    index_lookup = {"y": "year", "b": "bra_id", "c": "course_hedu_id", "u": "university_id"}

    for table_name in tables_list:
        indexes = [index_lookup[l] for l in table_name]

        print '''\nAggregating {0}'''.format(table_name)
        aggregated_df = aggregate(indexes, hedu_df)

        print '''Adding length column to {0}'''.format(table_name)
        aggregated_df = add_column_length(table_name, aggregated_df)

        print '''Renaming {0} columns'''.format(table_name)
        aggregated_df.rename(columns={"student_id": "students"}, inplace=True)
        # presumably university_id is an aggregated count when the table is
        # not broken out by university — verify against aggregate().
        if 'u' not in table_name:
            aggregated_df.rename(columns={"university_id": "num_universities"}, inplace=True)

        if prev_path:
            print '''\nCalculating {0} 1 year growth'''.format(table_name)
            previous_df = open_prev_df(prev_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df, ['enrolled', 'graduates'])

        if prev5_path:
            print '''\nCalculating {0} 5 year growth'''.format(table_name)
            previous_df = open_prev_df(prev5_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df, ['enrolled', 'graduates'], 5)

        # The fully-sharded ybuc table additionally yields an RCA side file.
        if table_name == "ybuc":
            print '''Calculating RCAs'''
            ybc = calc_rca(aggregated_df, year)
            new_file_path = os.path.abspath(os.path.join(output_path, "ybc_rca.tsv.bz2"))
            ybc.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

        file_name = table_name + ".tsv.bz2"
        print '''Save {0} to output path'''.format(file_name)
        new_file_path = os.path.abspath(os.path.join(output_path, file_name))
        aggregated_df.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)
def main(file_path, trade_flow, year, eci_file_path, pci_file_path,
         output_path, prev_path, prev5_path):
    start = time.time()
    step = 0

    depths = {"bra": [1, 3, 5, 7, 8, 9], "hs": [2, 6], "wld": [2, 5]}

    step += 1
    print '''\nSTEP {0}: \nImport file to pandas dataframe'''.format(step)
    secex_df = to_df(file_path, False)
    secex_df = secex_df.head(1000)
    sys.exit()

    step += 1
    print '''\nSTEP {0}: \nAggregate'''.format(step)
    ybpw = aggregate(secex_df)

    step += 1
    print '''\nSTEP {0}: \nShard'''.format(step)
    [yb, ybp, ybw, yp, ypw, yw] = shard(ybpw, depths)

    if trade_flow == "export":
        step += 1
        print '''\nSTEP {0}: \nCalculate PCI & ECI'''.format(step)
        [yp, yw] = pci_wld_eci(eci_file_path, pci_file_path, yp, yw)

        step += 1
        print '''\nSTEP {0}: \nCalculate domestic ECI'''.format(step)
        yb = domestic_eci(yp, yb, ybp, depths)

    step += 1
    print '''\nSTEP {0}: \nCalculate diversity'''.format(step)
    yb = calc_diversity(ybp, yb, "bra_id", "hs_id", depths)
    yb = calc_diversity(ybw, yb, "bra_id", "wld_id", depths)
    yp = calc_diversity(ybp, yp, "hs_id", "bra_id", depths)
    yp = calc_diversity(ypw, yp, "hs_id", "wld_id", depths)
    yw = calc_diversity(ybw, yw, "wld_id", "bra_id", depths)
    yw = calc_diversity(ypw, yw, "wld_id", "hs_id", depths)

    if trade_flow == "export":
        step += 1
        print '''\nSTEP {0}: \nCalculate Brazilian RCA'''.format(step)
        yp = brazil_rca(yp, year)

    if trade_flow == "export":
        step += 1
        print '''\nSTEP {0}: \nCalculate RCA, diversity and opp_gain aka RDO'''.format(
            step)
        ybp = rdo(ybp, yp, year, depths)
    if trade_flow == "import":
        step += 1
        print '''\nSTEP {0}: \nCalculate RCD calculation'''.format(step)
        ybp = rcd(ybp, yp, year, depths)

    # print ybp.head(20)
    # sys.exit()

    tables = {
        "yb": yb,
        "yp": yp,
        "yw": yw,
        "ybp": ybp,
        "ybpw": ybpw,
        "ybw": ybw,
        "ypw": ypw
    }

    if prev_path:
        step += 1
        print '''\nSTEP {0}: \nCalculate 1 year growth'''.format(step)
        if prev5_path:
            step += 1
            print '''\nSTEP {0}: \nCalculate 5 year growth'''.format(step)
        for t_name, t in tables.items():
            prev_file = os.path.join(prev_path, "{0}.tsv.bz2".format(t_name))
            t_prev = to_df(prev_file, t_name)
            t_prev = t_prev.reset_index(level="year")
            t_prev["year"] = int(year)
            t_prev = t_prev.set_index("year", append=True)
            t_prev = t_prev.reorder_levels(["year"] +
                                           list(t_prev.index.names)[:-1])

            t = calc_growth(t, t_prev)

            if prev5_path:
                prev_file = os.path.join(prev5_path,
                                         "{0}.tsv.bz2".format(t_name))
                t_prev = to_df(prev_file, t_name)
                t_prev = t_prev.reset_index(level="year")
                t_prev["year"] = int(year)
                t_prev = t_prev.set_index("year", append=True)
                t_prev = t_prev.reorder_levels(["year"] +
                                               list(t_prev.index.names)[:-1])

                t = calc_growth(t, t_prev, 5)

    print "computing column lengths"
    for table_name, table_data in tables.items():
        tables[table_name] = add_column_length(table_name, table_data)

    print '''\nFINAL STEP: \nSave files to output path'''
    for t_name, t in tables.items():
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        new_file_path = os.path.abspath(
            os.path.join(output_path, "{0}.tsv.bz2".format(t_name)))
        t.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

    total_run_time = (time.time() - start) / 60
    print
    print
    print "Total runtime: {0} minutes".format(int(total_run_time))
    print
    print
def main(file_path, year, output_path, prev_path, prev5_path):
    # Build the higher-education (HEDU) aggregate tables for one year and
    # write each as a bz2-compressed TSV under <output_path>/<year>.
    # NOTE(review): Python 2 code; relies on module-level helpers pre_check,
    # to_df, aggregate, add_column_length, open_prev_df, calc_growth, calc_rca.
    print "\nHEDU YEAR: {0}\n".format(year)
    pre_check()
    output_path = os.path.join(output_path, str(year))

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # Cache the parsed dataframe in an HDF5 store so re-runs skip the parse.
    hdf_store = pd.HDFStore(
        os.path.abspath(os.path.join(output_path, 'hedu_data.h5')))

    print '''\nImport file to pandas dataframe'''

    if "hedu_df" in hdf_store:
        hedu_df = hdf_store['hedu_df']
    else:
        hedu_df = to_df(file_path, year)
        try:
            hdf_store['hedu_df'] = hedu_df
        except OverflowError:
            # Store could not hold the frame: drop the partial file and
            # continue with the in-memory dataframe only.
            print "WARNING: Unable to save dataframe, Overflow Error."
            hdf_store.close()
            os.remove(os.path.join(output_path, 'hedu_data.h5'))

    tables_list = ["yb", "yu", "yc", "ybc", "ybu", "yuc", "ybuc"]
    # Each letter of a table name maps to the index column it stands for.
    index_lookup = {
        "y": "year",
        "b": "bra_id",
        "c": "course_hedu_id",
        "u": "university_id"
    }

    for table_name in tables_list:
        indexes = [index_lookup[l] for l in table_name]

        print '''\nAggregating {0}'''.format(table_name)
        aggregated_df = aggregate(indexes, hedu_df)

        print '''Adding length column to {0}'''.format(table_name)
        aggregated_df = add_column_length(table_name, aggregated_df)

        print '''Renaming {0} columns'''.format(table_name)
        aggregated_df.rename(columns={"student_id": "students"}, inplace=True)
        # presumably university_id is an aggregated count when the table is
        # not broken out by university — verify against aggregate().
        if 'u' not in table_name:
            aggregated_df.rename(columns={"university_id": "num_universities"},
                                 inplace=True)

        if prev_path:
            print '''\nCalculating {0} 1 year growth'''.format(table_name)
            previous_df = open_prev_df(prev_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df,
                                        ['enrolled', 'graduates'])

        if prev5_path:
            print '''\nCalculating {0} 5 year growth'''.format(table_name)
            previous_df = open_prev_df(prev5_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df,
                                        ['enrolled', 'graduates'], 5)

        # The fully-sharded ybuc table additionally yields an RCA side file.
        if table_name == "ybuc":
            print '''Calculating RCAs'''
            ybc = calc_rca(aggregated_df, year)
            new_file_path = os.path.abspath(
                os.path.join(output_path, "ybc_rca.tsv.bz2"))
            ybc.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

        file_name = table_name + ".tsv.bz2"
        print '''Save {0} to output path'''.format(file_name)
        new_file_path = os.path.abspath(os.path.join(output_path, file_name))
        aggregated_df.to_csv(bz2.BZ2File(new_file_path, 'wb'),
                             sep="\t",
                             index=True)
# Example #5
def main(export_file_path, import_file_path, year, eci_file_path,
         pci_file_path, ypw_file_path, output_path, prev_path, prev5_path):
    output_path = os.path.join(output_path, str(year))
    start = time.time()
    step = 0

    depths = {"bra": [1, 3, 5, 7, 9], "hs": [2, 6], "wld": [2, 5]}

    if not os.path.exists(output_path):
        os.makedirs(output_path)
    d = pd.HDFStore(os.path.join(output_path, 'secex.h5'))
    # if "ymb" in d:
    if "ymbp" in d:
        tables = {}
        tables["ymb"] = d["ymb"]
        tables["ymp"] = d["ymp"]
        tables["ymw"] = d["ymw"]
        tables["ymbp"] = d["ymbp"]
        tables["ymbw"] = d["ymbw"]
        tables["ympw"] = d["ympw"]
        tables["ymbpw"] = d["ymbpw"]
    else:
        step += 1
        print '''\nSTEP {0}: \nImport file to pandas dataframe'''.format(step)
        secex_exports = to_df(export_file_path, False)
        secex_imports = to_df(import_file_path, False)

        step += 1
        print '''\nSTEP {0}: \nMerge imports and exports'''.format(step)
        secex_df = merge(secex_exports, secex_imports)

        step += 1
        print '''\nSTEP {0}: \nAggregate'''.format(step)
        ymbpw = aggregate(secex_df)

        step += 1
        print '''\nSTEP {0}: \nShard'''.format(step)
        [ymb, ymbp, ymbw, ymp, ympw, ymw] = shard(ymbpw)

        step += 1
        print '''\nSTEP {0}: \nCalculate PCI & ECI'''.format(step)
        [ymp, ymw] = pci_wld_eci(eci_file_path, pci_file_path, ymp, ymw, year)

        step += 1
        print '''\nSTEP {0}: \nCalculate diversity'''.format(step)
        ymb = calc_diversity(ymbp, ymb, "bra_id", "hs_id")
        ymb = calc_diversity(ymbw, ymb, "bra_id", "wld_id")
        ymp = calc_diversity(ymbp, ymp, "hs_id", "bra_id")
        ymp = calc_diversity(ympw, ymp, "hs_id", "wld_id")
        ymw = calc_diversity(ymbw, ymw, "wld_id", "bra_id")
        ymw = calc_diversity(ympw, ymw, "wld_id", "hs_id")

        step += 1
        print '''\nSTEP {0}: \nCalculate domestic ECI'''.format(step)
        ymb = domestic_eci(ymp, ymb, ymbp, depths["bra"])

        step += 1
        print '''\nSTEP {0}: \nCalculate domestic ECI'''.format(step)
        ymb = domestic_eci(ymp, ymb, ymbp, depths["bra"])

        step += 1
        print '''\nSTEP {0}: \nCalculate Brazilian RCA'''.format(step)
        ymp = brazil_rca(ymp, ypw_file_path, year)

        step += 1
        print '''\nSTEP {0}: \nCalculate RCA, diversity and opp_gain aka RDO'''.format(
            step)
        ymbp = rdo(ymbp, ymp, year, depths["bra"], ypw_file_path)

        tables = {
            "ymb": ymb,
            "ymp": ymp,
            "ymw": ymw,
            "ymbp": ymbp,
            "ymbpw": ymbpw,
            "ymbw": ymbw,
            "ympw": ympw
        }
        for tbln, tbl in tables.items():
            d[tbln] = tbl

    if prev_path:
        step += 1
        print '''\nSTEP {0}: \nCalculate 1 year growth'''.format(step)
        if prev5_path:
            step += 1
            print '''\nSTEP {0}: \nCalculate 5 year growth'''.format(step)
        for t_name, t in tables.items():
            print t_name
            prev_file = os.path.join(prev_path, "{0}.tsv.bz2".format(t_name))
            t_prev = to_df(prev_file, t_name)
            t_prev = t_prev.reset_index(level="year")
            t_prev["year"] = int(year)
            t_prev = t_prev.set_index("year", append=True)
            t_prev = t_prev.reorder_levels(["year"] +
                                           list(t_prev.index.names)[:-1])

            t = calc_growth(t, t_prev)

            if prev5_path:
                prev_file = os.path.join(prev5_path,
                                         "{0}.tsv.bz2".format(t_name))
                t_prev = to_df(prev_file, t_name)
                t_prev = t_prev.reset_index(level="year")
                t_prev["year"] = int(year)
                t_prev = t_prev.set_index("year", append=True)
                t_prev = t_prev.reorder_levels(["year"] +
                                               list(t_prev.index.names)[:-1])

                t = calc_growth(t, t_prev, 5)

    print "computing column lengths"
    for table_name, table_data in tables.items():
        tables[table_name] = add_column_length(table_name, table_data)

    print '''\nFINAL STEP: \nSave files to output path'''
    for t_name, t in tables.items():
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        new_file_path = os.path.abspath(
            os.path.join(output_path, "{0}.tsv.bz2".format(t_name)))
        t.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

    total_run_time = (time.time() - start) / 60
    print
    print
    print "Total runtime: {0} minutes".format(int(total_run_time))
    print
    print
# Example #6
def main(file_path, year, output_path, prev_path, prev5_path, requireds_only):

    print; print "~~~**** YEAR: {0} ****~~~".format(year); print;
    start = time.time()
    step = 0
    # regions state, meso, micro, planning region, munic
    depths = {
        "bra": [1, 3, 5, 7, 8, 9],
        "cnae": [1, 3, 6],
        "cbo": [1, 4],
        "demo": [1, 4]
    }

    if file_path:
        if not os.path.exists(output_path): os.makedirs(output_path)
        d = pd.HDFStore(os.path.join(output_path, 'rais_df_raw.h5'))
        if "rais_df" in d:
            rais_df = d['rais_df']
        else:
            step+=1; print; print '''STEP {0}: \nImport file to pandas dataframe'''.format(step)
            rais_df = to_df(file_path, False)
            try:
                d['rais_df'] = rais_df
                # d.close()
            except OverflowError:
                print "WARNING: Unable to save dataframe, Overflow Error."
                d.close()
                os.remove(os.path.join(output_path, 'rais_df_raw.h5'))
        # rais_df = to_df(file_path, False)

        if "yb" in d and not requireds_only:
            tables = {"yb":d["yb"], "yo":d["yo"], "yi":d["yi"], "ybi":d["ybi"], "ybo":d["ybo"], "yio":d["yio"], "ybio":d["ybio"]}
        else:
            step+=1; print; print '''STEP {0}: \nAggregate'''.format(step)
            tables = aggregate(rais_df, depths)

            step+=1; print; print 'STEP {0}: \nImportance'.format(step)
            tables["yio"] = importance(tables["ybio"], tables["ybi"], tables["yio"], tables["yo"], year, depths)

            try:
                d["yb"] = tables["yb"]; d["yo"] =  tables["yo"]; d["yi"] =  tables["yi"]; d["ybi"] = tables["ybi"]; d["ybo"] = tables["ybo"]; d["yio"] = tables["yio"]; d["ybio"] = tables["ybio"]
                d.close()
            except OverflowError:
                print "WARNING: Unable to save dataframe, Overflow Error."
                d.close()
                os.remove(os.path.join(output_path, 'rais_df_raw.h5'))

        step+=1; print; print 'STEP {0}: \nRequired'.format(step)
        [tables["ybi"], tables["ybio"]] = required(tables["ybio"], tables["ybi"], tables["yi"], year, depths, output_path)

        # print tables["ybi"].head()
        # sys.exit()

        step+=1; print; print 'STEP {0}: \nDiversity'.format(step)
        tables["yb"] = calc_diversity(tables["ybi"], tables["yb"], "bra_id", "cnae_id", year, depths)
        tables["yb"] = calc_diversity(tables["ybo"], tables["yb"], "bra_id", "cbo_id", year, depths)
        tables["yi"] = calc_diversity(tables["ybi"], tables["yi"], "cnae_id", "bra_id", year, depths)
        tables["yi"] = calc_diversity(tables["yio"], tables["yi"], "cnae_id", "cbo_id", year, depths)
        tables["yo"] = calc_diversity(tables["ybo"], tables["yo"], "cbo_id", "bra_id", year, depths)
        tables["yo"] = calc_diversity(tables["yio"], tables["yo"], "cbo_id", "cnae_id", year, depths)

        step+=1; print; print 'STEP {0}: \nCalculate RCA, diversity and opportunity gain aka RDO'.format(step)
        tables["ybi"] = rdo(tables["ybi"], tables["yi"], year, depths)

        for table_name, table_data in tables.items():
            table_data = add_column_length(table_name, table_data)

        print; print '''FINAL STEP: \nSave files to output path'''
        for t_name, t in tables.items():
            new_file_path = os.path.abspath(os.path.join(output_path, "{0}.tsv.bz2".format(t_name)))
            t.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True, float_format="%.3f")

    if prev_path:
        print; print '''Calculating growth:'''
        for current_year_file_path in findFiles(output_path, '*.tsv.bz2'):
            if "growth" in current_year_file_path: continue
            current_year_file_name = os.path.basename(current_year_file_path)
            prev_year_file_path = os.path.join(prev_path, current_year_file_name)
            prev5_year_file_path = None
            if prev5_path:
                prev5_year_file_path = os.path.join(prev5_path, current_year_file_name)
            if not os.path.exists(prev_year_file_path):
                print "Unable to find", current_year_file_name, "for previous year."
                continue
            tbl_name, tbl_w_growth = calc_growth(year, current_year_file_path, prev_year_file_path, prev5_year_file_path)
            print tbl_name
            new_file_path = os.path.abspath(os.path.join(output_path, "{0}_growth.tsv.bz2".format(tbl_name)))
            tbl_w_growth.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True, float_format="%.3f")
            # os.remove(current_year_file_path)


    print("--- %s minutes ---" % str((time.time() - start)/60))
# Example #7
def main(file_path, trade_flow, year, eci_file_path, pci_file_path, output_path, prev_path, prev5_path):
    start = time.time()
    step = 0
    
    depths = {
        "bra": [1, 3, 5, 7, 8, 9],
        "hs": [2, 6],
        "wld": [2, 5]
    }
    
    step += 1; print '''\nSTEP {0}: \nImport file to pandas dataframe'''.format(step)
    secex_df = to_df(file_path, False)
    secex_df = secex_df.head(1000)
    sys.exit()

    step += 1; print '''\nSTEP {0}: \nAggregate'''.format(step)
    ybpw = aggregate(secex_df)

    step += 1; print '''\nSTEP {0}: \nShard'''.format(step)
    [yb, ybp, ybw, yp, ypw, yw] = shard(ybpw, depths)

    if trade_flow == "export":
        step += 1; print '''\nSTEP {0}: \nCalculate PCI & ECI'''.format(step)
        [yp, yw] = pci_wld_eci(eci_file_path, pci_file_path, yp, yw)

        step += 1; print '''\nSTEP {0}: \nCalculate domestic ECI'''.format(step)
        yb = domestic_eci(yp, yb, ybp, depths)

    step += 1; print '''\nSTEP {0}: \nCalculate diversity'''.format(step)
    yb = calc_diversity(ybp, yb, "bra_id", "hs_id", depths)
    yb = calc_diversity(ybw, yb, "bra_id", "wld_id", depths)
    yp = calc_diversity(ybp, yp, "hs_id", "bra_id", depths)
    yp = calc_diversity(ypw, yp, "hs_id", "wld_id", depths)
    yw = calc_diversity(ybw, yw, "wld_id", "bra_id", depths)
    yw = calc_diversity(ypw, yw, "wld_id", "hs_id", depths)

    if trade_flow == "export":
        step += 1; print '''\nSTEP {0}: \nCalculate Brazilian RCA'''.format(step)
        yp = brazil_rca(yp, year)
    
    if trade_flow == "export":
        step += 1; print '''\nSTEP {0}: \nCalculate RCA, diversity and opp_gain aka RDO'''.format(step)
        ybp = rdo(ybp, yp, year, depths)
    if trade_flow == "import":
        step += 1; print '''\nSTEP {0}: \nCalculate RCD calculation'''.format(step)
        ybp = rcd(ybp, yp, year, depths)
    
    # print ybp.head(20)
    # sys.exit()
    
    tables = {"yb": yb, "yp": yp, "yw": yw, "ybp": ybp, "ybpw": ybpw, "ybw": ybw, "ypw": ypw}
    
    if prev_path:
        step += 1; print '''\nSTEP {0}: \nCalculate 1 year growth'''.format(step)
        if prev5_path:
            step += 1; print '''\nSTEP {0}: \nCalculate 5 year growth'''.format(step)
        for t_name, t in tables.items():
            prev_file = os.path.join(prev_path, "{0}.tsv.bz2".format(t_name))
            t_prev = to_df(prev_file, t_name)
            t_prev = t_prev.reset_index(level="year")
            t_prev["year"] = int(year)
            t_prev = t_prev.set_index("year", append=True)
            t_prev = t_prev.reorder_levels(["year"] + list(t_prev.index.names)[:-1])
            
            t = calc_growth(t, t_prev)
            
            if prev5_path:
                prev_file = os.path.join(prev5_path, "{0}.tsv.bz2".format(t_name))
                t_prev = to_df(prev_file, t_name)
                t_prev = t_prev.reset_index(level="year")
                t_prev["year"] = int(year)
                t_prev = t_prev.set_index("year", append=True)
                t_prev = t_prev.reorder_levels(["year"] + list(t_prev.index.names)[:-1])
                
                t = calc_growth(t, t_prev, 5)

    print "computing column lengths"
    for table_name, table_data in tables.items():
        tables[table_name] = add_column_length(table_name, table_data)

    print '''\nFINAL STEP: \nSave files to output path'''
    for t_name, t in tables.items():
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        new_file_path = os.path.abspath(os.path.join(output_path, "{0}.tsv.bz2".format(t_name)))
        t.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)
    
    total_run_time = (time.time() - start) / 60
    print; print;
    print "Total runtime: {0} minutes".format(int(total_run_time))
    print; print;
def main(file_path, year, output_path, prev_path, prev5_path, requireds_only):

    print
    print "~~~**** YEAR: {0} ****~~~".format(year)
    print
    start = time.time()
    step = 0
    # regions state, meso, micro, planning region, munic
    depths = {
        "bra": [1, 3, 5, 7, 8, 9],
        "cnae": [1, 3, 6],
        "cbo": [1, 4],
        "demo": [1, 4]
    }

    if file_path:
        if not os.path.exists(output_path): os.makedirs(output_path)
        d = pd.HDFStore(os.path.join(output_path, 'rais_df_raw.h5'))
        if "rais_df" in d:
            rais_df = d['rais_df']
        else:
            step += 1
            print
            print '''STEP {0}: \nImport file to pandas dataframe'''.format(
                step)
            rais_df = to_df(file_path, False)
            try:
                d['rais_df'] = rais_df
                # d.close()
            except OverflowError:
                print "WARNING: Unable to save dataframe, Overflow Error."
                d.close()
                os.remove(os.path.join(output_path, 'rais_df_raw.h5'))
        # rais_df = to_df(file_path, False)

        if "yb" in d and not requireds_only:
            tables = {
                "yb": d["yb"],
                "yo": d["yo"],
                "yi": d["yi"],
                "ybi": d["ybi"],
                "ybo": d["ybo"],
                "yio": d["yio"],
                "ybio": d["ybio"]
            }
        else:
            step += 1
            print
            print '''STEP {0}: \nAggregate'''.format(step)
            tables = aggregate(rais_df, depths)

            step += 1
            print
            print 'STEP {0}: \nImportance'.format(step)
            tables["yio"] = importance(tables["ybio"], tables["ybi"],
                                       tables["yio"], tables["yo"], year,
                                       depths)

            try:
                d["yb"] = tables["yb"]
                d["yo"] = tables["yo"]
                d["yi"] = tables["yi"]
                d["ybi"] = tables["ybi"]
                d["ybo"] = tables["ybo"]
                d["yio"] = tables["yio"]
                d["ybio"] = tables["ybio"]
                d.close()
            except OverflowError:
                print "WARNING: Unable to save dataframe, Overflow Error."
                d.close()
                os.remove(os.path.join(output_path, 'rais_df_raw.h5'))

        step += 1
        print
        print 'STEP {0}: \nRequired'.format(step)
        [tables["ybi"],
         tables["ybio"]] = required(tables["ybio"], tables["ybi"],
                                    tables["yi"], year, depths, output_path)

        # print tables["ybi"].head()
        # sys.exit()

        step += 1
        print
        print 'STEP {0}: \nDiversity'.format(step)
        tables["yb"] = calc_diversity(tables["ybi"], tables["yb"], "bra_id",
                                      "cnae_id", year, depths)
        tables["yb"] = calc_diversity(tables["ybo"], tables["yb"], "bra_id",
                                      "cbo_id", year, depths)
        tables["yi"] = calc_diversity(tables["ybi"], tables["yi"], "cnae_id",
                                      "bra_id", year, depths)
        tables["yi"] = calc_diversity(tables["yio"], tables["yi"], "cnae_id",
                                      "cbo_id", year, depths)
        tables["yo"] = calc_diversity(tables["ybo"], tables["yo"], "cbo_id",
                                      "bra_id", year, depths)
        tables["yo"] = calc_diversity(tables["yio"], tables["yo"], "cbo_id",
                                      "cnae_id", year, depths)

        step += 1
        print
        print 'STEP {0}: \nCalculate RCA, diversity and opportunity gain aka RDO'.format(
            step)
        tables["ybi"] = rdo(tables["ybi"], tables["yi"], year, depths)

        for table_name, table_data in tables.items():
            table_data = add_column_length(table_name, table_data)

        print
        print '''FINAL STEP: \nSave files to output path'''
        for t_name, t in tables.items():
            new_file_path = os.path.abspath(
                os.path.join(output_path, "{0}.tsv.bz2".format(t_name)))
            t.to_csv(bz2.BZ2File(new_file_path, 'wb'),
                     sep="\t",
                     index=True,
                     float_format="%.3f")

    if prev_path:
        print
        print '''Calculating growth:'''
        for current_year_file_path in findFiles(output_path, '*.tsv.bz2'):
            if "growth" in current_year_file_path: continue
            current_year_file_name = os.path.basename(current_year_file_path)
            prev_year_file_path = os.path.join(prev_path,
                                               current_year_file_name)
            prev5_year_file_path = None
            if prev5_path:
                prev5_year_file_path = os.path.join(prev5_path,
                                                    current_year_file_name)
            if not os.path.exists(prev_year_file_path):
                print "Unable to find", current_year_file_name, "for previous year."
                continue
            tbl_name, tbl_w_growth = calc_growth(year, current_year_file_path,
                                                 prev_year_file_path,
                                                 prev5_year_file_path)
            print tbl_name
            new_file_path = os.path.abspath(
                os.path.join(output_path,
                             "{0}_growth.tsv.bz2".format(tbl_name)))
            tbl_w_growth.to_csv(bz2.BZ2File(new_file_path, 'wb'),
                                sep="\t",
                                index=True,
                                float_format="%.3f")
            # os.remove(current_year_file_path)

    print("--- %s minutes ---" % str((time.time() - start) / 60))
def main(export_file_path, import_file_path, year, eci_file_path, pci_file_path, ypw_file_path, output_path):
    start = time.time()
    step = 0
    
    depths = {
        "bra": [1, 3, 5, 7, 9],
        "hs": [2, 6],
        "wld": [2, 5]
    }
    
    if not os.path.exists(output_path): os.makedirs(output_path)
    d = pd.HDFStore(os.path.join(output_path, 'secex.h5'))
    # if "ymb" in d:
    if "ymbp" in d:
        tables = {}
        tables["ymb"] = d["ymb"]; tables["ymp"] = d["ymp"]; tables["ymw"] = d["ymw"]; tables["ymbp"] = d["ymbp"]; tables["ymbw"] = d["ymbw"]; tables["ympw"] = d["ympw"]; tables["ymbpw"] = d["ymbpw"]
    else:
        step += 1; print '''\nSTEP {0}: \nImport file to pandas dataframe'''.format(step)
        secex_exports = to_df(export_file_path, False)
        secex_imports = to_df(import_file_path, False)
        # secex_exports = secex_exports.head(1000)
        # secex_imports = secex_imports.head(1000)

        step += 1; print '''\nSTEP {0}: \nMerge imports and exports'''.format(step)
        secex_df = merge(secex_exports, secex_imports)

        step += 1; print '''\nSTEP {0}: \nAggregate'''.format(step)
        ymbpw = aggregate(secex_df)

        step += 1; print '''\nSTEP {0}: \nShard'''.format(step)
        [ymb, ymbp, ymbw, ymp, ympw, ymw] = shard(ymbpw)

        step += 1; print '''\nSTEP {0}: \nCalculate PCI & ECI'''.format(step)
        [ymp, ymw] = pci_wld_eci(eci_file_path, pci_file_path, ymp, ymw, year)

        step += 1; print '''\nSTEP {0}: \nCalculate diversity'''.format(step)
        ymb = calc_diversity(ymbp, ymb, "bra_id", "hs_id")
        ymb = calc_diversity(ymbw, ymb, "bra_id", "wld_id")
        ymp = calc_diversity(ymbp, ymp, "hs_id", "bra_id")
        ymp = calc_diversity(ympw, ymp, "hs_id", "wld_id")
        ymw = calc_diversity(ymbw, ymw, "wld_id", "bra_id")
        ymw = calc_diversity(ympw, ymw, "wld_id", "hs_id")
        
        step += 1; print '''\nSTEP {0}: \nCalculate domestic ECI'''.format(step)
        ymb = domestic_eci(ymp, ymb, ymbp, depths["bra"])

        step += 1; print '''\nSTEP {0}: \nCalculate Brazilian RCA'''.format(step)
        ymp = brazil_rca(ymp, ypw_file_path, year)
    
        step += 1; print '''\nSTEP {0}: \nCalculate RCA, diversity and opp_gain aka RDO'''.format(step)
        ymbp = rdo(ymbp, ymp, year, depths["bra"], ypw_file_path)
        
        tables = {"ymb": ymb, "ymp": ymp, "ymw": ymw, "ymbp": ymbp, "ymbpw": ymbpw, "ymbw": ymbw, "ympw": ympw}
        for tbln, tbl in tables.items():
            d[tbln] = tbl

    print "computing column lengths"
    for table_name, table_data in tables.items():
        tables[table_name] = add_column_length(table_name, table_data)
    
    print '''\nFINAL STEP: \nSave files to output path'''
    for t_name, t in tables.items():
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        new_file_path = os.path.abspath(os.path.join(output_path, "{0}.tsv.bz2".format(t_name)))
        t.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

    total_run_time = (time.time() - start) / 60
    print; print;
    print "Total runtime: {0} minutes".format(int(total_run_time))
    print; print;
# --- 예제 #10 (example #10) — scraper separator artifact, not code ---
def main(file_path, year, output_path, prev_path, prev5_path):
    print "\nSC YEAR: {0}\n".format(year)
    start = time.time()
    pre_check()
    output_path = os.path.join(output_path, str(year))

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    hdf_store = pd.HDFStore(
        os.path.abspath(os.path.join(output_path, 'sc_data.h5')))

    print '''\nImport file to pandas dataframe'''

    if "sc_df" in hdf_store:
        sc_df = hdf_store['sc_df']
    else:
        sc_df = to_df(file_path)
        try:
            hdf_store['sc_df'] = sc_df
        except OverflowError:
            print "WARNING: Unable to save dataframe, Overflow Error."
            hdf_store.close()
            os.remove(os.path.join(output_path, 'sc_data.h5'))

    tables_list = ["yb", "yc", "ys", "ybs", "ybc", "ysc", "ybsc"]
    index_lookup = {"y": "year", "b": "bra_id", "c": "course_sc_id", "s": "school_id"}

    for table_name in tables_list:
        indexes = [index_lookup[l] for l in table_name]

        print '''\nAggregating {0}'''.format(table_name)
        aggregated_df = aggregate(indexes, sc_df)

        print '''Adding length column to {0}'''.format(table_name)
        aggregated_df = add_column_length(table_name, aggregated_df)

        print '''Renaming {0} columns'''.format(table_name)
        aggregated_df.rename(columns={"enroll_id": "enrolled"}, inplace=True)
        aggregated_df.rename(columns={"class_id": "classes"}, inplace=True)
        if 's' not in table_name:
            aggregated_df.rename(columns={"school_id": "num_schools"}, inplace=True)

        if prev_path:
            print '''\nCalculating {0} 1 year growth'''.format(table_name)
            previous_df = open_prev_df(prev_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df, ['enrolled'])

        if prev5_path:
            print '''\nCalculating {0} 5 year growth'''.format(table_name)
            previous_df = open_prev_df(prev5_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df, ['enrolled'], 5)

        file_name = table_name + ".tsv.bz2"
        print '''\nSave {0} to output path'''.format(file_name)
        new_file_path = os.path.abspath(os.path.join(output_path, file_name))
        aggregated_df.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

    time_elapsed = "%s minutes" % str((time.time() - start) / 60)

    print '''\nTotal time %s''' % time_elapsed
    print '''\nSending alert e-mail'''

    client = sendgrid.SendGridClient(os.environ['SENDGRID_API_KEY'])
    message = sendgrid.Mail()

    message.add_to(os.environ.get('ADMIN_EMAIL', '*****@*****.**'))
    message.set_from("*****@*****.**")
    message.set_subject("Scholar census %s ready!" % year)
    message.set_html("Your calculation took %s, please check out the output at the calc-server" % time_elapsed)

    client.send(message)
def main(file_path, year, output_path, prev_path, prev5_path):
    """Aggregate the scholar-census (SC) microdata for one year.

    For every table shape in tables_list, aggregate the dataframe, rename
    count columns, optionally compute 1- and 5-year growth against previous
    runs, and write each result as a bz2-compressed TSV under
    output_path/<year>.  Finishes by sending a notification e-mail.

    file_path   -- raw census file fed to to_df()
    year        -- census year; becomes a subdirectory of output_path
    prev_path   -- directory with last year's outputs (falsy skips growth)
    prev5_path  -- directory with outputs from five years ago (falsy skips)
    """
    print "\nSC YEAR: {0}\n".format(year)
    start = time.time()
    pre_check()
    output_path = os.path.join(output_path, str(year))

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    hdf_store = pd.HDFStore(
        os.path.abspath(os.path.join(output_path, 'sc_data.h5')))

    print '''\nImport file to pandas dataframe'''

    if "sc_df" in hdf_store:
        # Cache hit from a previous run of the same year.
        sc_df = hdf_store['sc_df']
    else:
        sc_df = to_df(file_path)
        try:
            hdf_store['sc_df'] = sc_df
        except OverflowError:
            # Frame too large for the HDF writer: drop the broken cache file.
            print "WARNING: Unable to save dataframe, Overflow Error."
            hdf_store.close()
            os.remove(os.path.join(output_path, 'sc_data.h5'))

    # NOTE(review): on the non-error path hdf_store stays open for the rest
    # of the process — consider closing it here once sc_df is in memory.

    tables_list = ["yb", "yc", "ys", "ybs", "ybc", "ysc", "ybsc"]
    # One letter per index level of each table shape.
    index_lookup = {
        "y": "year",
        "b": "bra_id",
        "c": "course_sc_id",
        "s": "school_id"
    }

    for table_name in tables_list:
        indexes = [index_lookup[l] for l in table_name]

        print '''\nAggregating {0}'''.format(table_name)
        aggregated_df = aggregate(indexes, sc_df)

        print '''Adding length column to {0}'''.format(table_name)
        aggregated_df = add_column_length(table_name, aggregated_df)

        print '''Renaming {0} columns'''.format(table_name)
        aggregated_df.rename(columns={"enroll_id": "enrolled"}, inplace=True)
        aggregated_df.rename(columns={"class_id": "classes"}, inplace=True)
        if 's' not in table_name:
            # school_id is an aggregate count when school is not an index level.
            aggregated_df.rename(columns={"school_id": "num_schools"},
                                 inplace=True)

        if prev_path:
            print '''\nCalculating {0} 1 year growth'''.format(table_name)
            previous_df = open_prev_df(prev_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df,
                                        ['enrolled'])

        if prev5_path:
            print '''\nCalculating {0} 5 year growth'''.format(table_name)
            previous_df = open_prev_df(prev5_path, table_name, year, indexes)
            aggregated_df = calc_growth(aggregated_df, previous_df,
                                        ['enrolled'], 5)

        file_name = table_name + ".tsv.bz2"
        print '''\nSave {0} to output path'''.format(file_name)
        new_file_path = os.path.abspath(os.path.join(output_path, file_name))
        aggregated_df.to_csv(bz2.BZ2File(new_file_path, 'wb'),
                             sep="\t",
                             index=True)

    time_elapsed = "%s minutes" % str((time.time() - start) / 60)

    print '''\nTotal time %s''' % time_elapsed
    print '''\nSending alert e-mail'''

    # Notify the administrator that this year's outputs are ready.
    client = sendgrid.SendGridClient(os.environ['SENDGRID_API_KEY'])
    message = sendgrid.Mail()

    message.add_to(os.environ.get('ADMIN_EMAIL', '*****@*****.**'))
    message.set_from("*****@*****.**")
    message.set_subject("Scholar census %s ready!" % year)
    message.set_html(
        "Your calculation took %s, please check out the output at the calc-server"
        % time_elapsed)

    client.send(message)
# --- 예제 #12 (example #12) — scraper separator artifact, not code ---
def main(file_path, year, output_path):
    """Aggregate the HEDU (higher education) microdata for one year.

    For every table shape in tables_list, aggregate the dataframe (once per
    demographic when the shape includes a "d" level), append the 2-digit
    course rollup for course tables, and write each result as a
    bz2-compressed TSV under output_path/<year>.

    file_path   -- raw census file fed to to_df()
    year        -- census year; becomes a subdirectory of output_path
    output_path -- root directory for this year's outputs
    """
    pre_check()
    output_path = os.path.join(output_path, str(year))

    print "\nYEAR: {0}\n".format(year)
    this_output_path = os.path.join(output_path)
    if not os.path.exists(this_output_path): os.makedirs(this_output_path)

    step = 0
    step += 1
    print '''STEP {0}: Import file to pandas dataframe'''.format(step)
    df = to_df(file_path, year)

    tables_list = [
        "yb", "ybd", "yd", "ybc", "yc", "ybu", "ybcd", "yu", "yuc", "yucd",
        "yud"
    ]
    # One letter per index level of each table shape.
    pk_lookup = {
        "y": "year",
        "d": "d_id",
        "b": "bra_id",
        "c": "course_hedu_id",
        "u": "university_id"
    }

    # NOTE(review): "ybuc" is checked for below but never appears in
    # tables_list, so ybuc stays None and the RCA step at the bottom is
    # dead code — confirm whether "ybuc" was meant to be in the list.
    ybuc = None

    for table_name in tables_list:
        pk = [pk_lookup[l] for l in table_name]
        print "working on", table_name

        # Demographic tables are aggregated once per demographic dimension;
        # others use a single pass with an empty demographic.
        dems = ['gender', 'ethnicity', 'school_type'
                ] if "d" in table_name else ['']

        for dem in dems:
            print '''\nSTEP 2: Aggregate {0}'''.format(dem)
            tbl = aggregate(pk, df, dem)

            if "c" in table_name:
                # Re-aggregate at the 2-digit course level and append the
                # rollup rows to the full-depth table.
                pk2 = [x for x in pk]
                pk2[pk2.index("course_hedu_id")] = df.course_hedu_id.str.slice(
                    0, 2)
                # df2.course_hedu_id = df.course_hedu_id.str.slice(0, 2)
                tbl_course2 = aggregate(pk2, df, dem)

                tbl = pd.concat([tbl, tbl_course2])

            tbl = add_column_length(table_name, tbl)
            tbl.rename(columns={"student_id": "students"}, inplace=True)
            if table_name == "yb":
                # university_id is an aggregate count in the yb table.
                tbl.rename(columns={"university_id": "num_universities"},
                           inplace=True)
            if table_name == "ybuc":
                print tbl.head()
                ybuc = tbl
            file_name = table_name + "_" + dem + ".tsv.bz2" if "d" in table_name else table_name + ".tsv.bz2"
            print '''Save {0} to output path'''.format(file_name)
            new_file_path = os.path.abspath(
                os.path.join(output_path, file_name))
            tbl.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

        # if "c" in table_name:
        #     print '''\nSTEP 3: Aggregate {0}'''
        #     tbl = aggregate(pk, df, '', 2)
        #     tbl = add_column_length(table_name, tbl)
        #     # print tbl.reset_index().course_hedu_id.nunique()
        #     file_name = table_name + "_cid2.tsv.bz2"
        #     print '''Save {0} to output path'''.format(file_name)
        #     new_file_path = os.path.abspath(os.path.join(output_path, file_name))
        #     tbl.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

    if ybuc is not None:
        step += 1
        print '''STEP {0}: Calculating RCAs'''.format(step)
        ybc = calc_rca(ybuc, year)
        new_file_path = os.path.abspath(
            os.path.join(output_path, "ybc_rca.tsv.bz2"))
        ybc.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)
        print "writing", new_file_path
def main(file_path, year, output_path):
    pre_check()
    output_path = os.path.join(output_path, str(year))

    print "\nYEAR: {0}\n".format(year)
    this_output_path = os.path.join(output_path)
    if not os.path.exists(this_output_path): os.makedirs(this_output_path)
    
    step = 0
    step+=1; print '''STEP {0}: Import file to pandas dataframe'''.format(step)
    df = to_df(file_path, year)
    
    tables_list = ["yb", "ybd", "yd", "ybc", "yc", "ybu", "ybcd", "yu", "yuc", "yucd", "yud"]
    pk_lookup = {"y": "year", "d": "d_id", "b": "bra_id", "c": "course_hedu_id", "u": "university_id"}

    ybuc = None

    for table_name in tables_list:
        pk = [pk_lookup[l] for l in table_name]
        print "working on", table_name
        
        dems = ['gender', 'ethnicity', 'school_type'] if "d" in table_name else ['']
        
        for dem in dems:
            print '''\nSTEP 2: Aggregate {0}'''.format(dem)
            tbl = aggregate(pk, df, dem)
            
            if "c" in table_name:
                pk2 = [x for x in pk]
                pk2[pk2.index("course_hedu_id")] = df.course_hedu_id.str.slice(0, 2)
                # df2.course_hedu_id = df.course_hedu_id.str.slice(0, 2)
                tbl_course2 = aggregate(pk2, df, dem)

                tbl = pd.concat([tbl, tbl_course2])
            
            tbl = add_column_length(table_name, tbl)
            tbl.rename(columns={"student_id": "students"}, inplace=True)   
            if table_name == "yb":
                tbl.rename(columns={"university_id": "num_universities"}, inplace=True)   
            if table_name == "ybuc":
                print tbl.head()
                ybuc = tbl
            file_name = table_name + "_" + dem + ".tsv.bz2" if "d" in table_name else table_name + ".tsv.bz2"
            print '''Save {0} to output path'''.format(file_name)
            new_file_path = os.path.abspath(os.path.join(output_path, file_name))
            tbl.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)

        # if "c" in table_name:
        #     print '''\nSTEP 3: Aggregate {0}'''
        #     tbl = aggregate(pk, df, '', 2)
        #     tbl = add_column_length(table_name, tbl)
        #     # print tbl.reset_index().course_hedu_id.nunique()
        #     file_name = table_name + "_cid2.tsv.bz2"
        #     print '''Save {0} to output path'''.format(file_name)
        #     new_file_path = os.path.abspath(os.path.join(output_path, file_name))
        #     tbl.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)        
    
    if ybuc is not None:
        step+=1; print '''STEP {0}: Calculating RCAs'''.format(step)
        ybc = calc_rca(ybuc, year)
        new_file_path = os.path.abspath(os.path.join(output_path, "ybc_rca.tsv.bz2"))
        ybc.to_csv(bz2.BZ2File(new_file_path, 'wb'), sep="\t", index=True)
        print "writing", new_file_path