Example #1
    def _init_dict(self, data, axes, dtype=None):
        haxis = axes.pop(self._info_axis_number)

        # prefilter if haxis passed
        if haxis is not None:
            haxis = _ensure_index(haxis)
            data = OrderedDict(
                (k, v) for k, v in compat.iteritems(data) if k in haxis)
        else:
            ks = list(data.keys())
            if not isinstance(data, OrderedDict):
                ks = _try_sort(ks)
            haxis = Index(ks)

        for k, v in compat.iteritems(data):
            if isinstance(v, dict):
                data[k] = self._constructor_sliced(v)

        # extract axis for remaining axes & create the slicemap
        raxes = [
            self._extract_axis(self, data, axis=i) if a is None else a
            for i, a in enumerate(axes)
        ]
        raxes_sm = self._extract_axes_for_slice(self, raxes)

        # shallow copy
        arrays = []
        haxis_shape = [len(a) for a in raxes]
        for h in haxis:
            v = values = data.get(h)
            if v is None:
                values = np.empty(haxis_shape, dtype=dtype)
                values.fill(np.nan)
            elif isinstance(v, self._constructor_sliced):
                d = raxes_sm.copy()
                d['copy'] = False
                v = v.reindex(**d)
                if dtype is not None:
                    v = v.astype(dtype)
                values = v.values
            arrays.append(values)

        return self._init_arrays(arrays, haxis, [haxis] + raxes)
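For context, this `_init_dict` appears to be the constructor path of the long-deprecated pandas `Panel`: the info axis (items) is popped off, missing axes are inferred from the dict values, and each item is aligned via `reindex`. A minimal usage sketch, assuming an old pandas release that still ships `Panel` (< 0.25):

import numpy as np
import pandas as pd
from collections import OrderedDict

# Sketch only: pandas.Panel was deprecated in 0.20 and removed in 1.0,
# so this requires an old pandas release.
frames = OrderedDict([
    ('item_a', pd.DataFrame(np.random.randn(4, 3), columns=list('xyz'))),
    ('item_b', pd.DataFrame(np.random.randn(4, 3), columns=list('xyz'))),
])
wp = pd.Panel(frames)   # dict input is routed through _init_dict
print(wp.items)         # the dict keys become the items axis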
Example #2
    def _init_dict(self, data, axes, dtype=None):
        haxis = axes.pop(self._info_axis_number)

        # prefilter if haxis passed
        if haxis is not None:
            haxis = _ensure_index(haxis)
            data = OrderedDict((k, v) for k, v
                               in compat.iteritems(data) if k in haxis)
        else:
            ks = list(data.keys())
            if not isinstance(data, OrderedDict):
                ks = _try_sort(ks)
            haxis = Index(ks)

        for k, v in compat.iteritems(data):
            if isinstance(v, dict):
                data[k] = self._constructor_sliced(v)

        # extract axis for remaining axes & create the slicemap
        raxes = [self._extract_axis(self, data, axis=i)
                 if a is None else a for i, a in enumerate(axes)]
        raxes_sm = self._extract_axes_for_slice(self, raxes)

        # shallow copy
        arrays = []
        haxis_shape = [len(a) for a in raxes]
        for h in haxis:
            v = values = data.get(h)
            if v is None:
                values = np.empty(haxis_shape, dtype=dtype)
                values.fill(np.nan)
            elif isinstance(v, self._constructor_sliced):
                d = raxes_sm.copy()
                d['copy'] = False
                v = v.reindex(**d)
                if dtype is not None:
                    v = v.astype(dtype)
                values = v.values
            arrays.append(values)

        return self._init_arrays(arrays, haxis, [haxis] + raxes)
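Example #2 is the same constructor path; note the `v is None` branch, which fills items that are listed on the passed axis but absent from the dict with all-NaN arrays. A hedged illustration under the same old-Panel assumption:

import numpy as np
import pandas as pd

# Assumes pandas < 0.25 (Panel still available): 'b' is requested on the
# items axis but has no data, so _init_dict backfills it with NaN.
frames = {'a': pd.DataFrame(np.arange(6.0).reshape(3, 2))}
wp = pd.Panel(frames, items=['a', 'b'])
print(wp['b'].isnull().values.all())   # True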
Example #3
# Group by section and hour, so we have one average value per section for each hour of the day
df['hour'] = df['FZ_AB'].str[0:2].astype(int)
df_grouped = df.groupby(['ID_Abschnitt', 'hour']).mean()

# Loop over the grouped data and create an entry with the necessary info for each section
data_json = OrderedDict()
for row in df_grouped.itertuples():

    From = stops_joined.loc[stops_joined.Haltestellen_Id ==
                            row.Haltestellen_Id].index[0]
    To = stops_joined.loc[stops_joined.Haltestellen_Id ==
                          row.Nach_Hst_Id].index[0]
    id_fromTo = str(min(From, To)) + '_' + str(max(From, To))

    # If it does not exist yet (first time this section comes up), create a new entry
    if id_fromTo not in data_json:
        data_json[id_fromTo] = OrderedDict()
        data_json[id_fromTo]['Fr'] = From
        data_json[id_fromTo]['To'] = To
        data_json[id_fromTo]['x_from'] = stops_joined.loc[
            stops_joined.Haltestellen_Id == row.Haltestellen_Id].x.iloc[0]
        data_json[id_fromTo]['y_from'] = stops_joined.loc[
            stops_joined.Haltestellen_Id == row.Haltestellen_Id].y.iloc[0]
        data_json[id_fromTo]['x_to'] = stops_joined.loc[
            stops_joined.Haltestellen_Id == row.Nach_Hst_Id].x.iloc[0]
        data_json[id_fromTo]['y_to'] = stops_joined.loc[
            stops_joined.Haltestellen_Id == row.Nach_Hst_Id].y.iloc[0]

    # In any case, store the average occupancy for this hour (only set once per hour)
    if 'h_' + str(row.Index[1]) not in data_json[id_fromTo]:
        data_json[id_fromTo]['h_' + str(row.Index[1])] = row.Besetzung
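The excerpt above stops once the per-section dict is filled (it also assumes `df` and `stops_joined` were loaded earlier in the script). A hypothetical continuation, mirroring the write-out pattern of Example #4 below; the output filename is an assumption, not taken from the original:

# Hypothetical continuation (not part of the original excerpt).
final = pd.DataFrame.from_dict(data_json, orient='index')
final.to_csv('occupancy_per_section.csv', sep=',')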
Example #4
data = pd.read_csv("../../data/traffic/verkehr_filtered.csv", sep=",",
                   parse_dates=['MessungDatZeit'], date_parser=dateparse)

data['hour'] = data['MessungDatZeit'].dt.hour
#data['day'] = data['MessungDatZeit'].day

df_grouped = data.groupby(['ZSID', 'hour', 'Richtung']).mean()
print(df_grouped.head(30))

df_grouped_all = df_grouped.groupby(['ZSID', 'Richtung']).sum()
print(df_grouped_all.head(30))

data_json = OrderedDict()
for row in df_grouped.itertuples():

    key = row.Index[0] + '_' + row.Index[2]
    if key not in data_json:
        data_json[key] = OrderedDict()
        #data_json[row.Index[0]]['ZSName'] =  row.ZSName
        data_json[key]['Direction'] = row.Index[2]
        data_json[key]['EKoord'] = row.EKoord
        data_json[key]['NKoord'] = row.NKoord
        data_json[key]['all'] = df_grouped_all.loc[
            (row.Index[0], row.Index[2])].AnzFahrzeuge


    data_json[key]['h_' + str(row.Index[1])] = row.AnzFahrzeuge
    
final = pd.DataFrame.from_dict(data_json, orient='index')

final.to_csv('../../data/traffic/traffic_direction.csv', sep=',')
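The `read_csv` call at the top of Example #4 relies on a `dateparse` helper and imports that were cut off from the excerpt. A minimal stand-in, purely an assumption about the original preamble:

# Hypothetical stand-in for the truncated preamble of Example #4.
import pandas as pd
from collections import OrderedDict

def dateparse(value):
    # The timestamp format is a guess; adjust it to the actual
    # MessungDatZeit values in verkehr_filtered.csv.
    return pd.to_datetime(value, format='%Y-%m-%d %H:%M:%S')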