Code example #1
0
def extract_ride_options(df, opt_df):
    """
    Creates a new column 'Опции_заказа' with the human-readable names of
    all order options.

    For every order row the bit fields 'option_1', 'option_2', 'option_3'
    (10 bits each) and 'opt_4' (7 bits) are inspected; each set bit is
    mapped to its option name via opt_df and the names are joined with
    ', '. Rows with no set bits end up as NaN.

    :param df: orders dataframe (modified in place)
    :param opt_df: options dataframe with 'id_option' and 'name' columns;
        assumed sorted by 'id_option' with a 0-based RangeIndex so that
        positional row i holds id_option == i + 1 — TODO confirm against
        the loader that builds it
    :return: None
    """
    # (bit-field column, number of bits used, id_option offset of bit 0)
    bit_fields = (
        ('option_1', 10, 0),
        ('option_2', 10, 10),
        ('option_3', 10, 20),
        ('opt_4', 7, 30),
    )
    df['Опции_заказа'] = ''
    for idx, row in df.iterrows():
        for col, n_bits, offset in bit_fields:
            for bit in range(n_bits):
                if tk_u.is_bit_setted(row[col], n_of_bit=bit):
                    # Select by id_option value, then by index label —
                    # identical to the original per-field lookups
                    df.loc[idx, 'Опции_заказа'] += opt_df.loc[
                        opt_df['id_option'] == (bit + offset + 1),
                        'name'][bit + offset] + ', '
    # Empty accumulator -> NaN, otherwise strip the trailing ', '
    df['Опции_заказа'] = np.where(
        df.Опции_заказа == '', np.NaN,
        df.Опции_заказа.str.replace(', $', '', regex=True))
Code example #2
0
def unfold_stat_opt(df):
    """
    Adds 3 new columns to df, based on bits set in the 'stat_opt' field.

    Bit 0 -> 'ПРЦ предлагалось', bit 1 -> 'ПРЦ использовано',
    bit 2 -> 'Расчет по базе 2?'. Each column holds 'Да' when the
    corresponding bit is set and NaN otherwise.

    :param df: Pandas DataFrame with orders (modified in place)
    :return: None
    """
    bit_columns = (
        ('ПРЦ предлагалось', 0),
        ('ПРЦ использовано', 1),
        ('Расчет по базе 2?', 2),
    )
    for col_name, bit in bit_columns:
        # b=bit binds the loop variable now (avoids late-binding closures)
        df[col_name] = df.apply(
            lambda row, b=bit: 'Да'
            if tk_u.is_bit_setted(row.stat_opt, n_of_bit=b) else np.NaN,
            axis=1)
Code example #3
0
def modify_and_save_pins(df, car_classes_df, cities_df, geo_df, date):
    """
    Transforms and saves to file pins dataframe.

    Pipeline: merge in car-class and city names, keep only taxi entries,
    map region and incoming-source dictionaries, split the timestamp into
    date and time, tag geo zones, build the key column 'Номер_пина',
    rename columns and dump the result to CSV.

    :param df: Pandas DataFrame with pins
    :param car_classes_df: Pandas DataFrame with car classes codes and names
    :param cities_df: Pandas DataFrame with cities id's and names
    :param geo_df: Pandas DataFrame with geo zones names, their boundary points and city names
    :param date: date to load in "YYYY-MM-DD" format
    :return: None, but saving file to local network server "//bigshare/Выгрузки ТФ/Выгрузки My_TK/'year'/'month'"
    """
    # Merge car classes
    df = df.merge(car_classes_df,
                  left_on='type_auto',
                  right_on='id',
                  how='left')  # retrieve car classes names
    df.drop(columns=['type_auto', 'id'], inplace=True)  # cleaning after merge
    df.rename({'name': 'type_auto'}, axis='columns',
              inplace=True)  # cleaning after merge
    # Merge with cities names df and drop non-taxi entries
    df = df.merge(cities_df, left_on='city', right_on='id', how='left')
    df = df[df.type.str.startswith('taxi', na=False)]
    df.drop(columns=['id', 'type', 'city', 'to_local_time_corr'], inplace=True)
    df.rename(columns={'name': 'city'}, inplace=True)
    # Add 'Регион' field
    df['Регион'] = df.city.map(secrets.region_dict)
    # Incoming source mapping
    df['come_from'] = df.come_from.map(renaming_dicts.incoming_type)
    # Separate 'date' column to date and time
    df['dat'] = pd.to_datetime(df.dat)
    # NOTE(review): zip(*[...]) raises ValueError when df is empty —
    # confirm callers never pass a day with zero pins
    new_dates, new_times = zip(*[(d.date(), d.time()) for d in df['dat']])
    df = df.assign(Дата=new_dates, Время=new_times)
    df.drop(columns='dat', inplace=True)
    # Map geo zones
    df.rename(columns={'x': 'x_in', 'y': 'y_in'}, inplace=True)
    get_zone(df=df, geozone_df=geo_df, mode='in')
    # Generate key field: MMDD + hhmmss + client id
    df['Номер_пина'] = df.Дата.astype(str).str.replace('-', '', regex=True).apply(lambda x: x[-4:]) + \
                       df.Время.astype(str).str.replace(':', '', regex=True) + \
                       df.id_client.astype(str)
    # Final renaming, dropping and saving
    df.rename(renaming_dicts.pin, axis='columns', inplace=True)
    df['Статус'] = 'Пин'
    df = df.replace(r'^\s*$', np.NaN,
                    regex=True)  # replace all empty strings with NaNs
    # df.to_csv(f"data/{date}_пины.csv", sep=';', index=False)
    saving_path = tk_u.set_bigshare_dir(date)
    df.to_csv(f"{saving_path}/{date}_пины.csv", sep=';', index=False)
Code example #4
0
File: manual_load.py  Project: Jack-I/TK_ETL_script
def load_one_date(date):
    """Loads only one particular date onto hard drive (and Google Drive - now disabled).

    Requests unformed orders, pins and orders from the API for the given
    date, plus the reference tables (car classes, cities, options,
    geozones) needed to decode them, then hands each dataset to the
    corresponding df_t.modify_and_save_* transformer.
    \nInputs: selected date in "YYYY-MM-DD" format
    \nOutputs: Nothing"""
    # Pins and unformed, car classes, cities_ids
    logger = logging.getLogger(__name__)
    logger.info('...loading "%s"', date)
    print(f'...loading "{date}"')
    unf_params = {
        'type_query':
        'get_qlick_leads',  # it's name mistake, in fact it loads only unformed orders
        'name': secrets.login,
        'pass': secrets.password,
        'date': date,
        'lang': 'ru'
    }
    unformed_content = tk_u.server_request(secrets.api_url_tail, unf_params)

    # Car classes
    car_classes_df = pd.DataFrame(
        unformed_content['car_classes'])  # 'car classes' in JSON table
    car_classes_df['id'] = car_classes_df['id'].astype(
        'int64')  # for proper sorting
    car_classes_df.sort_values(by='id', ignore_index=True, inplace=True)
    # car_classes_df.to_csv(r"match_tables/car_classes.csv", sep=';', index=False)

    # Cities ids and timezone corrections
    cities_df = pd.DataFrame(unformed_content['cities_ids'])
    cities_df['id'] = cities_df.id.astype(int)
    cities_df.name.replace(r'г\. ', '', regex=True, inplace=True)
    cities_df['to_local_time_corr'] = cities_df.name.map(
        renaming_dicts.time_zones)  # just numeric field
    cities_df['to_local_time_corr'] = pd.to_timedelta(
        cities_df.to_local_time_corr, unit='hour')
    try:
        cities_df.to_csv(
            r"\\bigshare\Выгрузки ТФ\Общая база qvd\cities_ids_and_types.csv",
            sep=';',
            index=False)
    except PermissionError:
        # Best-effort refresh of the shared reference file; not fatal
        logger.warning(
            'Cities ids file is locked by another user. Rewriting failed.')

    # Options for orders
    options_df = pd.DataFrame(unformed_content['options'])
    options_df = options_df[options_df.id_option < 94]
    # DataFrame.append was removed in pandas 2.0 — use pd.concat instead
    options_df = pd.concat([
        options_df,
        pd.DataFrame([{
            'id_option': 10,
            'name': 'Скид.карта'
        }])
    ],
                           ignore_index=True)
    options_df.sort_values(by='id_option', ignore_index=True, inplace=True)
    # options_df.to_csv(r"match_tables/options.csv", sep=';', index=False)

    # Geozones
    geo_params = {
        'type_query': 'get_qlick_geo_zones',
        'name': secrets.login,
        'pass': secrets.password,
        'lang': 'ru'
    }
    geo_content = tk_u.server_request(secrets.api_url_tail, geo_params)
    geo_json = tk_u.decode_decompress(geo_content['data'])
    geo_df = df_t.get_geozones(pd.DataFrame(geo_json), cities_df=cities_df)

    # Unformed orders
    unformed_json = tk_u.decode_decompress(unformed_content['data'],
                                           backup_name='unf',
                                           date=date)
    df_t.modify_and_save_unformed(pd.DataFrame(unformed_json),
                                  car_classes_df=car_classes_df,
                                  cities_df=cities_df,
                                  geo_df=geo_df,
                                  date=date)
    logger.info('%s has loaded ', unf_params["type_query"])

    # Pins
    pin_params = {
        'type_query': 'get_qlick_pins',
        'name': secrets.login,
        'pass': secrets.password,
        'date': date,
        'lang': 'ru'
    }
    pin_content = tk_u.server_request(secrets.api_url_tail, pin_params)
    pin_json = tk_u.decode_decompress(pin_content['data'],
                                      backup_name='pins',
                                      date=date)
    df_t.modify_and_save_pins(pd.DataFrame(pin_json),
                              car_classes_df=car_classes_df,
                              cities_df=cities_df,
                              geo_df=geo_df,
                              date=date)
    logger.info('%s has loaded ', pin_params["type_query"])

    # Orders
    ord_params = {
        'type_query': 'get_qlick_orders',
        'name': secrets.login,
        'pass': secrets.password,
        'date': date,
        'lang': 'ru'
    }
    ord_content = tk_u.server_request(secrets.api_url_tail, ord_params)
    ord_json = tk_u.decode_decompress(ord_content['data'],
                                      backup_name='orders',
                                      date=date)
    df_t.modify_and_save_orders(pd.DataFrame(ord_json),
                                car_classes_df=car_classes_df,
                                cities_df=cities_df,
                                options_df=options_df,
                                geo_df=geo_df,
                                date=date)
    logger.info('%s has loaded', ord_params["type_query"])
Code example #5
0
File: manual_load.py  Project: Jack-I/TK_ETL_script
    logger.info(f'{ord_params["type_query"]} has loaded')


if __name__ == '__main__':
    # Log to a file, appending across runs
    logging.basicConfig(
        filename='TK_manual.log',
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        filemode='a',
        level=logging.INFO)
    # Silence noisy cache messages from the Google API client
    logging.getLogger('googleapiclient.discovery_cache').setLevel(
        logging.ERROR)
    logger = logging.getLogger(__name__)
    print(
        'Hello there!\nTo load one date press Enter.\nTo load an interval enter anything: ',
        end='')
    select = input()
    if select:
        # Any non-empty input: ask for an interval and load every date in it
        date_range = tk_u.set_interval()
        load_start_timestamp = datetime.now()
        for date in date_range:
            load_one_date(date)
    else:
        # Plain Enter: prompt for and load a single date
        date = tk_u.set_date()
        load_start_timestamp = datetime.now()
        load_one_date(date)
    # Log how long the whole load took
    tk_u.manual_logging_timedelta(load_start_timestamp)
    try:
        # Audible "done" signal; playsound is known to raise
        # UnicodeDecodeError on some paths — treat that as non-fatal
        playsound('./data/Duck_is_burning.wav')
    except UnicodeDecodeError:
        logger.error("Damn, Playsound isn't working again")
Code example #6
0
def modify_and_save_unformed(df, car_classes_df, cities_df, geo_df, date):
    """
    Transforms and saves to file unformed orders dataframe.

    Pipeline: normalize column types, merge in car-class and city names,
    keep only taxi entries, split the timestamp, drop duplicates, tag geo
    zones (both pickup and destination), build the key column
    'Номер_неоформленного' and dump the result to CSV.

    :param df: Pandas DataFrame with unformed orders
    :param car_classes_df: Pandas DataFrame with car classes codes and names
    :param cities_df: Pandas DataFrame with cities id's and names
    :param geo_df: Pandas DataFrame with geo zones names, their boundary points and city names
    :param date: date to load in "YYYY-MM-DD" format
    :return: None, but saving file to local network server "//bigshare/Выгрузки ТФ/Выгрузки My_TK/'year'/'month'"
    """
    df['points'] = df['points'].apply(
        len)  # transitional points list to len of that list
    df['type_auto'] = df.type_auto.astype(int)
    # Nullable Int8 keeps NaNs (from empty strings) without float promotion
    df['is_taxo'] = pd.array(df.is_taxo.replace('', np.NaN),
                             dtype=pd.Int8Dtype())  # so I use Int8
    df['base_price'] = df['base_price'].fillna(0).astype(int)
    df['base_price2'] = df['base_price2'].fillna(0).astype(int)
    # FIXME duct tape for compatibility
    if 'is_b2' in df.columns:
        df['is_b2'] = df.is_b2.replace({1.: 'Да'})
    df['proc_a_in'] = df.proc_a_in / 100
    # FIXME duct tape for compatibility
    if 'k_jam' in df.columns:
        df['k_jam'] = df.k_jam.fillna(1.)
    # Extract car serving time from autos_time
    df['autos_time'] = df.apply(
        lambda x: extract_unf_car_time(x.type_auto, x.autos_time),
        axis=1)  # axis ==1 => apply to each row

    df['autos_time'] = pd.array(df.autos_time, dtype=pd.Int16Dtype())
    # Merge with car classes
    df = df.merge(car_classes_df,
                  left_on='type_auto',
                  right_on='id',
                  how='left')  # retrieve car classes names
    df.drop(columns=['type_auto', 'id'], inplace=True)  # cleaning after merge
    df.rename({'name': 'type_auto'}, axis='columns',
              inplace=True)  # cleaning after merge
    # Merge with cities names df and drop non-taxi entries
    df = df.merge(cities_df,
                  left_on='city',
                  right_on='id',
                  how='left',
                  suffixes=('', '_source'))
    df = df[df.type_source.str.startswith('taxi', na=False)]
    df.drop(columns=['id', 'type_source', 'city', 'to_local_time_corr'],
            inplace=True)
    df.rename(columns={'name': 'city'}, inplace=True)
    # Add 'Регион' field
    df['Регион'] = df.city.map(secrets.region_dict)
    # Separate 'date' column to date and time
    df['date'] = pd.to_datetime(df.date)
    # NOTE(review): zip(*[...]) raises ValueError on an empty df — confirm
    # a day with no unformed orders cannot reach this point
    new_dates, new_times = zip(*[(d.date(), d.time()) for d in df['date']])
    df = df.assign(Дата=new_dates, Время=new_times)
    df.drop(columns='date', inplace=True)
    # Drop duplicates!
    df.drop_duplicates(subset=['Дата', 'Время', 'phone'],
                       ignore_index=True,
                       inplace=True)
    # Incoming source mapping
    df['type'] = df.type.map(renaming_dicts.incoming_type)
    # Get rid of possible bug entries
    df = df[df.x_in != 0.]
    # Map geo zones
    get_zone(df=df, geozone_df=geo_df, mode='in')
    get_zone(df=df, geozone_df=geo_df, mode='out')
    # Generate key field: MMDDhhmmss&id (or &phone[-7:] if id == 0)
    df['Номер_неоформленного'] = np.where(
        df.id_client == 0,
        df.Дата.astype(str).str.replace('-', '',
                                        regex=True).apply(lambda x: x[-4:]) +
        df.Время.astype(str).str.replace(':', '', regex=True) +
        df.phone.astype(str).apply(lambda x: x[-7:]),
        df.Дата.astype(str).str.replace('-', '',
                                        regex=True).apply(lambda x: x[-4:]) +
        df.Время.astype(str).str.replace(':', '', regex=True) +
        df.id_client.astype(str))
    # Final renaming, dropping and saving
    df.rename(renaming_dicts.unf, axis='columns', inplace=True)
    df.drop(columns=[
        'option_1', 'option_2', 'option_3', 'c_auto_all', 'proc_a_in_all',
        'id_user'
    ],
            inplace=True)
    df['Статус'] = 'Неоформленный'
    df = df.replace(r'^\s*$', np.NaN,
                    regex=True)  # replace all empty strings with NaNs
    # df.to_csv(f"data/{date}_неоф.csv", sep=';', index=False)
    saving_path = tk_u.set_bigshare_dir(date)
    df.to_csv(f"{saving_path}/{date}_неоф.csv", sep=';', index=False)
Code example #7
0
def get_zone(df, geozone_df, mode='in'):
    """
    Calculates zone for given X and Y coordinates depends on mode and creates column 'Название зоны...'.

    For every row: pick the geozones of the row's city (suburbs of
    Saint-Petersburg/Moscow fall back to the parent city), then run a
    point-in-polygon test against each zone boundary. Unmapped cities and
    points outside every polygon get explicit placeholder labels.

    :df: df for creating new geoname column (pin_unf_df/orders_df), modified in place
    :geozone_df: df with decompressed boundaries, city and geozone names
    :mode: 'in' - generates 'Название зоны подачи', 'out' - generates 'Название зоны назначения'. Default 'in'
    :return: None
    """
    logger = logging.getLogger(__name__)
    geo_cities_array = geozone_df.city.unique()
    for idx, row in df.iterrows():  # iterate through dataframe
        # TODO: bug. Marks out zone by the city of input zone
        if row.city not in geo_cities_array:  # if there are no such city in geozones table
            if row.city in secrets.spb_list:  # if city one of suburbs in big city then city slice = that city
                city_slice = geozone_df[geozone_df['city'] ==
                                        'Санкт-Петербург']
            elif row.city in secrets.msk_list:
                city_slice = geozone_df[geozone_df['city'] == 'Москва']
            elif mode == 'in':
                # City has no geozone markup: label it explicitly and move on
                df.loc[idx, 'Название зоны подачи'] = row.loc[
                    'city'] + ' (неразмеч. город)'
                continue
            elif mode == 'out':
                df.loc[idx, 'Название зоны назначения'] = row.loc[
                    'city'] + ' (неразмеч. город)'
                continue
            else:
                logger.error(
                    f"City {row.city} not in geo_df and no mode setted")
                continue
        else:
            city_slice = geozone_df[geozone_df['city'] == row.city]
        # Shortcut: a city with a single zone needs no point-in-polygon test
        if city_slice.shape[
                0] == 1:  # case if only one zone name tagged to current city
            if mode == 'in':
                df.loc[idx,
                       'Название зоны подачи'] = city_slice.reset_index().loc[
                           0, 'geozone']
                continue
            if mode == 'out':
                df.loc[idx,
                       'Название зоны назначения'] = city_slice.reset_index(
                       ).loc[0, 'geozone']
                continue
        if mode == 'in':
            point_x = row.loc['x_in']
            point_y = row.loc['y_in']
        elif mode == 'out':
            point_x = row.loc['x_out']
            point_y = row.loc['y_out']
        else:
            raise ValueError('Unknown mode!')
        # calculations: test the point against every polygon of the city
        for tmp_idx, tmp_row in city_slice.iterrows():
            if tk_u.is_in_polygon(
                    X=point_x,
                    Y=point_y,
                    polygon=city_slice.loc[tmp_idx, 'compressed_boundary']):
                if mode == 'in':
                    df.loc[idx,
                           'Название зоны подачи'] = city_slice.loc[tmp_idx,
                                                                    'geozone']
                if mode == 'out':
                    df.loc[idx, 'Название зоны назначения'] = city_slice.loc[
                        tmp_idx, 'geozone']
                break
        else:  # runs if for loop ends w/o break => without finding proper polygon
            if mode == 'in':
                df.loc[idx, 'Название зоны подачи'] = row.loc[
                    'city'] + ' (неразмеч. зона)'
            if mode == 'out':
                df.loc[idx, 'Название зоны назначения'] = row.loc[
                    'city'] + ' (неразмеч. зона)'
Code example #8
0
def modify_and_save_orders(df, car_classes_df, cities_df, options_df, geo_df,
                           date):
    """
    Transforms and saves to file orders dataframe
    :param df: Pandas DataFrame with orders
    :param car_classes_df: Pandas DataFrame with car classes codes and names
    :param cities_df: Pandas DataFrame with cities id's and names
    :param options_df: Pandas DataFrame with options id's and names
    :param geo_df: Pandas DataFrame with geo zones names, their boundary points and city names
    :param date: date to load in "YYYY-MM-DD" format
    :return: None, but saving file to local network server "//bigshare/Выгрузки ТФ/Выгрузки My_TK/'year'/'month'"
    """
    df.drop(
        columns=[
            'id_user_out',
            'name_type_auto',
            # * CONSTANTS.gruz_fields,
        ],
        inplace=True)
    df.drop(columns=[x for x in df.columns if x.startswith('g_')],
            inplace=True)  # remove all Gruzovichkoff columns
    df.drop_duplicates(subset='id', keep='last', inplace=True)  # id == Номер
    # regex=True is required: pandas >= 2.0 matches the pattern literally
    # by default, which would leave the escape characters in place
    df['note'] = df['note'].str.replace(
        r'\r\n|\r|\n|\t', ' ', regex=True)  # Delete damn escape-characters
    df['company_answer'] = df['company_answer'].str.replace(
        r'\r\n|\r|\n|\t', ' ', regex=True)
    # Trim names
    df['client_name'] = df['client_name'].str.strip()
    # BUGFIX: Series.replace matches whole cell values, not substrings, so
    # the old .replace(...) never stripped embedded newlines/tabs —
    # use str.replace with regex=True like for 'note' above
    df['client_name'] = df['client_name'].str.replace(r'\r\n|\r|\n|\t',
                                                      ' ',
                                                      regex=True)
    df['contact_client_name'] = df['contact_client_name'].str.strip()
    df['contact_client_name'] = df['contact_client_name'].str.replace(
        r'\r\n|\r|\n|\t', ' ', regex=True)
    # Change date/time types
    lst = [
        'dat', 'dat_add', 'dat_out', 'driver_dat_a_in', 'ed_22', 'dat_close',
        'dat_cancel'
    ]
    df[lst] = df[lst].apply(pd.to_datetime)
    df['dat'] = df.dat.dt.date
    # Transform the time format to the adequate one
    df['time_'] = df.time_.map(tk_u.time_transform)
    # Make 'Date and time of arrival' field
    df['Дата и время подачи'] = df.dat.astype(str) + ' ' + df.time_
    df['Дата и время подачи'] = pd.to_datetime(df['Дата и время подачи'])
    df['Дата и время подачи'] = df['Дата и время подачи'].dt.strftime(
        '%d.%m.%Y %H:%M')
    # Merge with cities names df, drop non-taxi entries, cast moscow time to local
    df['city_'] = df.city_.fillna(0).astype(
        int)  # missed values has id equal 0 (Saint-Petersburg)
    df = df.merge(cities_df,
                  left_on='city_',
                  right_on='id',
                  how='left',
                  suffixes=('', '_source'))
    df = df[df.type.str.startswith('taxi', na=False)]
    # Shift every timestamp column by the city's timezone correction
    df = df.apply(lambda x: x + df.to_local_time_corr if x.name in [
        'dat_add', 'dat_out', 'driver_dat_a_in', 'ed_22', 'dat_close',
        'dat_cancel'
    ] else x)
    df.drop(columns=['id_source', 'type', 'city_', 'to_local_time_corr'],
            inplace=True)
    df.rename(columns={'name': 'city'}, inplace=True)
    # Add 'Регион' field
    df['Регион'] = df.city.map(secrets.region_dict)
    # Coords type cast and drop entries with empty coords
    df.rename(columns={'x_out_': 'x_out', 'y_out_': 'y_out'}, inplace=True)
    df.drop(df[df.x_in == ''].index, axis=0, inplace=True)
    df.drop(df[df.x_out == ''].index, axis=0, inplace=True)
    df.drop(df[df.y_in == ''].index, axis=0, inplace=True)
    df.drop(df[df.y_out == ''].index, axis=0, inplace=True)
    df['x_in'] = df.x_in.astype(float)
    df['x_out'] = df.x_out.astype(float)
    df['y_in'] = df.y_in.astype(float)
    df['y_out'] = df.y_out.astype(float)
    # Pickup zone's percents (by base1 and base2)
    df['hexo_proc_a_in'] = df.hexo_proc_a_in.fillna(1.).astype(float)
    df['k_jam'] = df.k_jam.astype(float).fillna(100) / 100
    # Other type transformations and empty field replacements
    df['park_'] = df.park_.fillna(0).astype(int) + 1
    lst = [
        'dr_minimum', 'p_auto', 'pp_sum', 'pp_min', 'pp_min_4', 'c_auto',
        'c_auto_b', 'pp_sum', 'oper_pay', 'ap_dist', 'client_minimalka',
        'slice_pr_by_hexo', 'base_price', 'base_price2', 'time_ed3',
        'time_ed0', 'time2', 'dist1', 'dist2', 'p_driver_s', 'warn', 'dr_opt',
        'come_from'
    ]
    df = df.apply(lambda x: x.fillna(0).astype(int) if x.name in lst else x)
    df['c_auto_2'] = df.c_auto_2.fillna(-1).astype(int)
    # replace all values in columns with the value of 10th/1st bit
    df['warn'] = df.warn.apply(tk_u.is_bit_setted, args=(10, ))
    df['dr_opt'] = df.dr_opt.apply(tk_u.is_bit_setted, args=(1, ))
    # Driver's part
    # Tips (5%, 10% and 15%) included. We can't count them separately without API improvement
    # 1. Private drivers and our drivers with car is in 'раскат' [1st bit of DR_OPT is set]
    # 1.1 Add (order's and paid waiting's cost) to p_driver_s IF:
    # payment type is 'Наличный' OR 'Залог' OR ('Картой вод' AND driver has personal terminal) [10th bit of WARN is set]
    df['p_driver_s'] = np.where(
        ((df.our_driver != '0') | ((df.our_driver == '0') & df.dr_opt)) &
        ((df.type_money == '0') | (df.type_money == '2') |
         (df.type_money == '12') | ((df.type_money == '5') & df.warn)),
        df.p_driver_s + df.c_auto + df.pp_sum, df.p_driver_s)
    # 1.2 Add (order's and paid services's cost) to p_driver_s IF:
    # payment type 2 is 'Наличный' OR 'Залог'
    # OR ('Картой вод' AND driver has personal terminal) [10th bit of WARN is set]
    df['p_driver_s'] = np.where(
        ((df.our_driver != '0') | ((df.our_driver == '0') & df.dr_opt)) &
        ((df.type_money_b == '0') | (df.type_money_b == '2') |
         (df.type_money_b == '12') | ((df.type_money_b == '5') & df.warn)),
        df.p_driver_s + df.c_auto_b, df.p_driver_s)
    # 2 Cashless payment
    # Subtract from p_driver_s paid services's cost IF payment type is 'Безнал'
    df['p_driver_s'] = np.where(
        ((df.type_money != '0') & (df.type_money != '2') &
         (df.type_money != '12')
         & np.logical_not((df.type_money == '5') & df.warn)),
        df.p_driver_s - df.p_auto, df.p_driver_s)
    # make part of the driver = 0 for cancelled orders
    df['p_driver_s'] = np.where(df.status == '3', 0, df.p_driver_s)
    # partner's part
    df['franch_perc'] = df.franch_perc.astype(float)
    df['Часть_партнера'] = df.c_auto + df.c_auto_b - df.oper_pay
    df.loc[df.c_auto_2 != -1, 'Часть_партнера'] = df.c_auto_2
    df.loc[df.dr_minimum > df['Часть_партнера'],
           'Часть_партнера'] = df.dr_minimum
    df['Часть_партнера'] = df['Часть_партнера'] + df.pp_sum
    df['Часть_партнера'] = round((df['Часть_партнера'] * df.franch_perc) / 100)
    # Required auto type merge
    df['type_auto'] = df.type_auto.astype(int)
    df = df.merge(car_classes_df,
                  left_on='type_auto',
                  right_on='id',
                  how='left',
                  suffixes=('', '_classes'))
    df.drop(columns=['type_auto', 'id_classes'],
            inplace=True)  # cleaning after merge
    df.rename({'name': 'type_auto'}, axis='columns',
              inplace=True)  # cleaning after merge
    # Executed auto type merge
    df['c_type_auto'] = df.c_type_auto.fillna(-1)
    df['c_type_auto'] = df.c_type_auto.astype(int)
    df = df.merge(car_classes_df,
                  left_on='c_type_auto',
                  right_on='id',
                  how='left',
                  suffixes=('', '_classes'))
    df.drop(columns=['c_type_auto', 'id_classes'], inplace=True)
    df.rename(columns={'name': 'c_type_auto'}, inplace=True)
    # Order options merge (option fields arrive as binary strings)
    df['option_1'] = df.option_1.fillna('0').apply(int, base=2)
    df['option_2'] = df.option_2.fillna('0').apply(int, base=2)
    if 'option_3' in df.columns:
        df['option_3'] = df.option_3.fillna('0').apply(int, base=2)
    else:
        df['option_3'] = 0
    df['opt_4'] = df['opt_4'].fillna('0').astype(int)
    extract_ride_options(df=df, opt_df=options_df)
    # Create 'ПРЦ предлагалось', 'ПРЦ использовано' and 'Расчет по базе 2?' columns
    df['stat_opt'] = df.stat_opt.fillna(0).astype(int)
    unfold_stat_opt(df=df)
    # Drivers from 'Обменник'
    df['family_driver'] = np.where(df['driver'] == '-1',
                                   'Водитель из обменника',
                                   df['family_driver'])
    # df.driver.replace('-1', np.NaN, inplace=True)
    # Addresses transform
    df = df.apply(lambda x: x.fillna('') if x.name in
                  ['a_in', 'a_in_house', 'a_out', 'a_out_house'] else x)
    df['Адрес подачи'] = df['a_in'] + ' ' + df['a_in_house']
    df['Адреса назначения'] = df['a_out'] + ' ' + df['a_out_house']
    # Status mapping
    df['status'] = df.status.map(renaming_dicts.ord_status)
    # Our / owner-driver mapping
    df['our_driver'] = df.our_driver.map(renaming_dicts.our_or_owner_driver)
    # Payment type mapping
    df['type_money'] = df.type_money.map(renaming_dicts.payment_type)
    df['type_money_b'] = df.type_money_b.map(renaming_dicts.payment_type)
    # Incoming source mapping
    df['come_from'] = df.come_from.map(renaming_dicts.incoming_type)
    # Get rid of possible bug entries
    df = df[df.x_in != 0.]
    df = df[df.x_out != 0.]
    # Map geo zones
    get_zone(df=df, geozone_df=geo_df, mode='in')
    get_zone(df=df, geozone_df=geo_df, mode='out')
    # Final renaming, dropping and saving
    df.rename(renaming_dicts.orders, axis='columns', inplace=True)
    df.drop(columns=[
        'a_in', 'a_in_house', 'a_out', 'a_out_house', 'stat_opt', 't_work',
        'option_1', 'option_2', 'option_3', 'opt_4', 'warn', 'dr_opt',
        'franch_perc'
    ],
            inplace=True)
    df.replace(r'^\s*$', np.NaN, regex=True,
               inplace=True)  # replace all empty strings with NaNs
    # df.to_csv(f"data/{date}_заказы.csv", sep=';', index=False)
    saving_path = tk_u.set_bigshare_dir(date)
    df.to_csv(f"{saving_path}/{date}_заказы.csv", sep=';', index=False)