import pandas as pd


def IPv_analysis(IPv_type, exp_n, res_directory, coll, from_d, to_d, ext):
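    """Per-address-family more-specifics analysis (step 6).

    Reads the per-collector file produced by step 5, computes visibility and
    update counts per (monitor, prefix), classifies prefixes into more
    specifics / least specifics / non-specifics, and writes the result to the
    '6.more_specifics_analysis' directory. Relies on the project's 'f' (file
    I/O) and 'exp' (experiment) helper modules, which are assumed to be
    imported elsewhere in this module.
    """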
    input_file_path = res_directory + exp_n + '/5.split_data_for_analysis/' + IPv_type + '/' + coll + '_' + from_d + '-' + to_d + ext
    output_file_path = res_directory + exp_n + '/6.more_specifics_analysis/' + IPv_type + '/' + coll + '_' + from_d + '-' + to_d + '.csv'

    write_flag = f.overwrite_file(output_file_path)

    if write_flag:
        print "Loading " + input_file_path + "..."

        df = f.read_file(file_ext, input_file_path)
        df_sort = df.sort_values(by=['MONITOR', 'PREFIX', 'TIME'])
        df_sort = df_sort.reset_index(drop=True)
        df_sort = df_sort.drop(['Unnamed: 0'], axis=1)

        print "Data loaded successfully"

        # 1.Prefix visibility analysis
        print('Getting visibility per prefix...')
        monitors, prefixes, visibilities_per_prefix, updates_per_prefix, ASes = prefix_visibility_analysis(
            df_sort, exp_n)
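        # prefix_visibility_analysis is expected to return parallel lists,
        # one entry per (monitor, prefix) pair in the sorted update stream.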

        df_prefixes_per_monitor = pd.DataFrame({
            'MONITOR': monitors,
            'PREFIX': prefixes
        })

        # 2.Clustering prefixes into more specifics, least_specifics and uniques (non-specifics)
        pref_types, deeps = clustering_prefixes(df_prefixes_per_monitor)

        df_visibility_per_prefix = pd.DataFrame({
            'MONITOR': monitors,
            'PREFIX': prefixes,
            'VISIBILITY': visibilities_per_prefix,
            'UPDATES': updates_per_prefix,
            'TYPE': pref_types,
            'DEEP': deeps,
            'ORIGIN': ASes
        })

        # 3.Clustering more-specific prefixes into TOP, single-level and
        # more specifics of other more specifics
        df_more_specifics = df_visibility_per_prefix[
            df_visibility_per_prefix['TYPE'] == 'more_specific']
        df_more_specifics = df_more_specifics.reset_index(drop=True)

        pref_types, deeps = clustering_prefixes(df_more_specifics)

        # Replace types for more detailed types
        df_more_specifics['TYPE'] = pref_types

        df_others = df_visibility_per_prefix[
            df_visibility_per_prefix['TYPE'] != 'more_specific']
        df_others = df_others.reset_index(drop=True)

        df_visibility_per_prefix = pd.concat([df_more_specifics, df_others],
                                             ignore_index=True)

        f.save_file(df_visibility_per_prefix, ext, output_file_path)
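
# Example invocation (hypothetical argument values):
# IPv_analysis('IPv4', 'exp1', '/srv/results/', 'rrc00',
#              '20200101.0000', '20200102.0000', '.csv')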

def split_data_for_analysis(exp_name, collector, from_date, to_date,
                            experiment):
    # NOTE: the original 'def' header is missing at this point in the file;
    # the name and parameters are inferred from the step directory and the
    # variables referenced in the body below.
    result_directory = experiment['resultDirectory']
    file_ext = experiment['resultFormat']

    step_dir = '/5.split_data_for_analysis'
    exp.per_step_dir(exp_name, step_dir)

    step_dir = '/5.split_data_for_analysis/IPv4'
    exp.per_step_dir(exp_name, step_dir)

    step_dir = '/5.split_data_for_analysis/IPv6'
    exp.per_step_dir(exp_name, step_dir)

    input_file_path = result_directory + exp_name + '/4.concatenate_RIB_data/' + collector + '_' + from_date + '-' + to_date + file_ext
    output_file_path = result_directory + exp_name + step_dir + '/' + collector + '_' + from_date + '-' + to_date + file_ext
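    # NOTE: step_dir still holds the IPv6 subdirectory at this point, so the
    # path above and the overwrite check below refer to the IPv6 output file.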

    write_flag = f.overwrite_file(output_file_path)

    if write_flag:
        print("Loading " + input_file_path + "...")

        df_advises = f.read_file(file_ext, input_file_path)

        print "Data loaded successfully"

        print "Splitting {} advises...".format(len(df_advises))

        df_IPv4_updates, df_IPv6_updates = separate_IPv_types(df_advises)

        df_IPv4_updates = df_IPv4_updates.drop(['Unnamed: 0'], axis=1)
        df_IPv6_updates = df_IPv6_updates.drop(['Unnamed: 0'], axis=1)
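        # The tail of this step is not present here; presumably the two
        # frames are then written out, one per address family (hypothetical,
        # following the save pattern of the other steps):
        # f.save_file(df_IPv4_updates, file_ext, <IPv4 output path>)
        # f.save_file(df_IPv6_updates, file_ext, <IPv6 output path>)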

def load_data(exp_name, collector, from_date, to_date, experiment):
    # NOTE: another missing 'def' header; the name follows the '/1.load_data'
    # step directory, the parameters are inferred from the body, and the
    # result_directory assignment is restored to match its use below.
    result_directory = experiment['resultDirectory']
    file_ext = experiment['resultFormat']

    # this will be used to filter data that would be out of the experiment window
    from_time = exp.get_experiment_from_time(exp_name)
    to_time = exp.get_experiment_to_time(exp_name)

    # PATH VARIABLES
    file_path = '/srv/agarcia/passive_mrai/bgp_updates/' + collector + '/'
    bgpdump_path = '/srv/agarcia/TFM/bgpdump'
    # bgpdump_path = '/usr/local/bin/bgpdump'
    step_dir = '/1.load_data'
    exp.per_step_dir(exp_name, step_dir)
    output_file_path = result_directory + exp_name + step_dir + '/' + collector + '_' + from_date + '-' + to_date + file_ext

    write_flag = exp.check_date_ok(from_date) and exp.check_date_ok(to_date)
    write_flag = write_flag and f.overwrite_file(output_file_path)

    # Update-dump interval in minutes: RIPE RIS ('rrc*') collectors write a
    # new update file every 5 minutes, Route Views collectors every 15.
    if 'rrc' in collector:
        hop_size = 5
    elif 'route-views' in collector:
        hop_size = 15
    else:
        print('Collector type could not be recognised. Finishing execution...')
        write_flag = False

    if write_flag:

        # UPDATES LOAD
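        # Dump file names embed timestamps like 'YYYYMMDD.HHMM'; take the
        # minutes field of the first and last dump, presumably so the window
        # can be aligned to the collector's hop_size grid.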
        from_min = int(from_date.split('.')[1][2:4])
        to_min = int(to_date.split('.')[1][2:4])