import gzip
import os
import shlex
import subprocess
import sys

import pandas as pd

# DBHandler, BGPDataHandler and getDF are project-local helpers assumed
# to be defined or imported elsewhere in this module.


def generateCSVFromUpdatesFile(updates_file, files_path, readables_path, DEBUG,
                               output_file):
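    """Generate a CSV of BGP updates from an updates file.

    Two input formats are supported: gzipped router debug logs
    ('log.gz') and MRT update dumps ('bgpupd.mrt'). The resulting CSV has
    the columns update_date, update_time, upd_type, bgp_neighbor, peerAS,
    prefix and source_file.

    Returns the path to the generated CSV, 'already_existed' if the CSV
    was already on disk, or '' if the file's updates are already in the
    DB or an IOError occurred while unzipping.
    """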

    sys.stdout.write(
        'Starting to generate CSV file from {}\n'.format(updates_file))

    db_handler = DBHandler('')
    file_already_exists = db_handler.checkIfUpdatesFileExists(
        updates_file,
        BGPDataHandler.getDateFromFileName(updates_file).year)
    db_handler.close()

    # The file's updates are already in the DB; nothing to generate
    if file_already_exists:
        return ''

    filename = os.path.basename(updates_file)
    csv_file = '{}/{}.csv'.format(files_path, filename)

    if os.path.exists(csv_file):
        with open(output_file, 'a') as output:
            output.write(
                'CSV file for updates file {} already exists.\n'.format(
                    updates_file))
        return 'already_existed'

    if updates_file.endswith('log.gz'):
        unzipped_file = '{}/{}'.format(files_path, filename[:-3])

        if not os.path.exists(unzipped_file):
            try:
                with gzip.open(updates_file, 'rb') as gzip_file, \
                        open(unzipped_file, 'wb') as unzipped:
                    unzipped.write(gzip_file.read())
            except IOError:
                # Remove any partially written output so that a later run
                # does not mistake it for a complete decompression
                if os.path.exists(unzipped_file):
                    os.remove(unzipped_file)
                with open(output_file, 'a') as output:
                    output.write(
                        'IOError unzipping file {}\n'.format(updates_file))
                return ''

        filtered_file = '{}.filtered'.format(unzipped_file)

        if not os.path.exists(filtered_file):
            # Equivalent to: grep debugging <unzipped_file> | grep rcvd
            with open(filtered_file, 'w') as filtered:
                cmd = shlex.split('grep debugging {}'.format(unzipped_file))
                p = subprocess.Popen(cmd, stdout=subprocess.PIPE)

                cmd2 = shlex.split('grep rcvd')
                p2 = subprocess.Popen(cmd2, stdin=p.stdout, stdout=filtered)
                # Close our copy of p's stdout so p receives SIGPIPE if p2
                # exits early, then wait for both processes to finish
                p.stdout.close()
                p2.communicate()
                p.wait()

        announcements_file = '{}.announcements'.format(unzipped_file)

        if not os.path.exists(announcements_file):
            with open(announcements_file, 'w') as announcements_f:
                cmd = shlex.split("grep -v withdrawn {}".format(filtered_file))
                p = subprocess.Popen(cmd, stdout=announcements_f)
                p.communicate()

        withdrawals_file = '{}.withdrawals'.format(unzipped_file)

        if not os.path.exists(withdrawals_file):
            with open(withdrawals_file, 'w') as withdrawals_f:
                cmd = shlex.split("grep withdrawn {}".format(filtered_file))
                p = subprocess.Popen(cmd, stdout=withdrawals_f)
                p.communicate()


        # Withdrawal lines look like:
        # 2015/08/01 00:01:31 debugging: BGP: 202.12.28.1 rcvd UPDATE about 199.60.233.0/24 -- withdrawn
        # We first get a TextFileReader to read the file in chunks
        # (in case it is too big)
        withdrawals_reader = pd.read_csv(
            withdrawals_file,
            iterator=True,
            chunksize=1000,
            header=None,
            sep=' ',
            index_col=False,
            usecols=[0, 1, 4, 8],
            names=['update_date', 'update_time', 'bgp_neighbor', 'prefix'])
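        # With sep=' ' the fields of the sample line above map to columns
        # 0 (date), 1 (time), 4 (BGP neighbor) and 8 (prefix), which is
        # why usecols=[0, 1, 4, 8] is paired with those four names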

        # We then put the chunks into a single DataFrame
        withdrawals_df = pd.concat(withdrawals_reader, ignore_index=True)

        withdrawals_df['upd_type'] = 'W'
        withdrawals_df['peerAS'] = -1
        withdrawals_df['source_file'] = updates_file

        # quoting=2 is csv.QUOTE_NONNUMERIC: non-numeric fields are
        # double-quoted, matching the hand-formatted announcement rows
        # appended below
        withdrawals_df.to_csv(csv_file,
                              header=False,
                              index=False,
                              quoting=2,
                              columns=[
                                  'update_date', 'update_time', 'upd_type',
                                  'bgp_neighbor', 'peerAS', 'prefix',
                                  'source_file'
                              ])

        # Open in text mode; 'rb+' was unnecessary since the file is only
        # read, and binary mode would break the string comparisons below
        with open(announcements_file,
                  'r') as announcements_f, open(csv_file, 'a') as csv_f:
            update_date = ''
            update_time = ''
            bgp_neighbor = ''
            peerAS = -1
            prefixes = []

            for line in announcements_f:
                # Skip lines that mention 'flapped'; they do not follow
                # the formats parsed below
                if 'flapped' in line:
                    continue

                line_parts = line.strip().split()

                # A new announcement starts with an UPDATE line followed
                # by one line per announced prefix:
                # 2015/08/01 00:01:26 debugging: BGP: 64.71.180.177 rcvd UPDATE w/ attr: nexthop 64.71.180.177, origin i, path 6939 3491 12389 57617
                # 2015/08/01 00:01:26 debugging: BGP: 64.71.180.177 rcvd 91.106.234.0/24
                # 2015/08/01 00:01:26 debugging: BGP: 64.71.180.177 rcvd 37.1.77.0/24
                # 2015/08/01 00:01:26 debugging: BGP: 64.71.180.177 rcvd 37.1.64.0/20
                # 2015/08/01 00:01:26 debugging: BGP: 64.71.180.177 rcvd 91.106.232.0/21
                if 'UPDATE' in line:
                    # If we were processing another announcement, we write it
                    # to the csv file
                    if len(prefixes) > 0:
                        for prefix in prefixes:
                            csv_f.write('"{}","{}","{}","{}",{},"{}","{}"\n'\
                                        .format(update_date, update_time,
                                                'A', bgp_neighbor, peerAS,
                                                prefix, updates_file))

                    update_date = line_parts[0]
                    update_time = line_parts[1]
                    bgp_neighbor = line_parts[4]
                    if 'path' in line:
                        peerAS = line.split('path')[1].split()[0]

                        if '.' in peerAS:
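                            # asdot notation writes a 32-bit ASN as
                            # '<high>.<low>';
                            # e.g. '3.14' -> 3 * 65536 + 14 = 196622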
                            left, right = peerAS.split('.')
                            peerAS = int(left) * 65536 + int(right)
                        else:
                            peerAS = int(peerAS)
                    else:
                        peerAS = -1
                    prefixes = []

                else:
                    # A prefix line: the prefix is the 7th field; strip the
                    # '...duplicate' marker appended to repeated prefixes
                    prefixes.append(line_parts[6].replace('...duplicate', ''))

            # We have to write to the csv file the last announcement
            if len(prefixes) > 0:
                for prefix in prefixes:
                    csv_f.write('"{}","{}","{}","{}",{},"{}","{}"\n'\
                                .format(update_date, update_time,
                                        'A', bgp_neighbor, peerAS,
                                        prefix, updates_file))
        os.remove(unzipped_file)
        os.remove(filtered_file)
        os.remove(announcements_file)
        os.remove(withdrawals_file)

    elif updates_file.endswith('bgpupd.mrt'):
        readable_file = BGPDataHandler.getReadableFile(updates_file, False,
                                                       readables_path, DEBUG)
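        # getReadableFile is assumed to convert the MRT dump into a
        # pipe-separated text format in which announcements contain '|A|',
        # withdrawals contain '|W|' and session state changes contain
        # 'STATE', which is what the greps below rely on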

        readable_woSTATE = '{}.woSTATE'.format(readable_file)
        if not os.path.exists(readable_woSTATE):
            with open(readable_woSTATE, 'w') as woSTATE:
                cmd = shlex.split('grep -v STATE {}'.format(readable_file))
                p = subprocess.Popen(cmd, stdout=woSTATE)
                p.communicate()

        readable_announcements = '{}.announcements'.format(readable_file)
        if not os.path.exists(readable_announcements):
            with open(readable_announcements, 'w') as announcements:
                cmd = shlex.split('grep \'|A|\' {}'.format(readable_woSTATE))
                p = subprocess.Popen(cmd, stdout=announcements)
                p.communicate()

        announcements_df = getDF(readable_announcements, 'A', updates_file)
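        # getDF is assumed to parse the pipe-separated lines into a
        # DataFrame with the same columns that are written to the CSV below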

        readable_withdrawals = '{}.withdrawals'.format(readable_file)
        if not os.path.exists(readable_withdrawals):
            with open(readable_withdrawals, 'w') as withdrawals:
                cmd = shlex.split('grep \'|W|\' {}'.format(readable_woSTATE))
                p = subprocess.Popen(cmd, stdout=withdrawals)
                p.communicate()

        withdrawals_df = getDF(readable_withdrawals, 'W', updates_file)

        updates_df = pd.concat([announcements_df, withdrawals_df])

        updates_df.to_csv(csv_file,
                          header=False,
                          index=False,
                          quoting=2,
                          columns=[
                              'update_date', 'update_time', 'upd_type',
                              'bgp_neighbor', 'peerAS', 'prefix', 'source_file'
                          ])

        os.remove(readable_file)
        os.remove(readable_woSTATE)
        os.remove(readable_announcements)
        os.remove(readable_withdrawals)

    return csv_file
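
# Minimal usage sketch (hypothetical paths; all arguments are illustrative):
#
#   csv_path = generateCSVFromUpdatesFile(
#       '/data/updates/20150801.bgpupd.mrt',   # updates file to convert
#       '/data/csv',                           # directory for the CSV output
#       '/data/readables',                     # scratch dir for readable dumps
#       False,                                 # DEBUG flag
#       '/data/logs/progress.log')             # log of skipped/failed files
#   if csv_path and csv_path != 'already_existed':
#       print('Generated {}'.format(csv_path))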