Example #1
def main():
    global limit_step
    global complex_messages

    # configs
    config = LoadConfigs('config.ini')
    template_paths = GetConfigParameter(config, 'Templates', 'paths').split(',')
    filemaxlines = int(GetConfigParameter(config, 'OutputFile', 'filemaxlines'))
    delimiter = GetConfigParameter(config, 'OutputFile', 'delimiter')
    quotechar = GetConfigParameter(config, 'OutputFile', 'quotechar')
    outputdir = GetConfigParameter(config, 'OutputFile', 'outputdir')
    analitics_path = outputdir + '/' + GetConfigParameter(config, 'Analitics', 'filename')
    metrics = GetConfigParameter(config, 'Analitics', 'metrics').split(',') # add check if it is in query
    limit_step = int(GetConfigParameter(config, 'ClickHouse', 'limit_step'))
    clickhouse_host = GetConfigParameter(config, 'ClickHouse', 'host')
    colums = GetConfigParameter(config, 'ClickHouse', 'colums').split(',')
    date_start = sys.argv[1]
    date_end = sys.argv[2]

    complex_messages = []
    total_count = limit_step*2

    GetTemplatesFromCSV(template_paths)

    client = ClickHouseClient(clickhouse_host, on_progress=OnProgress)
    query_format = GenerateQueryFormat(config, colums)

    filename = GenerateFileName(outputdir, date_start, date_end)
    csvwriter = CSVWriter(filemaxlines, filename, delimiter, quotechar, u'\ufeff', colums + ['FullText', 'MessageType', 'Template'])
    analitics = Analitics(analitics_path, metrics)
    for single_date in DateRange(date_start, date_end):
        date_str = single_date.strftime("%Y-%m-%d")
        ColoredPrint('\n[Determiner]', 'Working on date(' + date_str + ')', bcolors.OKGREEN, bcolors.WARNING)
        limit_count = 0
        msg_count = 0
        while limit_count < total_count:
            query = query_format.format(date_str, limit_count, limit_step)
            result = client.select(query, on_progress=OnProgress, send_progress_in_http_headers=1)
            print()
            data_len = len(result.data)
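            # when a page comes back short (data_len < limit_step), total_count
            # falls below the next limit_count and the pagination loop ends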
            total_count = limit_count + data_len + 1
            counter = Counter(data_len, 0.2)
            for v in result.data:
                message = Message(colums, v)
                id, c_message = ToComplexMessage(message)
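                # flush a multi-part message only once all of its parts have arrived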
                if c_message.isFullFiled():
                    operator_id = message.get('OperatorGroupId')
                    originator = message.get('OutgoingOriginator')
                    c_message.determineTemplate(GetTemplates(operator_id, originator))
                    c_message.writePartsToCsvFile(csvwriter)
                    analitics.addData(date_str[:-3], c_message.getParameters(metrics), c_message.getType(), c_message.getCount())
                    complex_messages.pop(id)
                msg_count += 1
                counter.step(bcolors.OKGREEN + '[' + date_str + '] ' + bcolors.ENDC + str(msg_count) + ' messages handled')
            counter.lastTell(bcolors.OKGREEN + '[' + date_str + '] ' + bcolors.ENDC + str(msg_count) + ' messages handled')
            del result
            limit_count += limit_step
    WriteDownAllRemainingData(csvwriter, analitics, metrics)
    csvwriter.close()
    SaveAnalitics(analitics, delimiter, quotechar)
Example #2
def main():
    tables = TableImporter()
    writer = CSVWriter()

    table = tables.get_table()

    while table is not None:
        writer.write(compute(table))
        table = tables.get_table()
Example #3
    def do_write(self, filename):
        """
        Writes the results into the specified file
        usage: write <filename>
        """
        print("Writing the contents to " + filename)
        csvWriter = CSVWriter()
        print("Writing the results")
        print("Found %i results" % len(self.results))

        try:
            csvWriter.write_to_file(filename, self.results)
            print("All results written to file succesfully!")
        except PermissionError:
            print("\nNo permission to write to file. Might it be accidently left open?")
Example #4
def spawn_collectors(c):
    root_url = "http://%s:%s/%s" % (c['hostname'], c['port'], c['base_url'])
    pprint(c)
    for data_type_name in c['data_type']:
        data_type = c['data_type'][data_type_name]

        if not data_type['enable']:
            continue

        filter_dict = data_type.get('filter', None)

        for endpoint_name in data_type['endpoints']:
            endpoint = data_type['endpoints'][endpoint_name]

            url = root_url + endpoint['url']
            interval = endpoint['interval']

            prom_key = "%s:%s" % (data_type_name, endpoint_name)
            labels = endpoint['labels']
            values = endpoint['values']
            print(data_type_name, endpoint_name)
            pc = PrometheusClient(c['prometheus'], prom_key, labels, values)
            csv = CSVWriter(c['csvwriter'], prom_key, labels + values)

            if data_type_name == 'queue' and endpoint_name == 'config':
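                # queue config data needs a dedicated collector; every other
                # endpoint is handled by the generic Collector below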
                worker = QueueConfigCollector(c['dpid'], url, interval, pc,
                                              csv, filter_dict)
                worker.start()
                continue

            worker = Collector(c['dpid'], url, interval, pc, csv, filter_dict)
            worker.start()
Example #5
def verify():
    jama = Jama()
    csv = CSVWriter()
    projects = jama.getProjects()
    csv.write("projects.csv", projects)
    project_ids = [project["id"] for project in projects]
    item_type_ids = [item_type["id"] for item_type in jama.getItemTypes()]
    for item_type_id in item_type_ids:
        csv.write("{}.csv".format(item_type_id), jama.getItems(item_type_id))
    relationships = []
    for project_id in project_ids:
        # if the relationships file gets too big you can create a file for each project's relationships
        # instead of extending this list
        relationships.extend(jama.getRelationships(project_id))
    csv.write("relationships.csv", relationships)

    # if the comments file gets too big you can split the list and csv.write() each half
    csv.write("comments.csv", jama.getComments())
Example #6
File: main.py Project: gugos/crc-utility
def main():
    host = input('Hostname: ')
    port = input('Port: ')
    username = input('Username: ')
    # the source page redacted this line; a plain input() prompt is assumed
    password = input('Password: ')
    auth_list = [
        '',
        username,
        password,
    ]

    with paramiko.SSHClient() as ssh:
        try:
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(host,
                        port=int(port),
                        username=username,
                        password=password)
        except Exception as ex:
            print(ex)
            sys.exit(1)

        session = ssh.invoke_shell()
        dir_path = os.getcwd() + os.sep + time.strftime(
            '%Y-%m-%d_%H:%M') + '_output'
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)
        csv_writer = CSVWriter(
            dir_path, ['HOSTNAME', 'TELNET', 'SSH', 'VERSION', 'SERIALNUMBER'])
        session_manager = SessionManager(session, auth_list, csv_writer,
                                         dir_path)

        try:
            with open('hostnames.txt') as file:
                hostnames = [hostname.rstrip('\n') for hostname in file]
        except Exception as ex:
            print(ex)
            sys.exit(1)

        with concurrent.futures.ThreadPoolExecutor() as executor:
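            # one worker thread per hostname, each driving session_manager.manage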
            executor.map(session_manager.manage, hostnames)
        csv_writer.close()
Example #7
def main():
    degree = 60

    tables = TableImporter()
    writer = CSVWriter()

    data_to_write = []
    table = tables.get_table()
    while table is not None:
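        # derive the launch velocity from the x data, then record the RMS gap
        # between the measured and the predicted y positions for this table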
        x_velocity = compute_x_velocity(table)
        initial_velocity = compute_initial_velocity(x_velocity, degree)
        times = get_times(table)
        ys = get_ys(table)
        theory_ys = compute_y_at_times(initial_velocity, degree, times)
        data_to_write.append(rms(ys, theory_ys))

        table = tables.get_table()

    writer.write(data_to_write)
Example #8
def main():
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("-i",
                      "--input",
                      dest="report",
                      help="NUnit XML report file name")
    parser.add_option("-o",
                      "--output",
                      dest="filename",
                      help="Generated Excel file name without extension")
    parser.add_option("-l",
                      "--light",
                      dest="light",
                      default=False,
                      action="store_true",
                      help="Activate light report generation "
                           "(CSV files instead of Excel workbook)")

    (options, args) = parser.parse_args()

    # Exit if options are missing or invalid
    if not check_options(options):
        parser.print_help()
        sys.exit(1)

    # Read NUnitTestAssembly from input report
    assemblies = read_assemblies(options.report)

    # Create the necessary generators
    generators = create_generators(assemblies)

    # Generate the appropriate report
    writer = CSVWriter(generators) if options.light else ExcelWriter(generators)
    clean_filename(options)
    writer.generate_report(options.filename)

    print("[NUnit-Stats]: Bye")
Example #9
def generate_sets(set_length=200, max_rows=10000, max_folders=1000):
    writer = CSVWriter()

    folder_name = writer.create_folder()
    csv_name = writer.create_csv_file(folder_name)

    reader = WikiReader(min_words_per_page=300)

    file_name = f"{folder_name}/{csv_name}.csv"

    for page_tokens in reader.get_and_reformat_pages(set_length):
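        # roll over to a fresh CSV once the current file exceeds max_rows, and
        # to a fresh folder once it already holds more than max_folders files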
        if writer.count_rows(file_name) > max_rows:
            if writer.count_csv_in_folder(folder_name) > max_folders:
                folder_name = writer.create_folder()

            csv_name = writer.create_csv_file(folder_name)

            file_name = f"{folder_name}/{csv_name}.csv"

        writer.add_rows(file_name, list(get_lines_separated(page_tokens)))
Example #10
def main():
    r = 0.038 / 2
    d = 0.024
    mark = 30

    tables = TableImporter()
    writer = CSVWriter()

    table = tables.get_table()

    writer.string_write(["theory", "exp", "error"])

    while table is not None:
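        # compare the measured descent time against theoretical_time() and
        # record the percentage error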
        times = get_1d_data(table, 0)
        ys = get_1d_data(table, 2)
        h = get_h(mark)
        valid_ys = filter_descending_movement(ys)
        valid_ys_in_meter = change_to_meter(valid_ys)

        start_point = (times[1], valid_ys_in_meter[1])
        end_point = (times[len(valid_ys) - 2],
                     valid_ys_in_meter[len(valid_ys_in_meter) - 2])

        exp_time = end_point[0] - start_point[0]
        theory_time = theoretical_time(start_point, end_point, h, r, d)
        error = (exp_time - theory_time) / theory_time * 100

        writer.write([theory_time, exp_time, error])

        table = tables.get_table()
Example #11
async def get_artists_data(client):
    queries_count = ARTISTS_NEEDED // 200
    offset = 0
    csv_writer = CSVWriter()
    for idx in range(queries_count):
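        # each iteration fetches one 200-artist page (offset stays 0; idx moves the window)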
        artists_list = list()
        resp = await client.artists_list(offset=offset+200*idx)
        for artist in resp['obj']['data']:
            artist_id = artist['chartmetric_artist_id']
            fan_stats = await collect_fan_stats(artist_id, client)
            artist.update(fan_stats)
            tracks_helper = TracksCollector(artist_id, client)
            tracks_list = await tracks_helper.tracks_list()
            tracks = []
            for track in tracks_list:
                tmp_dict = dict(artist_id=artist_id)
                if 'id' in track:
                    tmp_dict['track_id'] = track['id']
                else:
                    tmp_dict['track_id'] = track['track_id']
                tmp_dict['name'] = track['name']
                if track['release_dates']:
                    tmp_dict['release_dates'] = ' '.join(
                        [d if d else '' for d in track['release_dates']])
                tracks.append(tmp_dict)
            csv_writer.write(tracks, 'artists_tracks')
            artists_list.append(artist)

        print(f'Took {offset+200*idx} offset')
        csv_writer.write(artists_list, 'artists')
    await client.close()
Example #12
def main():
    tables = TableImporter()
    writer = CSVWriter()
    table = tables.get_table(delimiter=",", header=5, footer=7)

    error_rate_result = []
    step = 3

    while table is not None:
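        # work only on the consistent window of each table before averaging the rpm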
        consistent_table = slice(table, 0, [15, 45])
        average_rpm = average(get_column(consistent_table, 3))
        w = rpm_to_rad_per_sec(average_rpm)

        theory_rotation = compute_theory_rotation(step_to_length(step), w)
        experiment_degree = linear_regression(get_column(consistent_table, 0),
                                              get_column(consistent_table, 1))
        experiment_rotation = degree_to_radian(experiment_degree)

        error_rate_result.append(
            (experiment_rotation - theory_rotation) / theory_rotation * 100)

        table = tables.get_table(delimiter=",", header=5, footer=7)

    writer.write(error_rate_result)
Example #13
def main():
    tables = TableImporter()
    writer = CSVWriter()

    result_times = []
    result_degrees = []
    table = tables.get_table()
    while table is not None:
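        # unwrap the per-sample atan readings into a continuous angle that starts at zero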
        times = get_times(table)
        ratio = compute_ratio(table)
        atan = compute_atan(ratio)
        positive_atan = atan_to_positive(atan)
        accumulated_atan = accumulate_degree(positive_atan)
        resulting_degree = set_starting_point_to_zero(accumulated_atan)
        result_times.append(times)
        result_degrees.append(resulting_degree)

        table = tables.get_table()

    result_to_write = [result_times[0]]
    result_to_write.extend(result_degrees)

    writer.write_2d(result_to_write)
Example #14
def print_first_30_photos(preds):
    csv_writer = CSVWriter('first_predictions.csv')
    image_saver = ImageSaver('images/')
    prediction_labels = np.argmax(preds, axis=1)
    test_labels = np.argmax(dataset.test_labels, axis=1)
    test_features = dataset.test_features
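    # CSV header row (Lithuanian): '#', image, predicted class, true class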
    csv_writer.append_to_file(
        ['#', 'Paveikslėlis', 'Nuspėta klasė', 'Tikroji klasė'])
    for index in range(30):
        csv_writer.append_to_file([
            index + 1, '', LABELS[prediction_labels[index]],
            LABELS[test_labels[index]]
        ])
        image_saver.plt.imshow(test_features[index])
        image_saver.save_image(index)
Example #15
import sys

from csv_reader import CSVReader
from csv_writer import CSVWriter
from trustpilot_url_encrypt import TrustPilotURLEncryption
from config import Config

if len(sys.argv) < 2:
    print('Requires at least one filename')
    sys.exit(1)

for filename in sys.argv[1:]:
    input_file = filename
    output_file = filename.replace('.csv', '_links.csv')
    print('reading', input_file)
    config = Config('config.yaml')
    reader = CSVReader(input_file)
    writer = CSVWriter(output_file)
    # write the header only once in the output file
    write_header = True
    # go through the source csv
    for row in reader():
        # build the record Trustpilot expects from this row
        record = {}
        for field in ('name', 'email', 'ref'):
            record[field] = row[config['fields'][field]]
        url = TrustPilotURLEncryption.encrypt(record)
        new_row = []
        for key in reader.header:
            new_row.append(row[key])
        new_row.append(url)
        if write_header:
            # hypothetical calls: the snippet is truncated here, and this
            # project's CSVWriter API is not shown
            writer.write(reader.header + ['trustpilot_link'])
            write_header = False
        writer.write(new_row)
Example #16
            statusFinder = PlayerStatusFinder(canonicalName(playerName),
                                              position)
            result = statusFinder.findStatus(currentYear, keepers,
                                             oneYearAgoTransactions,
                                             twoYearsAgoTransactions, None,
                                             oneYearAgoDraft, twoYearsAgoDraft,
                                             None)
            if result is not None:
                status, draftedYear, twoYearsAgoCost, oneYearAgoCost = result
                players.append(
                    Player(playerName, position, status, draftedYear,
                           twoYearsAgoCost, oneYearAgoCost))
            else:
                raise Exception("Fail", "Missing player: " + playerName)
        teams.append(Team(teamName, owner, players))
    csvWriter = CSVWriter(teams, currentYear)
    csvWriter.writeToCSV("out/status_" + str(currentYear) + ".csv")

    while False:
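        # unreachable by design: the `while False` guard disables this manual lookup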
        player = input("Player?\n")
        if player == "":
            break
        playerStatusFinder = PlayerStatusFinder(canonicalName(player))
        status = playerStatusFinder.findStatus(currentYear, keepers,
                                               oneYearAgoTransactions,
                                               twoYearsAgoTransactions, None,
                                               oneYearAgoDraft,
                                               twoYearsAgoDraft, None)
        statusRepresentation = playerStatusFinder.statusRepresentation(status)
        print(statusRepresentation)
Example #17
from loading import LHELoader
from csv_writer import CSVWriter
from ppjjww_wmuvm_process import *


if __name__ == "__main__":

    try: inpath = sys.argv[1]
    except: logging.err("Input LHE file path expected!"); sys.exit(-1)

    try: outpath = sys.argv[2]
    except: logging.err("Output CSV file path expected!"); sys.exit(-1)


    lhe = LHELoader(open(inpath))
    csv = CSVWriter(open(outpath, "w"))

    logging.info("Loading and parsing events...")
    for i, eventlines in enumerate(lhe.yield_events()):
        if i % 10000 == 0: logging.dbg("%i events read..." % i)

        particles = eventlines_to_particlesdict(
            eventlines, particles_naming_function=name_particles)
        variables = calculate_dependent_variables(particles)
        csv.write_dict(variables)

    logging.info("events_counter = %i" % lhe.events_counter)
Example #18
# imports mirrored from the near-identical script in Example #17
from loading import LHELoader
from csv_writer import CSVWriter
from ppjjww_wmuvm_process import *

if __name__ == "__main__":

    try:
        inpath = sys.argv[1]
    except:
        logging.err("Input LHE file path expected!")
        sys.exit(-1)

    try:
        outpath = sys.argv[2]
    except:
        logging.err("Output CSV file path expected!")
        sys.exit(-1)

    lhe = LHELoader(open(inpath))
    csv = CSVWriter(open(outpath, "w"))

    logging.info("Loading and parsing events...")
    for i, eventlines in enumerate(lhe.yield_events()):
        if i % 10000 == 0: logging.dbg("%i events read..." % i)

        particles = eventlines_to_particlesdict(
            eventlines, particles_naming_function=name_particles)
        variables = calculate_dependent_variables(particles)
        if len(variables) != 0:
            csv.write_dict(variables)

    logging.info("events_counter = %i" % lhe.events_counter)
Example #19
# the snippet is truncated above this line; a minimal argparse preamble is
# reconstructed here, with argument names inferred from the args.* uses below
import argparse
from pathlib import Path

parser = argparse.ArgumentParser()
parser.add_argument("-input_data_root", type=Path, required=True)
parser.add_argument("-output_dir", type=Path, required=True)
parser.add_argument("-output_format",
                    type=str,
                    choices=['csv', 'tfrecords'],
                    required=True)
parser.add_argument("-n_splits", type=int, required=False, default=1)
parser.add_argument("-shuffle", action="store_true")
parser.add_argument("-stratify", action="store_true")
parser.add_argument("-group", type=str, required=False, help="group name")
parser.add_argument("-metadata", type=Path)
parser.add_argument("-path_column", type=str, default="path")
parser.add_argument("-label_column", type=str, default="label")
parser.add_argument("-seed", type=int, required=False)
args = parser.parse_args()

n_splits = args.n_splits

partitions = ImageDatasetBuilder(data_root=args.input_data_root,
                                 n_splits=n_splits,
                                 with_shuffle=args.shuffle,
                                 with_stratify=args.stratify,
                                 group=args.group,
                                 metadata=args.metadata,
                                 path_column=args.path_column,
                                 label_column=args.label_column,
                                 seed=args.seed).build()

if args.output_format == 'csv':
    CSVWriter(n_splits=n_splits, output_dir=args.output_dir).write(partitions)
else:
    ImageTFRecordsWriter(n_splits=n_splits,
                         output_dir=args.output_dir).write(partitions)
Example #20
__author__ = "Janne"

import xml.etree.ElementTree as ET
from ieeeprocessor import IEEEProcessor
from csv_writer import CSVWriter

# Construct the tree by parsing the XML file
tree = ET.parse("searchresult.xml")
# Then, get the root of the tree
root = tree.getroot()

# Then, parse it to Entry
processor = IEEEProcessor()
processor.ProcessSearchResults(root)

print("Found %i entries" % len(processor.entries))

# Okay, now we need to process all the entries into a .csv file
# Initialize the csv writer
csvWriter = CSVWriter()
csvWriter.write_to_file("test.csv", processor.entries)
Example #21
def main():
    tables = TableImporter()
    writer = CSVWriter()

    table = tables.get_table()

    while table is not None:
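        # emit a "period,<values>" row and an "amplitude,<values>" row per table,
        # separated from the next table by a blank row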
        min_max_table = get_min_max(table, 2)

        period = get_period(min_max_table)
        amplitude = get_amplitude(min_max_table, 2)

        writer.string_write(["period"], end=",")
        writer.write(period)

        writer.string_write(["amplitude"], end=",")
        writer.write(amplitude)
        writer.write([])

        table = tables.get_table()