Example No. 1
def main(parsed_args):

    pp_args(parsed_args)

    if 'key' in parsed_args.keys():
        set_key_globals(int(parsed_args['key']))
    else:
        set_key_globals(26)

    if 'old' in parsed_args.keys():
        set_global_command(parsed_args['old'])
    else:
        set_global_command('ax64')

    runs, actions, passalong = genruns(parsed_args)
    comment(total_runs=len(runs), actions=actions, passalong=passalong)

    fname = 'runs.txt'
    comment('W {} runs to {}'.format(len(runs), fname))
    pretty_print('W {} runs to {}'.format(len(runs), fname))

    write_runs_to_file(runs, actions, passalong, fname)
    comment('=' * 16, 'running...')
    runs_output, vectors = do_runs(runs, actions, passalong)
    comment('=' * 16, 'done!')
    write_csv('times.csv', 'NO.,TIME', runs_output)
    write_csv('vectors.csv', 'IP,OP,KEY,ROUNDS,IV,LZS', vectors)

    comment('W {} runtimes to times.csv'.format(len(runs_output)))
    pretty_print('W {} runtimes to times.csv'.format(len(runs_output)))

    comment('W {} test vectors to vectors.csv'.format(len(vectors)))
    pretty_print('W {} test vectors to vectors.csv'.format(len(vectors)))
Example No. 2
def drop_nones():
    rows, header = helpers.load_csv(dirs.dirs_dict["discoveries"]["instagram"])

    new_rows = list()
    for row in rows:
        if row["username"] and row["user_id"]:
            new_rows.append(row)
    helpers.write_csv(dirs.dirs_dict["discoveries"]["instagram"], new_rows, header)

    return
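Examples 2 through 10 all lean on the same helpers.load_csv / helpers.write_csv pair. Here is a minimal sketch of what that pair plausibly looks like, assuming load_csv returns (rows, header) with dict rows and write_csv honors a write_type for appends, mirroring the commented-out block in Example No. 7; the real helpers module may differ:

import csv
import os

def load_csv(path):
    # Read every row as a dict keyed by the header line.
    with open(path, newline='') as f:
        reader = csv.DictReader(f)
        rows = list(reader)
        header = reader.fieldnames or []
    return rows, header

def write_csv(path, rows, header, write_type='w'):
    # When appending ('a+'), only emit the header for a new or empty file.
    need_header = (write_type == 'w' or not os.path.isfile(path)
                   or os.path.getsize(path) == 0)
    with open(path, write_type, newline='') as f:
        writer = csv.DictWriter(f, fieldnames=header)
        if need_header:
            writer.writeheader()
        writer.writerows(rows)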
Example No. 3
def write_ranks(rks_dict):
    discovery_list, discovery_header = helpers.load_csv(dirs.dirs_dict["discoveries"]["instagram"])
    # fixes bug where some influencers are reported as None
    discovery_list = list(filter(lambda infl: bool(infl['username']), discovery_list))

    for i, el in enumerate(discovery_list):
        discovery_list[i]["pagerank"] = rks_dict[el["user_id"]]
    discovery_header.append("pagerank")
    discovery_list.sort(key=lambda k: k["pagerank"], reverse=True)
    helpers.write_csv(dirs.dirs_dict["discoveries"]["instagram"]+"-pageranked", discovery_list, discovery_header)
    return None
Example No. 4
def drop_nones():
    rows, header = helpers.load_csv(dirs.dirs_dict["discoveries"]["instagram"])

    new_rows = list()
    for row in rows:
        if row['username'] and row['user_id']:
            new_rows.append(row)
    helpers.write_csv(dirs.dirs_dict["discoveries"]["instagram"], new_rows,
                      header)

    return
Example No. 5
    def flush_info(self, subdir="profiles"):
        filename = dirs.dirs_dict[subdir][self.network]
        header = ["time_pulled"] + configs.profile_attributes

        to_write = {a: helpers.format_attr(getattr(self, a), a) for a in header}
        
        if not (to_write['user_id'] and to_write['username']):
            return

        helpers.write_csv(filename, [to_write,], header, write_type='a+')

        return to_write
Example No. 6
def dedup(folder, network, on_keys):
    rows, header = helpers.load_csv(dirs.dirs_dict[folder][network])
    if not rows:
        return

    stored_keys = set()
    new_rows = list()
    for row in rows:
        row_key = tuple(row[on_key] for on_key in on_keys)
        if row_key not in stored_keys:
            new_rows.append(row)
            stored_keys.add(row_key)
    helpers.write_csv(dirs.dirs_dict[folder][network], new_rows, header)
    return
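For instance, deduplicating the Instagram discoveries file on the user id alone would look like this (the folder and network keys are assumed from the neighboring examples):

dedup("discoveries", "instagram", on_keys=["user_id"])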
Example No. 7
 def flush_follows(self):
     header = ["follower_id", "follows_id", "time_written"]
     if not self.follows:
         return None
     filename = dirs.dirs_dict["relationships"][self.network]
     all_follow_rows = self.get_follows()
     helpers.write_csv(filename, all_follow_rows, header, write_type='a+')
     # with open(filename, "a+") as f:
     #     writer = csv.writer(f)
     #     if (not os.path.isfile(filename)) or os.path.getsize(filename) == 0:
     #         writer.writerow(header)
     #     for follows_dict in all_follow_rows:
     #         writer.writerow([follows_dict[h] for h in header])
     return None
Example No. 10
def write_ranks(rks_dict):
    discovery_list, discovery_header = helpers.load_csv(
        dirs.dirs_dict["discoveries"]["instagram"])
    # fixes bug where some influencers are reported as None
    discovery_list = list(filter(lambda infl: bool(infl['username']),
                                 discovery_list))

    for i, el in enumerate(discovery_list):
        discovery_list[i]["pagerank"] = rks_dict[el["user_id"]]
    discovery_header.append("pagerank")
    discovery_list.sort(key=lambda k: k["pagerank"], reverse=True)
    helpers.write_csv(
        dirs.dirs_dict["discoveries"]["instagram"] + "-pageranked",
        discovery_list, discovery_header)
    return None
Example No. 11
def main(rolls, write_csv, write_chart, plot_chart):
    """The main entrypoint."""
    print(f"Running dice mode with {rolls} rolls.\n")
    data = run(rolls)
    df = helpers.get_df(data)

    print(df.to_string(index=False))

    if write_csv:
        helpers.write_csv(df, 'dice', rolls)

    if write_chart:
        helpers.write_chart(df, 'dice', rolls)

    if plot_chart:
        helpers.plot_chart(df)
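This variant takes a pandas DataFrame instead of rows plus a header. A hypothetical stand-in, assuming the helper derives the file name from the label and the roll count:

def write_csv(df, label, rolls):
    # e.g. label='dice', rolls=1000 -> dice_1000.csv (assumed naming scheme)
    df.to_csv(f'{label}_{rolls}.csv', index=False)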
Example No. 12
    def flush_info(self, subdir="profiles"):
        filename = dirs.dirs_dict[subdir][self.network]
        header = ["time_pulled"] + configs.profile_attributes

        to_write = {
            a: helpers.format_attr(getattr(self, a), a)
            for a in header
        }

        if not (to_write['user_id'] and to_write['username']):
            return

        helpers.write_csv(filename, [
            to_write,
        ], header, write_type='a+')

        return to_write
Example No. 13
def run():
    if args.tag != 0:
        db.edit_tag(args.tag, 1)
    elif len(args.diff) == 2:
        if args.reg:
            out = db.regressed_matches(args.diff[0], args.diff[1],
                                       args.modules)
            print(out)
        else:
            out = db.calc_diffs(args.diff[0], args.diff[1], args.modules)
            print(out)
        write_csv(out,
                  diffs_columns,
                  output=args.output,
                  sort=False,
                  delim=args.csvdelimiter)
    elif args.target:
        write_csv(db.regressed_matches_for_project(args.target[0],
                                                   args.target[1],
                                                   args.target[2],
                                                   args.modules),
                  diffs_columns,
                  output=args.output,
                  sort=False,
                  delim=args.csvdelimiter)
    elif args.measures != 0:
        dbm = DatabaseMeasure(db, args.measures, args.modules)
        print("Precision for run " + str(args.measures) + ": " +
              str(dbm.precision()))
        print("Recall for run " + str(args.measures) + ": " +
              str(dbm.recall()))
        print("F1score for run " + str(args.measures) + ": " +
              str(dbm.f1score()))
    else:
        write_csv(db.calc_latest_diff(),
                  diffs_columns,
                  output=args.output,
                  sort=False,
                  delim=args.csvdelimiter)
    db.close()
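Examples 13 and 27 share yet another signature: write_csv(rows, columns, output=..., sort=..., delim=...). A sketch under the assumptions that rows are dicts keyed by the column names and that an empty output name means "skip writing":

import csv

def write_csv(rows, columns, output="", sort=True, delim=";"):
    # Assumed semantics: no output file name, no file written.
    if not output:
        return
    if sort:
        rows = sorted(rows, key=lambda r: [r[c] for c in columns])
    with open(output, 'w', newline='') as f:
        writer = csv.writer(f, delimiter=delim)
        writer.writerow(columns)
        for row in rows:
            writer.writerow([row[c] for c in columns])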
Example No. 14
                    continue
                score = score.split('\n')

                homescore = 0
                awayscore = 0
                if len(score) == 2:
                    homescore = 2
                elif len(score) == 3:
                    homescore = 2
                    awayscore = 1
                elif len(score) == 4:
                    homescore = 3
                    awayscore = 1
                elif len(score) == 5:
                    homescore = 3
                    awayscore = 2

                home_names.append(player1)
                away_names.append(player2)
                home_scores.append(homescore)
                away_scores.append(awayscore)
                tournaments.append(tournament)
                years.append(year)
                cities.append(city)
                countries.append(country)

        cols_name = ['Home', 'Away', 'Home Score', 'Away Score', 'Tournament', 'Year', 'City', 'Country']
        cols = [home_names, away_names, home_scores, away_scores, tournaments, years, cities, countries]
        write_csv('1984-2005_tennis_matches.csv', cols_name, cols)

Example No. 15
makedirs(download_dir, exist_ok=True)

url = 'http://books.toscrape.com/'

print('getting categories')
categories = get_categories(url)

for category in categories:
    print('\n' + 'getting books from ' + category['title'] + ':')
    books = get_books_from_category(category['url'])

    books_data = []
    for book in books:
        books_data.append(get_book_data(book, category['title']))
        print(' ' + books_data[-1]['title'])

    print('\n' + 'writing ' + category['title'] + '.csv')
    write_csv(download_dir + category['title'] + '.csv', books_data)

    print('')

    images_dir = download_dir + category['title'] + '/'
    makedirs(images_dir, exist_ok=True)

    for data in books_data:
        file_name = data['title'].replace("/", " - ") + ' - ' + \
                    data['universal product code (upc)'] + '.jpg'

        print('downloading ' + file_name)
        download_image(data['image url'], images_dir + file_name)
Example No. 16
import random
from helpers import write_csv

cities_list = []
with open('cities.txt', 'r') as f:
    for line in f.readlines():
        cities_list.append(line.strip())

ids = []
names = []
cities = []
lives_in_campus = []

i = 0
with open('students.txt', 'r') as f:
    for line in f.readlines():
        if len(line.strip()) > 20:
            continue
        i += 1
        ids.append(i)
        names.append(line.strip())
        cities.append(random.choice(cities_list))
        lives_in_campus.append(random.choice([True, False]))

write_csv('students.csv', ['Id', 'Name', 'City', 'Lives in campus'],
          [ids, names, cities, lives_in_campus])
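This and most of the scraping snippets below (Examples 14, 17, 21 through 25, and so on) use a column-oriented write_csv(filename, cols_name, cols), where each entry of cols is one parallel column list. A plausible sketch:

import csv

def write_csv(filename, cols_name, cols):
    # Write the header row, then transpose the parallel columns into rows.
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(cols_name)
        writer.writerows(zip(*cols))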
Example No. 17
        try:
            champion = winners.find_all(
                'div',
                class_='tourney-detail-winner')[0].find('a').text.strip()
        except:
            continue

        details = tournament.find_all('td', class_='tourney-details')
        surface = details[1].find('span').text.strip()
        url = details[4].find('a')['href']

        names.append(name)
        cities.append(city)
        countries.append(country)
        start_dates.append(start_date)
        prizes.append(prize)
        champions.append(champion)
        surfaces.append(surface)
        urls.append(url)
        years.append(year)

    cols_name = [
        'Tournament', 'City', 'Country', 'Start Date', 'Prize', 'Champion',
        'Surface', 'URL', 'Year'
    ]
    cols = [
        names, cities, countries, start_dates, prizes, champions, surfaces,
        urls, years
    ]
    write_csv('1975_2005_tennis_tournaments.csv', cols_name, cols)
Example No. 18
                                transfer_end_date.append("-")
                            else:
                                transfer_end_date.append(
                                    transfer_join_date[-len(transfers) + i -
                                                       1])

    cols_name = [
        'ManagerName', 'Nationality', 'WorkedInClub', 'Country', 'FromDate',
        'ToDate', 'Position'
    ]
    cols = [
        coaches_name, coaches_nationality, coaches_trained_teams,
        coaches_team_country, coaches_from_date, coaches_end_date,
        coaches_position
    ]
    write_csv('2_retired_managers_history.csv', cols_name, cols)

    cols_name = [
        'PlayerName', 'Nationality', 'Season', 'JoinDate', 'EndDate',
        'LeftTeam', 'CountryLeftTeam', 'JoinedTeam', 'CountryJoinedTeam',
        'Value'
    ]
    cols = [
        transfer_player_name, transfer_player_nationality, transfer_season,
        transfer_join_date, transfer_end_date, transfer_left_team,
        transfer_left_team_country, transfer_joined_team,
        transfer_joined_team_country, transfer_value_or_type
    ]
    write_csv('2_retired_players_history.csv', cols_name, cols)
    print("Am salvat pentru link-ul: ", link)
Example No. 19
import random
from helpers import write_csv

spec = ['Robotics', 'Algorithms', 'Semantics', 'Distributed Systems', 'Computer Graphics', 'Computer Architecture', 'Computer Systems']

ids = []
names = []
specializations = []
office_rooms = []

i = 0
with open('professors.txt', 'r') as f:
	for line in f.readlines():
		if len(line.strip()) > 20:
			continue
		i += 1
		ids.append(i)
		names.append(line.strip())
		specializations.append(random.choice(spec))

		dig1 = random.choice([i for i in range(1, 7)])
		dig2 = random.choice([i for i in range(1, 9)])
		office_rooms.append('PR' + str(dig1) + '0' + str(dig2))


write_csv('professors.csv', ['Id', 'Name', 'Specialization', 'Office'], [ids, names, specializations, office_rooms])
Example No. 20
			# ===== for verification =====
			print (player_name)
			for i in range(len(transfers)):
				print (transfer_join_date[-len(transfers) + i], " ", transfer_end_date[-len(transfers) + i])
				print (transfer_season[-len(transfers) + i])
				print (transfer_left_team[-len(transfers) + i])
				print (transfer_joined_team[-len(transfers) + i])
				print (transfer_value_or_type[-len(transfers) + i])


cols_name = ['TeamName', 'Country', 'NoPlayers', 'AvgAge', 'NoForeigners',
	'TotalPlayersValue', 'Coach', 'AgeCouch', 'Stadium', 'StadiumCapacity']
cols = [team_names, countries, squads_no, avg_ages, no_foreigners,
	players_total_values, coaches, age_coaches, stadiums, stadiums_capacity]
write_csv('teams.csv', cols_name, cols)

cols_name = ['ManagerName', 'Nationality', 'WorkedInClub', 'CountryClub', 'FromDate', 'ToDate', 'Position']
cols = [coaches_name, coaches_nationality, coaches_trained_teams,
		coaches_team_country, coaches_from_date,
		coaches_end_date, coaches_position]
write_csv('active_managers_history.csv', cols_name, cols)

cols_name = ['PlayerName', 'Age', 'Birthdate', 'Nationality',
	'Position', 'Club', 'TShirtNumber', 'Value', 'Captain']
cols = [player_names, player_ages, player_birth_dates, player_nationalities,
	players_positions, player_team_names, player_tshirt_numbers,
  player_values, captains]
write_csv('players.csv', cols_name, cols)

cols_name = ['PlayerName', 'Season','JoinDate', 'EndDate',
Example No. 21
    for row in rows[1:]:
        cols = row.find_all('td')
        if len(cols) == 1:
            championship = cols[0].find('img')['title'].strip()
            continue

        season = cols[0].findNext('a').text.strip()
        season = get_season(season)
        seasons.append(season)
        team = cols[2].text.strip()
        team_names.append(team)
        coach = cols[3].text.strip()
        if coach == "":
            coach = "-"
        coaches.append(coach)
        countries.append(country)
        championships.append(championship.replace(" ", "") + "_" + season)

# cols_name = ['ChampionshipWinner', 'Season', 'Championship', 'Country', 'CoachWinner']
# cols = [team_names, seasons, championships, countries, coaches]
# write_csv('championship_winner.csv', cols_name, cols)

cols_name = ['ChampionshipWinner', 'Season', 'Championship', 'CoachWinner']
cols = [team_names, seasons, championships, coaches]
write_csv('championsLeague_winners.csv', cols_name, cols)

# cols_name = ['ChampionshipWinner', 'Season', 'Championship', 'CoachWinner']
# cols = [team_names, seasons, championships, coaches]
# write_csv('europaLeague_winners.csv', cols_name, cols)
Example No. 22
for i in range(0, len(dates)):
    print(dates[i])
    for nr_page in range(14):
        current_url = f"{root_url}{dates[i]}&countryCode=all&rankPage={nr_page * 100 + 1}-{(nr_page + 1) * 100}"
        page = requests.get(current_url)
        soup = BeautifulSoup(page.content, "html.parser")

        players_table = soup.find('table', class_='mega-table')

        for player_row in players_table.find('tbody').find_all('tr'):
            player_name = player_row.find(
                'td', class_='player-cell').find('a').text.strip()
            player_url = player_row.find(
                'td', class_='player-cell').find('a')['href']
            player_age = player_row.find('td', class_='age-cell').text.strip()
            player_rank = player_row.find('td',
                                          class_='rank-cell').text.strip()

            if player_name in already_saved_players or player_name in players_names:
                continue

            players_names.append(player_name)
            players_urls.append(player_url)
            players_age.append(player_age)
            players_rank.append(player_rank)

    cols_name = ['Name', 'URL', 'Age', 'Rank']
    cols = [players_names, players_urls, players_age, players_rank]
    write_csv('new_tennis_players.csv', cols_name, cols)
Example No. 23
                        tokens = cols[4].find('a').text.strip().split(":")
                        if tokens[0] == '-' or len(tokens) == 1:
                            continue

                        home_teams.append(cols[3].find('img')['alt'].strip())
                        away_teams.append(cols[5].find('img')['alt'].strip())
                        home_scores.append(tokens[0])
                        if 'AET' in tokens[1]:
                            tokens[1] = tokens[1][:tokens[1].index('AET') - 1]
                        away_scores.append(tokens[1])
                        if int(tokens[0]) > int(tokens[1]):
                            winners.append("HomeTeam")
                        elif int(tokens[0]) < int(tokens[1]):
                            winners.append("AwayTeam")
                        else:
                            winners.append("Draw")
                        dates.append(date)
                        stages.append(stage)

        filename = "Campionate/" + championship_name + "_" + year + "!" + str(
            int(year) + 1) + '.csv'
        cols_name = [
            'HomeTeam', 'AwayTeam', 'HomeScore', 'AwayScore', 'Winner',
            'Stage', "Date"
        ]
        cols = [
            home_teams, away_teams, home_scores, away_scores, winners, stages,
            dates
        ]
        write_csv(filename, cols_name, cols)
Example No. 24
def sell_trade(etoro_instance):
    try:
        detail = ""
        # etoro_instance.login()

        logger.info(
            f"Going to get last Ordered Trades (For Opening Selling Position ) ..."
        )

        lastOrderedTrades = helpers.lastOrderedTrade(isBuy=True)

        if not lastOrderedTrades:
            logger.info(
                f"No Last Ordered Trades found to open a selling position.")

        for t1, lastOrderedTrade in enumerate(lastOrderedTrades):

            instrumentID = lastOrderedTrade['InstrumentID']
            instrumentData = helpers.find_instrument_by_id(instrumentID)
            lastOrderedTrade.update({
                k: v
                for k, v in instrumentData.items() if k in (
                    "InstrumentDisplayName",
                    "SymbolFull",
                )
            })
            instrumentSymbol = instrumentData["SymbolFull"]
            instrumentDisplayName = instrumentData["InstrumentDisplayName"]
            instrumentTitle = f"{instrumentSymbol} - {instrumentDisplayName}"
            positionID = lastOrderedTrade['PositionID']

            logger.info(
                f"\n[{t1+1}/{len(lastOrderedTrades)}] :\n{lastOrderedTrade}\n")

            logger.info(f"Going to get User Trade History ...")
            tradeHistory = etoro_instance.get_trade_history

            closedOrder = helpers.isOrderClosed(
                positionID,
                data_list=tradeHistory,
                path=config.closed_trade_history_file)

            if not closedOrder:
                logger.info(f"<[{instrumentTitle}]: postionID->"
                            f"{positionID} instrumentID->{instrumentID}>"
                            " is not closed yet. skipping this...")
                continue

            # here we open a selling position for this trade

            logger.info(
                f"Going to open Selling Trade for: {instrumentTitle}\n")
            sell_trade, sell_trade_res = etoro_instance.trade(ins=instrumentID,
                                                              IsBuy=False)

            if sell_trade is False:
                detail = (
                    f"Couldnot open selling position for: '{instrumentTitle}'"
                    f" reason :\n{sell_trade_res}\n")
                logger.warning(detail)
            else:
                detail = (f"Opened selling position for: '{instrumentTitle}'"
                          f" response :\n{sell_trade_res}\n")
                logger.info(detail)

            logger.info(f"\nGoing to update User Date ...")
            user_data = etoro_instance.get_login_info
            logger.info(f"\nGot User Data :\n{user_data}\n")

            logger.info(f"\nGoing to update User Trade History ...")
            user_trade_history = etoro_instance.get_trade_history
            logger.info(f"\nGot User Trade History :\n{user_trade_history}\n")

        msg = "<sell_trade> finished ..."
        print('+' * len(msg))
        print(msg)
        print('+' * len(msg))

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        err_detail = e, fname, exc_tb.tb_lineno
        detail = f"Following Error occured in sell_trade ::\n{err_detail}\n"
        logger.error(detail)

    #write trade logs
    helpers.write_csv("Selling", detail)
Example No. 25
import csv
import sys
import json
import os
import glob
import re

import requests
import pandas as pd
from bs4 import BeautifulSoup

sys.path.append('../../helpers')
from helpers import write_csv

root_url = "http://ontheworldmap.com/all/cities/"

page = requests.get(root_url)
soup = BeautifulSoup(page.content, "html.parser")

cities_cols = soup.find_all('div', class_='col-3')
paranthesis = re.compile(r'\([^)]*\)')

cities = set()

for cities_col in cities_cols[:3]:
    for city_row in cities_col.find_all('li'):
        city_name = city_row.find('a').text
        city_name = re.sub(paranthesis, '', city_name).strip()

        cities.add(city_name)

cols_name = ['City']
cols = [sorted(cities)]  # sets are unordered; sort for a reproducible CSV
write_csv('cities.csv', cols_name, cols)
Example No. 26
def buy_trade(etoro_instance):
    try:
        detail = ""

        # etoro_instance.login()
        # check the current balance
        clientCredit = helpers.clientCredit(
            login_data=etoro_instance.get_login_info)

        open_markets_only = config.analyze_open_markets_only
        logger.info(
            f"Analyzing today's stocks for {'open' if open_markets_only else 'all'} markets ..."
        )

        analyzer = AnalyzeStocks()

        top_markets = analyzer.today_price_analysis(
            stocks_sort_by=config.stocks_sort_by,
            time_slots_count=24,
            open_markets_only=open_markets_only,
            time_slots_pick=2)

        # top_markets = analyzer.trade_insights(
        # 	etoro_instance.get_insights(),
        #  	open_markets_only=open_markets_only, sort_by="growth")

        for t1, top_market in enumerate(top_markets):
            logger.info(f"\nGoing to open Buying Trade for :\n{top_market}\n")
            buy_trade, buy_trade_res = etoro_instance.trade(
                ins=top_market.get("InstrumentId"), IsBuy=True)
            if buy_trade is False:
                detail = (
                    f"Couldnot open buying position for: '{top_market.get('SymbolFull')}'"
                    f" reason :\n{buy_trade_res}\n")
                logger.warning(detail)
            else:
                detail = (
                    f"Opened buying position for: '{top_market.get('SymbolFull')}'"
                    f" response :\n{buy_trade_res}\n")
                logger.info(detail)
                break

        logger.info(f"\nGoing to update User Date ...")
        user_data = etoro_instance.get_login_info
        logger.info(f"\nGot User Data :\n{user_data}\n")

        logger.info(f"\nGoing to update User Trade History ...")
        user_trade_history = etoro_instance.get_trade_history
        logger.info(f"\nGot User Trade History :\n{user_trade_history}\n")

        msg = '<buy_trade> finished ...'
        print('+' * len(msg))
        print(msg)
        print('+' * len(msg))

    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        err_detail = e, fname, exc_tb.tb_lineno
        detail = f"Following Error occured in buy_trade ::\n{err_detail}\n"
        logger.error(detail)

    #write trade logs
    helpers.write_csv("Buying", detail)
Example No. 27
def run():

    rootdir = os.path.dirname(os.path.dirname(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument("target",
                        help="project to work with (e.g. OpenFOAM, SU2)")
    parser.add_argument("-i",
                        "--install",
                        help="Install the project instead of running?",
                        dest='install',
                        default=False,
                        action="store_true")
    parser.add_argument("-c",
                        "--gencommands",
                        help="Generate compile commands manually?",
                        dest='gencommands',
                        default=False,
                        action="store_true")
    parser.add_argument(
        "--version",
        help=
        "Project version appendix (e.g. 6 for OpenFOAM-6, releases for SU2 7.0.0)",
        default="")
    parser.add_argument("-o",
                        "--output",
                        help="Name of output csv file",
                        default="")
    parser.add_argument("-d",
                        "--csvdelimiter",
                        help="Delimiter for csv output",
                        default=";")
    parser.add_argument(
        "--config",
        help="Name of config file or directory (default = project name + conf)",
        default="")
    parser.add_argument(
        "--oolint",
        help="Directory of OO-Lint(if automatic finding fails)",
        default="")
    parser.add_argument("--db", help="Database to interact with", default="")
    parser.add_argument("--diff",
                        help="Differences of runs to show(list of 2 ids)",
                        type=int,
                        nargs=2,
                        default=[])
    args = parser.parse_args()

    if args.oolint == "":
        args.oolint = find_opovlint()
    if args.target == "OpenFOAM":
        pr = project.openFOAM
    elif args.target == "SU2":
        pr = project.su2

    if args.config == "":
        config = args.target + "conf.json"
    else:
        config = args.config

    simple_columns = ["MatchType", "File", "Line", "Column", "Code", "Files"]
    pName = args.target
    if args.version != "":
        pName = pName + "-" + args.version
    elif args.target == "SU2":
        args.version = "6.2.0"
    if args.install or args.gencommands:
        if args.install:
            pr.setup(args.version)
        pr.environ(pName)
        if args.install:
            pr.preconfigure(args.version)
        pr.comgen(pName)
    else:
        if not os.path.exists(pName + "/compile_commands.json"):
            if not os.path.exists("compile_commands/" + pName + ".json"):
                print(
                    "No pre-generated compile command database available, generate manually with --gencommands"
                )
            else:
                replace_with("compile_commands/" + pName + ".json",
                             pName + "/compile_commands.json", "[root]",
                             os.getcwd())
        if args.db != "":
            db = Database(args.db)
        target_list = extract_list(pName)
        conf_path = os.path.join(rootdir, "config/" + config)
        #support multiple configs
        if os.path.isdir(conf_path):
            accList = []
            for conf in os.listdir(conf_path):
                accList += execute_find_type(target_list,
                                             pName,
                                             args.oolint,
                                             delim=args.csvdelimiter,
                                             config=conf_path + "/" + conf)
            if args.db != "":
                db.add_run(args.target,
                           args.version,
                           args.oolint,
                           config=conf_path + "/" + conf)
                db.add_matches(accList, args.csvdelimiter)
            if args.output != "":
                write_csv(accList,
                          simple_columns,
                          output=args.output,
                          delim=args.csvdelimiter)

        else:
            tList = execute_find_type(target_list,
                                      pName,
                                      args.oolint,
                                      delim=args.csvdelimiter,
                                      config=conf_path)
            if args.db != "":
                db.add_run(args.target,
                           args.version,
                           args.oolint,
                           config=conf_path)
                db.add_matches(tList, args.csvdelimiter)
            if args.output != "":
                write_csv(tList,
                          simple_columns,
                          output=args.output,
                          delim=args.csvdelimiter)
        if args.db != "":
            db.close()
Example No. 28
 def test_write_csv(self):
     convertfunc = lambda x: 0 if b'b' in x else 1  # convert function for the Prediction column: 0 if background ('b'), 1 if signal
     converters = {"Prediction": convertfunc}
     data = load_csv(self.path, converters=converters)
     write_csv(data, "test/test_write.csv")
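Purely speculative: the bytes-valued converter (b'b' in x) hints that this project's load_csv wraps numpy.genfromtxt, which by default hands raw bytes to converters. One possible shape for the pair:

import csv
import numpy as np

def load_csv(path, converters=None):
    # names=True reads the header; converters map column name -> callable.
    return np.genfromtxt(path, delimiter=',', names=True,
                         converters=converters)

def write_csv(data, path):
    # Dump the structured array with its field names as the header row.
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(data.dtype.names)
        writer.writerows(data.tolist())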
Example No. 29
#!/usr/bin/env python
# coding: utf-8

import sys
import argparse

from helpers import load_input, anonymize_cols, write_csv


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Script para anonimizar columnas en un CSV.')
    parser.add_argument('-c', '--columns', help='Columns to anonymize', required=True, nargs='*')
    parser.add_argument('-i', '--input', help='CSV que se desea anonimizar', required=True)
    parser.add_argument('-o', '--output', help='Nombre del Archivo de salida', required=True)
    args = parser.parse_args()

    # Load resource
    input_data = load_input(args.input)
    anonymized_data = anonymize_cols(input_data, args.columns)
    status = write_csv(anonymized_data, args.output)

    if status:
        print('Se generó correctamente "%s".' % status)
    else:
        print('Ocurrió un fallo al procesar el archivo: "%s".' % args.input)
Example No. 30
print('Done dump')

texts_list = []
titles_list = []
categories_list = []
url_list = []
source_list = []
date_list = []

for key in categories.keys():
    if not texts[key] or not titles[key]:
        continue

    texts_list.append(texts[key])
    titles_list.append(titles[key])
    categories_list.append(categories[key])
    url_list.append(url[key])
    source_list.append(source[key])
    date_list.append(date[key])

print('Writing csv')
write_csv(
    "category_news.csv",
    ["Titles", "Texts", "Categories", "Url", "Source", "Date"],
    [
        titles_list, texts_list, categories_list, url_list, source_list,
        date_list
    ],
)
Example No. 31
         ['Unity Games', 'Modelling', 'Animation'],
         ['CPU Analysis', 'Memory Analysis'],
         ['Operating Systems', 'Security', 'Compilers', 'Drivers']]

ids = []
student_ids = []
professor_ids = []
thesis_areas = []
thesis_title = []

professors = pandas.read_csv('professors.csv')

for i in range(1, 10000):
    ids.append(i)
    student_ids.append(i)

    prof = random.choice([i for i in range(1, 201)])
    professor_ids.append(prof)

    specialization = professors.loc[prof - 1, 'Specialization']
    specialization = spec.index(specialization)

    area = random.choice(areas[specialization])
    thesis_areas.append(area)
    thesis_title.append(area + ' ' +
                        str(random.choice([i for i in range(1, 101)])))

student_ids = random.sample(student_ids, len(student_ids))
write_csv('thesis.csv', ['Id', 'Student_id', 'Professor_id', 'Area', 'Title'],
          [ids, student_ids, professor_ids, thesis_areas, thesis_title])
Example No. 32
tournaments.append('EuropaLeague_2014/2015')
stadiums.append('PGE Narodowy')
cities.append('Warsaw')
countries.append('Poland')

tournaments.append('EuropaLeague_2015/2016')
stadiums.append('St. Jakob-Park')
cities.append('Basel')
countries.append('Switzerland')

tournaments.append('EuropaLeague_2016/2017')
stadiums.append('Friends Arena')
cities.append('Stockholm')
countries.append('Sweden')

tournaments.append('EuropaLeague_2017/2018')
stadiums.append('Groupama Stadium')
cities.append('Lyon')
countries.append('France')

tournaments.append('EuropaLeague_2018/2019')
stadiums.append('Baku Olympic Stadium')
cities.append('Baku')
countries.append('Azerbaijan')

cols_name = ['Tournament', 'Stadium', 'City', 'Country']
cols = [tournaments, stadiums, cities, countries]
write_csv('finals_stadiums.csv', cols_name, cols)


Example No. 33
                # Iterate through each covenant in a facility
                # If there is an invalid covenant then break and invalidate current facility
                for covenant in facility.covenants:
                    if not covenant.valid_loan(loan):
                        valid_facility = False
                        break

            # If facility is valid check if it is cheaper than the current facility
            # If None assign automatically
            if valid_facility:
                if cheapest_facility is None:
                    cheapest_facility = facility
                else:
                    if cheapest_facility.interest_rate > facility.interest_rate:
                        cheapest_facility = facility

    # Update the expected_yield and amount for the facility once a loan has been assigned
    # if the facility is not None
    if cheapest_facility is not None:
        cheapest_facility.update_expected_yield_and_amount(loan)
    # A loan may match no facility at all; record None instead of crashing
    facilities_loans_assignment[loan_id] = (
        cheapest_facility.facility_id if cheapest_facility is not None else None)

facility_yields = {}
for bank_id, bank in banks.items():
    for facility_id, facility in bank.facilities.items():
        facility_yields[facility_id] = round(facility.expected_yield)

write_csv('assignment.csv', ['loan_id', 'facility_id'],
          facilities_loans_assignment)
write_csv('yields.csv', ['facility_id', 'expected_yield'], facility_yields)
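Here write_csv receives plain dicts (loan id -> facility id, facility id -> expected yield) rather than row lists, so the helper presumably writes one two-column row per key/value pair. A sketch under that assumption:

import csv

def write_csv(filename, header, mapping):
    # One (key, value) pair per row, preceded by the two-column header.
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for key, value in mapping.items():
            writer.writerow([key, value])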