Code example #1
File: worker.py Project: wuyusheng1023/nh3
def main():
    while True:
        sleep(0.01)
        command_handler(r, meassys)
        status_setter(meassys, r)
        setting_handler(r)

        if next(GENS['local_pub']):
            dttm = get_utc_time()
            data = meassys.data

            if data is None:
                print(dttm, 'No data.')
                continue

            data, status, errors = data
            data = {
                'dttm': dttm,
                'data': data,
                'status': status,
                'errors': errors
            }
            print(dttm, data['data']['Status'], data['status'])

            json_data = json.dumps(data)
            push_to_redis(r, 'data', json_data)

            if next(GENS['local_record']):
                save_data(data)

            if meassys.status != 'Idle':
                if is_auto_zero():
                    meassys.cal_gas_zero_start()
                else:
                    meassys.cal_gas_zero_stop()
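Note: the loop above gates its work on next(GENS['local_pub']) and next(GENS['local_record']), but GENS is not defined in this snippet. A minimal sketch of one way such gating generators could be built follows; the helper name, intervals, and keys are illustrative assumptions, not taken from the wuyusheng1023/nh3 project.

import time


def every(seconds):
    # Illustrative interval gate: yields True at most once per `seconds`, otherwise False.
    last = time.monotonic()
    while True:
        now = time.monotonic()
        if now - last >= seconds:
            last = now
            yield True
        else:
            yield False


GENS = {'local_pub': every(1.0), 'local_record': every(60.0)}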
Code example #2
def fetch_groups(gl):
    '''
    Download all the groups on https://gitlab.gnome.org.
    '''

    start = time.time()
    print('Fetching groups.')

    # do not include the `Archive` group
    blacklist = [4001]

    groups = json.loads(
        requests.get('https://gitlab.gnome.org/api/v4/groups',
                     params={
                         'per_page': 100
                     }).text)
    save_data(groups, 'groups.json')
    print(f'Downloaded and saved {len(groups)} groups.')

    # create a list of group_ids for downloading the projects in each group
    group_ids = []
    for group in groups:
        if group['id'] not in blacklist:
            group_ids.append(group['id'])

    finish = time.time()
    print(f'Took {round(finish-start, 2)} seconds.')

    return group_ids
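Most of the snippets on this page call a project-specific save_data helper that is never shown. For the JSON-based examples such as fetch_groups above, a minimal sketch of what it might look like is given below, assuming it takes an object, a file name, and an optional subdirectory; the signature and directory layout are assumptions, and the pandas-based examples later on clearly use a different variant.

import json
import os


def save_data(data, filename, subdir='data'):
    # Hypothetical helper: serialize `data` as JSON under subdir/filename.
    os.makedirs(subdir, exist_ok=True)
    path = os.path.join(subdir, filename)
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    return path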
Code example #3
def fetch_projects(gl, group_ids):
    '''
    Download all the projects on https://gitlab.gnome.org.
    '''

    start = time.time()
    print('Fetching projects.')

    # get all the projects in each group
    projects = []
    for group_id in group_ids:
        group = gl.groups.get(id=group_id, lazy=True)
        group_projects = group.projects.list(all=True)
        projects += group_projects

    projects = [project.attributes for project in projects]

    save_data(projects, 'projects.json')
    print(f'Downloaded and saved {len(projects)} projects.')

    # create a list of project_ids for downloading the issues, merge_requests, commits in each project
    project_ids = []
    for project in projects:
        project_ids.append(project['id'])

    finish = time.time()
    print(f'Took {round(finish-start, 2)} seconds.')

    return project_ids
Code example #4
def clean_fixtures_data():
    """
    Cleans the fixtures data and filters it.
    """

    # load the fixtures data
    all_fixtures = load_data("fixtures.json", "data/original")

    # define headers that we need to keep
    headers = [
        "event", "finished", "team_a", "team_a_difficulty", "team_h",
        "team_h_difficulty"
    ]

    # list to store the filtered information
    filtered_fixtures = []

    # iterate over all the fixtures and remove unwanted information
    for fixture in all_fixtures:
        if fixture["event"] is not None:
            # remove unwanted keys from the fixture's data
            fixture = {header: fixture[header] for header in headers}
            filtered_fixtures.append(fixture)

    # only retain the fixtures that are yet to take place
    filtered_fixtures = [
        fixture for fixture in filtered_fixtures
        if fixture['event'] >= next_event
    ]

    # save the data in a JSON file
    save_data(filtered_fixtures, "filtered_fixtures.json", "data")
Code example #5
def get_weather_data(apikey, locs, cols, start_date, end_date, offset):
    """ Retrieves daily historical weather data for the specified locations
    using the Dark Sky API. Output is saved as a CSV in the 'data' folder.

    Args:
        apikey (str): Dark Sky API key.
        locs (str): Geocoded locations file name (with extension).
        cols (str): File name containing custom column names.
        start_date (datetime.datetime): Start date for historical data range.
        end_date (datetime.datetime): End date for historical data range.
        offset (int): Step size for iterator (number of days).

    """
    locs_path = get_datafile(locs)
    locs = pd.read_csv(locs_path)

    # get columns list
    columns = get_datafile(cols)
    with open(columns) as f:
        cols = [line.strip() for line in f]

    # extract data for each location for date range b/w start and end date
    tbl = []
    for index, row in locs.iterrows():
        for single_date in daterange(start_date, end_date, offset):
            forecast = forecastio.load_forecast(apikey,
                                                row['Lat'],
                                                row['Lng'],
                                                time=single_date,
                                                units='si')
            h = forecast.daily()
            tz = forecast.json['timezone']
            d = h.data
            for p in d:
                # get date info
                utc = p.d['time']
                dts = dt.datetime.utcfromtimestamp(utc)
                isodate = dt.datetime.utcfromtimestamp(utc).isoformat()
                date_info = [tz, isodate, dts.year, dts.month, dts.day]

                # get location info
                loc, lat, lng = row['Location'], row['Lat'], row['Lng']
                elevation = row['Elevation']
                loc_info = [loc, lat, lng, elevation]

                # get weather attributes - need to handle possible KeyErrors
                temp_high = p.d.get('temperatureHigh', None)
                temp_low = p.d.get('temperatureLow', None)
                humidity = p.d.get('humidity', None)
                if humidity is not None:
                    humidity = humidity * 100
                pressure = p.d.get('pressure', None)
                attr_info = [temp_high, temp_low, humidity, pressure]
                tbl.append(loc_info + date_info + attr_info)

    # convert output to data frame
    df = pd.DataFrame(tbl)
    df.columns = cols
    filename = 'historical_data.csv'
    save_data(df, filename, sep='|')
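get_weather_data iterates over daterange(start_date, end_date, offset), which is not defined in this snippet. A minimal sketch is shown below under the assumption that it yields datetimes from start_date up to (but excluding) end_date in offset-day steps; the actual helper in the project may differ.

import datetime as dt


def daterange(start_date, end_date, offset=1):
    # Assumed behaviour: yield datetimes stepped by `offset` days, excluding end_date.
    current = start_date
    while current < end_date:
        yield current
        current += dt.timedelta(days=offset)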
Code example #6
    def save_twitter_filter_data(self, json_data):
        '''Saves the given json data into the filter directory'''
        time_folder = int(time.time() / 300) * 300

        filelocation = os.path.join(self.conf['save_data_location'],
                                "statuses/filter", str(time_folder),
                                str(int(time.time())) + "_" +
                                uuid.uuid4().hex +
                                ".json")
        save_data(json_data, filelocation)
Code example #7
    def save_twitter_sample_data(self, json_data):
        '''Saves data from the sample API to json files'''
        time_folder = int(time.time() / 300) * 300

        filelocation = os.path.join(self.conf['save_data_location'],
                                "statuses/sample", str(time_folder),
                                str(int(time.time())) + "_" +
                                md5.md5(json.dumps(json_data)).hexdigest() +
                                ".json")
        save_data(json_data, filelocation)
Code example #8
File: scraper.py Project: springerBuck/fantasyAI
def download_fixtures_data():
    """
    Downloads the fixtures data.
    """

    # load the fixture specific data from the FPL RestAPI Endpoint
    fixtures = json.loads(
        requests.get('https://fantasy.premierleague.com/api/fixtures/').text)

    # save the data in a JSON file
    save_data(fixtures, 'fixtures.json', 'data/original')
Code example #9
def process_awards():
    '''
    Process the awards for top 10 users.
    '''

    try:
        # awards = load_data('awards.json')
        awards = requests.get(
            'https://raw.githubusercontent.com/ravgeetdhillon/gnome-hackers/website/artifacts/data/awards.json'
        )
        awards = json.loads(awards.text)
    except Exception:
        awards = []

    # sort the data for each criterion and save the results in their respective json files
    criteria = ['days_1', 'days_7', 'days_15', 'days_30']
    for key in criteria:

        users = load_data('processed_users.json')
        users = sorted(users, key=lambda k: k['points'][key],
                       reverse=True)[:10]

        for user in users:
            for u in awards:
                if user['id'] == u['id']:
                    break
            else:
                awards.append({
                    'id': user['id'],
                    'awards': {
                        'gold': 0,
                        'silver': 0,
                        'bronze': 0,
                        'top10': 0,
                    }
                })

        for u in awards:
            for index, user in enumerate(users, start=1):
                if u['id'] == user['id']:

                    if index == 1:
                        u['awards']['gold'] += 1
                    elif index == 2:
                        u['awards']['silver'] += 1
                    elif index == 3:
                        u['awards']['bronze'] += 1
                    u['awards']['top10'] += 1

                    break

    save_data(awards, 'awards.json')
Code example #10
File: scraper.py Project: springerBuck/fantasyAI
def download_teams_data():
    """
    Downloads the teams data.
    """

    # load the team specific data from the FPL RestAPI Endpoint
    data = json.loads(
        requests.get(
            'https://fantasy.premierleague.com/api/bootstrap-static/').text)
    teams = data['teams']

    # save the data in a JSON file
    save_data(teams, 'teams.json', 'data/original')
Code example #11
File: scraper.py Project: springerBuck/fantasyAI
def download_gameweeks_data():
    """
    Downloads the gameweeks data.
    """

    # load the gameweek specific data from the FPL RestAPI Endpoint
    data = json.loads(
        requests.get(
            'https://fantasy.premierleague.com/api/bootstrap-static/').text)
    gameweeks = data['events']

    # save the data in a JSON file
    save_data(gameweeks, 'gameweeks.json', 'data/original')
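download_teams_data (example #10), download_gameweeks_data above, and download_players_data (example #17 below) each fetch the same bootstrap-static payload. As a design note, a single request could serve all three; the sketch below is a hedged illustration of that idea, assuming the save_data helper used throughout these snippets. The combined function name is invented here, and it omits the per-player history that download_players_data additionally attaches.

import json

import requests


def download_bootstrap_data():
    # Illustrative only: fetch the shared bootstrap-static payload once
    # and slice out the sections the individual download_* functions use.
    data = json.loads(
        requests.get(
            'https://fantasy.premierleague.com/api/bootstrap-static/').text)
    save_data(data['teams'], 'teams.json', 'data/original')
    save_data(data['events'], 'gameweeks.json', 'data/original')
    save_data(data['elements'], 'players.json', 'data/original')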
Code example #12
File: scraper.py Project: springerBuck/fantasyAI
def download_users_team_data(user_team_id, recent_gw_id):
    """
    Downloads the user's team data.
    """

    # load the user's team data from the FPL RestAPI Endpoint
    user_team = json.loads(
        requests.get(
            f'https://fantasy.premierleague.com/api/entry/{user_team_id}/event/{recent_gw_id}/picks/'
        ).text)

    # save the data in a JSON file
    save_data(user_team, 'user_team.json', 'data/original')
Code example #13
def fetch_users(gl):
    '''
    Download all the users on https://gitlab.gnome.org.
    '''

    start = time.time()
    print('Fetching users.')

    users = gl.users.list(all=True)
    users = [user.attributes for user in users]
    save_data(users, 'users.json')
    print(f'Downloaded and saved {len(users)} users.')

    finish = time.time()
    print(f'Took {round(finish-start, 2)} seconds.')
Code example #14
def add_awards():
    '''
    Add the processed awards to the processed users.
    '''

    awards = load_data('awards.json')
    users = load_data('processed_users.json')

    for user in users:
        for u in awards:
            if u['id'] == user['id']:
                user['awards'] = u['awards']
                break

    save_data(users, 'processed_users.json')

    return users
Code example #15
File: app.py Project: todokku/musica
def app():
    """
    Main function for the app.
    """

    # generate the credentials
    drive_creds = create_crendentials(DRIVE_REFRESH_TOKEN, DRIVE_SCOPES)
    youtube_creds = create_crendentials(YOUTUBE_REFRESH_TOKEN, YOUTUBE_SCOPES)

    # get a list of files to select which song to upload
    files = get_files(creds=drive_creds)

    # check if there is any new music to upload
    selection = select_audio_image(files)
    if selection is not None:

        # select an audio file and its corresponding image
        audio, image = selection

        audio_id = audio['id']
        audio_name = audio['name']
        image_id = image['id']
        image_name = image['name']

        # download the selected audio and image
        download_file(audio_id, audio_name, creds=drive_creds)
        download_file(image_id, image_name, creds=drive_creds)

        # convert the downloaded audio and image into a video
        conversion = convert_to_video(audio_name, image_name)
        print(conversion)
        
        # if conversion is successful, upload the video to Youtube
        if conversion is not False:
            response = upload_video(conversion, creds=youtube_creds)
            print(response)
            print('New track released.')

            # save the audio id in uploaded
            uploaded = load_data('uploaded.json')
            uploaded.append(audio_id)
            save_data(uploaded, 'uploaded.json')

    else:
        print('No new track to release.')
Code example #16
File: consolidate.py Project: legislativo-br/parser
def main():
    logging.info('Iniciando procedimento')
    logging.info(
        'Checando se o arquivo de dados consolidados por partido existe')

    if os.path.exists(constants.PARTIDO_FILE_PATH):
        logging.info('Arquivo existe')
    else:
        logging.info('Arquivo não existe')
        data = pd.DataFrame(parser.generate_party_data())
        logging.info('Salvando dados')
        save_data(df=data, filepath=constants.PARTIDO_FILE_PATH)

    logging.info(
        'Checando se o arquivo de dados consolidados por candidato existe')
    if os.path.exists(constants.CANDIDATO_FILE_PATH):
        logging.info('Arquivo existe')
    else:
        logging.info('Arquivo não existe')
        data = pd.DataFrame(parser.generate_candidate_data())
        logging.info('Salvando dados')
        save_data(df=data, filepath=constants.CANDIDATO_FILE_PATH)
Code example #17
File: scraper.py Project: springerBuck/fantasyAI
def download_players_data():
    """
    Downloads the players data.
    """

    # load the player specific data from the FPL RestAPI Endpoint
    data = json.loads(
        requests.get(
            'https://fantasy.premierleague.com/api/bootstrap-static/').text)
    players = data['elements']

    for player in players:
        player_id = player['id']
        print(f'Downloading data for player: {player_id}')
        data = json.loads(
            requests.get(
                f'https://fantasy.premierleague.com/api/element-summary/{player_id}/'
            ).text)
        player['history'] = data['history']

    # save the data in a JSON file
    save_data(players, 'players.json', 'data/original')
Code example #18
def clean_teams_data():
    """
    Cleans the data for the teams.
    """

    # load the teams data
    all_teams = load_data("teams.json", "data/original")

    # define headers that we need to keep
    headers = ["id", "name"]

    # list to store the filtered information
    filtered_teams = []

    # iterate over all the teams and remove unwanted information
    for team in all_teams:

        # remove unwanted keys from the team's data
        team = {header: team[header] for header in headers}
        filtered_teams.append(team)

    # save the data in a JSON file
    save_data(filtered_teams, "filtered_teams.json", "data")
Code example #19
def main():
    '''
    Main function for the process.py.
    '''

    # initialize the users array to store the data about the users contributing to the GNOME
    users = []

    # load the commits, merge requests and issues
    commits = load_data('commits.json')
    merge_requests = load_data('merge_requests.json')
    issues = load_data('issues.json')
    all_users = load_data('users.json')

    # process the commits, merge requests and issues and generate points for the users
    users = process_issues(users, issues)
    users = process_merge_requests(users, merge_requests)
    users = process_commits(users, commits)
    users = process_users(users, all_users)

    # download the avatar image from each user
    fetch_images(users)

    save_data(users, 'processed_users.json')
Code example #20
if __name__ == '__main__':
    main()
    process_awards()
    users = add_awards()

    # sort the data for each criterion and save the results in their respective json files
    criteria = ['days_1', 'days_7', 'days_15', 'days_30']
    for key in criteria:
        users = sorted(users, key=lambda k: k['points'][key], reverse=True)
        save_data(users, f'sorted_users_acc_{key}.json')
Code example #21
def fetch_projects_data(gl, project_ids):
    '''
    Download all the merge requests, issues and commits for each project on https://gitlab.gnome.org.
    '''

    start = time.time()
    print('Fetching merge requests, issues and commits.')

    merge_requests = []
    issues = []
    commits = []

    for index, project_id in enumerate(project_ids):

        print(index, end=', ')
        project = gl.projects.get(id=project_id, lazy=True)

        since = get_date_30_days_now()

        try:
            project_merge_requests = project.mergerequests.list(
                all=True,
                query_parameters={
                    'state': 'all',
                    'created_after': since
                })
            merge_requests += project_merge_requests
        except Exception as e:
            print(f'{e}. Raised while getting merge requests.')

        try:
            project_issues = project.issues.list(
                all=True, query_parameters={'created_after': since})
            issues += project_issues
        except Exception as e:
            print(f'{e}. Raised while getting issues.')

        # default to an empty list so the loop below is safe if the request fails
        project_commits = []
        try:
            project_commits = project.commits.list(
                all=True, query_parameters={'since': since})
        except Exception as e:
            print(f'{e}. Raised while getting commits.')

        for commit in project_commits:
            commit = commit.attributes
            commit = gl.projects.get(id=commit['project_id'],
                                     lazy=True).commits.get(id=commit['id'])
            commits.append(commit)

    merge_requests = [
        merge_request.attributes for merge_request in merge_requests
    ]
    issues = [issue.attributes for issue in issues]
    commits = [commit.attributes for commit in commits]

    save_data(merge_requests, 'merge_requests.json')
    print(f'Downloaded and saved {len(merge_requests)} merge requests.')

    save_data(issues, 'issues.json')
    print(f'Downloaded and saved {len(issues)} issues.')

    save_data(commits, 'commits.json')
    print(f'Downloaded and saved {len(commits)} commits.')

    finish = time.time()
    print(f'Took {round(finish-start, 2)} seconds.')
Code example #22
    geodata = 'geocoded_locations.txt'
    obs = int(sys.argv[1])

    # throw error if number of obs is 0 or less
    if obs < 1:
        raise ValueError('Number of observations cannot be less than 1')

    # instantiate new WeatherGenerator object
    weatherGenerator = WeatherGenerator(obs, start_date, end_date, histdata,
                                        geodata)

    # start timing
    start_time = timeit.default_timer()

    # generate random weather observations
    weatherGenerator.initialize_output()
    weatherGenerator.generate_position_data()
    weatherGenerator.generate_time_data()
    weatherGenerator.generate_weather_data()
    weatherGenerator.generate_condition_data()
    weatherGenerator.order_output()
    output = weatherGenerator.return_output()

    # save data frame to CSV in 'output' directory
    save_data(output, filename='generated_weather_data.csv', subdir='output')

    # stop timing and print elapsed time
    elapsed = str("{:.4f}".format(timeit.default_timer() - start_time))
    print('Generated ' + str(obs) + ' random weather observations in ' +
          elapsed + ' seconds')
Code example #23
    def save_trend_data(self, json_data, woeid):
        '''Saves Twitter Trend data to a json file'''
        filelocation = os.path.join(self.conf['save_data_location'],
                                "trends/place/weoid_" + str(woeid) + "_" +
                                str(int(time.time())) + ".json")
        save_data(json_data, filelocation)
Code example #24
File: run.py Project: Erycite/neuromod
    for ckey, val in comb.items():
        keys = ckey.split('.')  # get list from dotted string
        replace(params, keys, val)

    # save parameters in the data_folder
    if not os.path.exists(opts.data_folder):
        os.makedirs(opts.data_folder)
    shutil.copy('./' + opts.param_file,
                opts.data_folder + '/' + str(comb) + '_' + opts.param_file)

    if not opts.analysis:
        Populations = h.build_network(sim, params)
        h.record_data(params, Populations)
        h.perform_injections(params, Populations)
        h.run_simulation(sim, params)
        h.save_data(Populations, opts.data_folder, str(comb))
        sim.end()

    #if opts.analysis:
    scores = h.analyse(params, opts.data_folder, str(comb), opts.remove)

    # map storage of scores
    if search and opts.map:
        if i == 0:
            with open(opts.data_folder + '/map.csv', "w",
                      newline="") as csvfile:
                fh = csv.writer(csvfile)
                fh.writerow([
                    '#' + testParams[1] + ':[' +
                    ",".join(map(str, search[testParams[1]])) + "]"
                ])
Code example #25
def clean_players_data():
    """
    Cleans the data for all the players.
    """

    # load the players data
    all_players = load_data("players.json", "data/original")

    # define headers that we need to keep
    headers = [
        "first_name", "second_name", "minutes", "total_points",
        "points_per_game", "team", "element_type", "now_cost", "status", "id",
        "history"
    ]

    # list to store the filtered information
    filtered_players = []

    # only keep the required headers and remove the unwanted information
    for player in all_players:

        # remove unwanted keys from the player's data
        player = {header: player[header] for header in headers}

        # add player's full name
        player_name = f"{player['first_name']} {player['second_name']}"
        player["full_name"] = player_name.lower()

        # convert values stored as strings (e.g. points_per_game) to floats
        player["points_per_game"] = float(player["points_per_game"])

        # divide stats according to the season
        stats_headers = [
            "minutes", "total_points", "points_per_game", "now_cost", "history"
        ]

        for season in all_seasons:
            player_season_stats = {"season": season}
            for header in stats_headers:
                player_season_stats[header] = player[header]
                del player[header]

        player["seasons"] = [player_season_stats]

        # calculate the net points only, i.e. remove the appearance points
        for season in player["seasons"]:
            player_gw_history = []
            for count, gw in enumerate(season["history"][::-1]):
                if count < 5:
                    if gw["minutes"] >= 60:
                        net_points = gw["total_points"] - 2
                    elif 0 < gw["minutes"] < 60:
                        net_points = gw["total_points"] - 1
                    else:
                        net_points = gw["total_points"]
                    player_gw_history.append(net_points)

        season["gw_history"] = player_gw_history
        del season["history"]

        filtered_players.append(player)

    # only retain the players who have played at least one minute in the season
    filtered_players = [
        player for player in filtered_players
        if (player["seasons"][0]["minutes"] > 0
            and player["seasons"][0]["total_points"] > 0
            and len(player["seasons"][0]["gw_history"]) != 0)
    ]

    # save the data in a JSON file
    save_data(filtered_players, "filtered_players.json", "data")
Code example #26
def simulate_mp(params):
    N, grid_size, exp, xmin, i = params
    model = ContactModel(N, grid_size, grid_size, exp, STEPS, xmin, seed=8)
    model.run(STEPS)
    save_data(model, N, i, exp, grid_size, xmin)
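simulate_mp takes a single params tuple, which suggests it is meant to be fanned out across processes. A hedged usage sketch follows, assuming it lives in a module alongside ContactModel, STEPS, and save_data; the parameter values are purely illustrative.

from itertools import product
from multiprocessing import Pool

if __name__ == '__main__':
    # Build (N, grid_size, exp, xmin, i) tuples for a small illustrative sweep.
    param_grid = [(N, grid_size, exp, xmin, i)
                  for i, (N, grid_size, exp, xmin) in enumerate(
                      product([200], [100], [1.5, 2.0], [0.1]))]
    with Pool() as pool:
        pool.map(simulate_mp, param_grid)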
Code example #27
            fer.append(1 - 0.1 * fixture["team_h_difficulty"])
        if len(fer) == 5:
            break

    team["fer"] = fer
    team["fer_points"] = np.mean(fer) * (1 - np.var(fer))
    avg_fer_points += team["fer_points"]

avg_fer_points /= len(teams)
for team in teams:
    team["fer_points"] = round(team["fer_points"] / avg_fer_points, 3)
    if team["fer_points"] > max_fer_points:
        max_fer_points = team["fer_points"]

teams = sorted(teams, key=lambda k: k["fer_points"], reverse=True)
save_data(teams, "teams_cleaned.json", "data")


# generate the desired data for each player for each season.
max_consistency = {}
for season in all_seasons:
    max_consistency[season] = 0


for player in players:
    # get player's position
    player["position"] = positions[player["element_type"]]
    player["value_points"] = 0

    # get player's team and next fixture difficulty rating
    for team in teams:
Code example #28
File: run.py Project: dguarino/SlowDyn
            os.makedirs(opts.data_folder+str(run))
        shutil.copy('./'+opts.param_file, opts.data_folder+ str(run)+'/'+opts.param_file+'_'+str(comb)+'.py')

        if not opts.analysis:
            already_computed = 0
            for pop in params['Populations'].keys():
                if os.path.exists(opts.data_folder + str(run) +'/'+pop+str(comb)+'.pkl'):
                    already_computed = already_computed + 1
            if already_computed > 0:
                print "already computed"
            else:
                Populations = h.build_network(sim,params)
                h.record_data(params, Populations)
                h.perform_injections(params, Populations)
                h.run_simulation(sim,params)
                h.save_data(Populations, opts.data_folder + str(run), str(comb))
                sim.end()
        else :
            if search:
                already_computed = 0
                for pop in params['Populations'].keys():
                    if os.path.exists(opts.data_folder + str(run) +'/'+pop+str(comb)+'.png'):
                        already_computed = already_computed + 1
                if already_computed > len(params['Populations']) - 1:
                    print "already analysed"
                else:
                    ratio,fqcy,psd,freq, fqcy_ratio = h.analyse(params, opts.data_folder + str(run), str(comb), removeDataFile)
                    print "ratio",ratio,"fqcy",fqcy,"psd",psd,"freq",freq
                    
                    gen = (pop for pop in params['Populations'].keys() if pop != 'ext')
                    for pop in gen:
Code example #29
File: run_closed.py Project: dguarino/SlowDyn
def run_simulation(run):

    info = {}
    # run combinations
    for i,comb in enumerate(combinations):
        print "param combination",i, "trial",run
        print "current set:",comb

        # replacement
        for ckey,val in comb.iteritems():
            keys = ckey.split('.') # get list from dotted string
            replace(params,keys,val)

        # save parameters in the data_folder
        if not os.path.exists(opts.data_folder+str(run)):
            os.makedirs(opts.data_folder+str(run))
        shutil.copy('./'+opts.param_file, opts.data_folder + str(run)+'/'+opts.param_file+'_'+str(comb)+'.py')

        if not opts.analysis: #run simulation
            # run the simulation only if it hasn't already been run for these parameters
            already_computed = 0
            for pop in params['Populations'].keys():
                if os.path.exists(opts.data_folder + str(run) +'/'+pop+str(comb)+'.pkl'):
                    already_computed = already_computed + 1
            if already_computed > 0:
                print "already computed"
            else:
                Populations = h.build_network(sim,params)
                h.record_data(params, Populations)
                h.perform_injections(params, Populations)
                print "Running Network"
                timer = Timer()
                timer.reset()
                inject_spikes = partial(inject_spikes_pop, Populations=Populations)
                sim.run(params['run_time'], [inject_spikes])
                simCPUtime = timer.elapsedTime()
                print "Simulation Time: %s" % str(simCPUtime)
                h.save_data(Populations, opts.data_folder + str(run), str(comb))
                sim.end()

        else : #do analysis
            if search: #then compute the csv needed for the search maps
                already_analysed = 0 #analyse only those that haven't already been analysed
                for pop in params['Populations'].keys():
                    if os.path.exists(opts.data_folder + str(run) +'/'+pop+str(comb)+'.png'):
                        already_analysed = already_analysed + 1
                if already_analysed >= len(params['Populations'])-1 :
                    print "already analysed"
                else:	
                    ratio,fqcy,psd,freq, fqcy_ratio = h.analyse(params, opts.data_folder + str(run), str(comb), opts.remove)
                    # save computed values in csv files
                    gen = (pop for pop in params['Populations'].keys() if pop != 'ext')
                    for pop in gen:
                        if i == 0:
                            with open(opts.data_folder+ str(run)+'/map-'+pop+'.csv', 'wb') as csvfile:
                                mywriter = csv.writer(csvfile)
                                mywriter.writerow( ['#'+str(testParams[1])+ ':' +str(search[testParams[1]]) ] )
                                mywriter.writerow( ['#'+str(testParams[0])+ ':' +str(search[testParams[0]]) ] )

                            with open(opts.data_folder+ str(run)+'/psdmap-'+pop+'.csv', 'wb') as csvfile:
                                mywriter = csv.writer(csvfile)
                                mywriter.writerow( ['#'+str(testParams[1])+ ':' +str(search[testParams[1]]) ] )
                                mywriter.writerow( ['#'+str(testParams[0])+ ':' +str(search[testParams[0]]) ] )
                                if pop in freq:
                                    mywriter.writerow(freq[pop])
                            info[pop] = []

                        if pop in ratio and pop in fqcy:
                            info[pop].append([ratio[pop],fqcy[pop],fqcy_ratio[pop]])
                            if (i+1)%len(search[testParams[1]]) == 0:
                                with open(opts.data_folder+str(run)+'/map-'+pop+'.csv', 'a') as csvfile:
                                    mywriter = csv.writer(csvfile)
                                    mywriter.writerow(info[pop])
                                    info[pop] = []
                        if pop in psd:
                            with open(opts.data_folder+str(run)+'/psdmap-'+pop+'.csv', 'a') as csvfile:
                                mywriter = csv.writer(csvfile)
                                mywriter.writerow(psd[pop])

            else:
                h.analyse(params, opts.data_folder+str(run), str(comb), opts.remove)