Code example #1
 def test_pagination(self, params, get_response):
     if params['page'] > 0:
         with allure.step('Get the lists of region names'):
             _first_page_items = get_data(page=1,
                                          page_size=15).json().get('items')
             first_regions_names = {
                 region.name for region in get_regions(_first_page_items)}
             other_page_items = get_response.json().get('items')
             other_regions_names = {
                 region.name for region in get_regions(other_page_items)}
         with allure.step(
                 f'Region lists of the first page and page {params["page"]} differ'
         ):
             assert first_regions_names != other_regions_names
     else:
         with allure.step('The request is not processed by the server'):
             assert get_response.status_code == 500
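The helpers get_data and get_regions that this test relies on are not part of the listing. A minimal sketch of what they might look like, assuming a requests-based client and a placeholder API_URL (both hypothetical, not taken from the project):

from types import SimpleNamespace

import requests

API_URL = 'https://example.com/api/regions'  # hypothetical endpoint


def get_data(page=None, page_size=None):
    # Fetch one page of regions; parameters left as None are omitted
    # so the server falls back to its defaults.
    params = {'page': page, 'page_size': page_size}
    return requests.get(API_URL,
                        params={k: v for k, v in params.items()
                                if v is not None})


def get_regions(items):
    # Wrap the raw JSON dicts in objects exposing a .name attribute,
    # which is how the tests read them.
    return [SimpleNamespace(**item) for item in items]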
Code example #2
 def test_default_page_number(self, get_regions_list_class, page=1):
     with allure.step('Get the lists of region names'):
         default_regions_names = {
             region.name for region in get_regions_list_class}
         _response_page_items = get_data(page=page).json().get('items')
         response_regions_names = {
             region.name for region in get_regions(_response_page_items)}
     with allure.step(f'The page number corresponds to {page}'):
         assert default_regions_names == response_regions_names
Code example #3
File: server.py Project: InfoScienceLabs/Godot
def _updateRegions():
    global locations

    log.info("started updating regions")
    accounts = Accounty.select()
    for account in accounts:
        log.info(account.api_key)
        regions = get_regions(account.api_key)
        log.info(str(regions))
        locations[account.id] = regions
    log.info("finished updating regions")
Code example #4
File: plotting.py Project: WeilerP/us-election-tda
def get_region_plot(graph, data, layout, columns_to_color, node_elements,
                    colorscale):
    '''Function to generate a figure of the mapper graph colored by identified
    regions
    
    Parameters
    ----------
    graph : igraph object
        Mapper graph
    data : ndarray (n_samples x n_dim)
        Data used for mapper
    layout : igraph.layout.Layout
        Layout of graph
    columns_to_color : list
        List of columns to color by
    node_elements : tuple
        Tuple of arrays where array at position x contains the data points for
        node x
    colorscale : list
        List of colors to use for each region

    Returns
    -------
    fig : plotly figure object
    '''

    regions = utils.get_regions()

    # set node color:
    # 1. assign to each node of a region its color (zip())
    # 2. convert zip elements to list (map())
    # 3. flatten list (itertools.chain())
    # 4. sort values by keys
    # 5. convert to ordered dictionary
    # 6. extract values and convert to list
    node_color = list(
        collections.OrderedDict(
            sorted(itertools.chain(
                *map(list,
                     [zip(regions[region],
                          itertools.repeat(colorscale[region]))
                      for region in range(len(regions))])))).values())

    # set plotly arguments:
    # 1. set uniform node size
    # 2. hide scale of marker color
    plotly_kwargs = {
        'node_trace_marker_size': [1] * len(node_elements),
        'node_trace_marker_showscale': False}

    return visualization.create_network_2d(graph, data, layout, node_color,
                                           columns_to_color=columns_to_color,
                                           plotly_kwargs=plotly_kwargs)
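The six numbered steps above implement a fairly dense pipeline. A more direct equivalent, assuming each node id belongs to exactly one region (a sketch for clarity, not the project's code):

# Map every node id to its region's color, then order colors by node id.
color_by_node = {node: colorscale[region]
                 for region in range(len(regions))
                 for node in regions[region]}
node_color = [color_by_node[node] for node in sorted(color_by_node)]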
Code example #5
File: repair-floating-ips.py Project: canarie/dair
CHAINS = [PREROUTING, OUTPUT, SNAT]

logger = logging.getLogger('repair-floating-ips')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('/var/log/dair/repair-floating-ips.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)

region_rc_filename = sys.argv[1]
region = utils.get_regions(region_rc_filename)[0]

url_parsed = urlparse(region.url)
novaRegion = boto.ec2.regioninfo.RegionInfo(name="nova",
                                            endpoint=url_parsed.hostname)
conn = boto.connect_ec2(aws_access_key_id=region.access_key,
                        aws_secret_access_key=region.secret_access_key,
                        is_secure=True,
                        region=novaRegion,
                        port=url_parsed.port,
                        path=url_parsed.path)

addresses = conn.get_all_addresses()
used_floating_ips = set()

for address in addresses:
    if not address.instance_id.startswith('None'):
Code example #6
def get_regions_list_class(get_response_class):
    yield get_regions(get_response_class.json().get('items'))
Code example #7
def get_regions_list(get_response):
    yield get_regions(get_response.json().get('items'))
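Examples #6 and #7 read like pytest fixtures whose decorators were stripped during extraction. If so, they would presumably be registered along these lines; the class scope and the get_response/get_response_class fixtures they depend on are assumptions:

import pytest


@pytest.fixture(scope='class')
def get_regions_list_class(get_response_class):
    # Parse the shared class-level response once per test class.
    yield get_regions(get_response_class.json().get('items'))


@pytest.fixture
def get_regions_list(get_response):
    # Parse the per-test response.
    yield get_regions(get_response.json().get('items'))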
Code example #8
File: main.py Project: davelaser/sandbox
			
		path = os.path.join(os.path.dirname(__file__),'templates/version3/includes/guardian-data.html')
		self.response.out.write(template.render(path, global_mashup))
	else:
		
		memcacheKey = str(destination)+":"+str(price)+":"+str(startDate.date().isoformat())+":"+str(endDate.date().isoformat())
		memcachedHotels = memcache.get(key=memcacheKey, namespace='lastminute')
		logging.info("Looking up MEMCACHE for : "+memcacheKey)
		logging.info(memcachedHotels)
		if memcachedHotels is not None:
			global_mashup['hotels'] = memcachedHotels
			path = os.path.join(os.path.dirname(__file__),'templates/version3/includes/'+info_type+'.html')
			self.response.out.write(template.render(path, global_mashup))
			logging.info("AjaxAPIHandler_v3() : Retrieving Hotels from MEMCACHE for destination "+str(destination))
		else:
			regions = utils.get_regions()
			if destination in regions:
				hotelsData = datastore.get_hotels_by_region(destination, price)
				logging.info(hotelsData)
				if hotelsData is None:
					logging.info("No results for "+str(destination))
					path = os.path.join(os.path.dirname(__file__),'templates/version3/includes/no-results.html')
					self.response.out.write(template.render(path, global_mashup))
					return
				logging.info("Setting hotels to MEMCACHE with key : "+memcacheKey)
				memcache.set(key=memcacheKey, value=hotelsData, time=6000, namespace='lastminute')
				global_mashup['region'] = destination
				
			else:
				# [ST]TODO: Reinstate arguments: rating
				hotelsData = datastore.get_hotels(destination, price, startDate, endDate, None)
Code example #9
import os
import csv

from utils import paths, clean, get_regions

regions, rev_regions = get_regions()

os.system("unzip data/datagov.zip -d data")

for category, category_paths in paths.iteritems():
    for path, sti in category_paths:
        path = "data/datagov/" + category + "/" + path

        data = []
        with open(path, "rb") as csvfile:
            reader = csv.reader(csvfile, delimiter=",", quotechar="\"")
            for row in reader:
                data.append([clean(field) for field in row])

        with open(path, "wb") as csvfile:
            writer = csv.writer(csvfile,
                                delimiter=",",
                                quotechar="\"",
                                quoting=csv.QUOTE_NONNUMERIC)

            for row in data:
                writer.writerow(row)
Code example #10
def main():
    # Loads and manages the input arguments
    args = arg_parser()

    # Config file
    if args.config_file is not None:
        config_file = args.config_file
    else:
        config_file = "config.ini"

    if args.start_day is None or args.end_day is None:
        # Setup default temporal period in which to work
        # TODO: Print on logger we are using default dates
        start = utils.get_yesterday_midnight_datetime()
        end = utils.get_today_midnight_datetime()
    else:
        # Setup user temporal period in which to work
        # TODO: Print on logger we are using user dates
        start = utils.get_datetime_from_args(args.start_day)
        end = utils.get_datetime_from_args(args.end_day)
    start_timestamp = utils.get_timestamp(start)
    end_timestamp = utils.get_timestamp(end)

    # Read config file
    if not os.path.isfile(config_file):
        print("Configuration file not found: {}").format(config_file)
        sys.exit(-1)
    try:
        config = ConfigParser()
        config.optionxform = str
        config.read(config_file)
    except Exception as e:
        print("Problem parsing config file: {}").format(e)
        sys.exit(-1)

    # Read main config file
    mainconfig_file = config.get("mainconfig", "path")
    if not os.path.isfile(mainconfig_file):
        print("Main configuration file not found: {}").format(mainconfig_file)
        sys.exit(-1)
    try:
        main_config = ConfigParser()
        # Preserve case when reading configfile
        main_config.optionxform = str
        main_config.read(mainconfig_file)
    except Exception as e:
        print("Problem parsing main config file: {}").format(e)
        sys.exit(-1)

    # Get excluded regions, if any
    excluded_regions = json.loads(config.get("regionexclude", "regions"))

    # Get included services, if any
    included_services = json.loads(config.get("servicesinclude", "services"))

    # Setup monasca collector
    CONF_M_SECTION = 'monasca'
    keystone_endpoint = config.get('keystone', 'uri')
    monasca_endpoint = config.get('monasca', 'uri')
    user = config.get('profile', 'user')
    password = config.get('profile', 'password')
    collector = CollectorMonasca(
        user, password, monasca_endpoint, keystone_endpoint)

    # Setup mysql persister
    persister = PersisterMysql(config, start, end)

    # Get regions to work on
    if args.region_id is None:
        regions = utils.get_regions(main_config)
    else:
        regions = [args.region_id]

    # Loop each region
    for region in regions:
        # Skip excluded regions
        if region in excluded_regions:
            continue

        # Retrieve sanity checks aggregation
        day_agg = model.Aggregation('d', 86400, 'avg')
        sanities_data = collector.get_sanities_avg(
            region, day_agg.period, start_timestamp, end_timestamp)
        # Adapt and persist daily average of sanity checks aggregation into hourly base
        sanities = []
        for sanity_data in sanities_data:
            sanity = model_adapter.from_monasca_sanity_to_sanity(sanity_data, day_agg)
            sanities.append(sanity)
        persister.persist_sanity(sanities)

        # Retrieve processes aggregation
        hour_agg = model.Aggregation('h', 3600, 'avg')
        services_processes = collector.get_services_processes_avg(
            region, hour_agg.period, start_timestamp, end_timestamp, included_services)

        # Calculate and map processes aggregation
        processes = []
        for service in services_processes:
            for process_name in services_processes[service].keys():
                process_values = services_processes[service][process_name]
                process = model_adapter.from_monasca_process_to_process(process_values, hour_agg)
                processes.append(process)

        # Adapt and persist processes aggregation
        persister.persist_process(processes)

    # Calculate and persist host_service daily aggregation
    persister.persist_host_service_daily_avg(start, end)

    # Calculate and persist host_service monthly aggregation
    if start.month != end.month:
        s_month, e_month = utils.get_range_for_daily_agg(start, end)
        persister.persist_host_service_monthly_avg(s_month, e_month)
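Judging from the config.get and json.loads calls above, the script expects a config.ini with at least the following sections and keys; every value below is a placeholder, not a real path, endpoint, or credential:

[mainconfig]
path = /etc/monitoring/main_config.ini

[regionexclude]
regions = ["RegionToSkip"]

[servicesinclude]
services = ["nova", "glance"]

[keystone]
uri = http://keystone.example.com:5000/v3

[monasca]
uri = http://monasca.example.com:8070/v2.0

[profile]
user = monitoring
password = secret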
Code example #11
File: plotting.py Project: WeilerP/us-election-tda
def get_county_plot_by_region(data, colorscale, node_elements, fips):
    '''Function to create figure of US with counties colored by region they
    belong to

    Parameters
    ----------
    data : ndarray (n_samples x n_dim)
        Data used for mapper
    colorscale : list
        List with colors to color counties by
    node_elements : tuple
        Tuple of arrays where array at position x contains the data points for
        node x
    fips : list
        List of Federal Information Processing Standard (FIPS) county codes

    Returns
    -------
    fig: plotly figure object
    '''

    # convert colorscale from hex format to rgb
    colorscale = dict(zip(map(str, range(len(colorscale))),
                          utils.hex2rgb(colorscale.values())))
    # define color of counties belonging to two regions as their mean rgb value
    colorscale['1-3'] = utils.mean_rgb([colorscale['1'],
                                        colorscale['3']])
    colorscale['2-3'] = utils.mean_rgb([colorscale['2'],
                                        colorscale['3']])
    colorscale['3-4'] = utils.mean_rgb([colorscale['3'],
                                        colorscale['4']])
    colorscale['4-5'] = utils.mean_rgb([colorscale['4'],
                                        colorscale['5']])

    elements_per_region = utils.get_data_per_region(utils.get_regions(),
                                                    node_elements)

    # assign each county its color
    county_color = np.zeros(data.shape[0], dtype='int')
    # region 0
    county_color[list(elements_per_region[0])] = 0
    # region 1 and not 3
    county_color[list(elements_per_region[1]
                      .difference(elements_per_region[3]))] = 1
    # region 2 and not 3
    county_color[list(elements_per_region[2]
                      .difference(elements_per_region[3]))] = 2
    # region 3 and neither 1 nor 2
    county_color[list(elements_per_region[3]
                      .difference(elements_per_region[1])
                      .difference(elements_per_region[2]))] = 3
    # region 4 but not 3
    county_color[list(elements_per_region[4]
                      .difference(elements_per_region[3]))] = 4
    # region 5 but not 4
    county_color[list(elements_per_region[5]
                      .difference(elements_per_region[4]))] = 5

    # region 1 and 3
    county_color[list(elements_per_region[1]
                      .intersection(elements_per_region[3]))] = 6
    # region 2 and 3
    county_color[list(elements_per_region[2]
                      .intersection(elements_per_region[3]))] = 7
    # region 3 and 4
    county_color[list(elements_per_region[3]
                      .intersection(elements_per_region[4]))] = 8
    # region 4 and 5
    county_color[list(elements_per_region[4]
                      .intersection(elements_per_region[5]))] = 9

    county_color = county_color.tolist()

    return get_county_plot(
        fips=fips, values=county_color,
        colorscale=[f'rgb{rgb}' for rgb in list(colorscale.values())])
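utils.hex2rgb and utils.mean_rgb are project helpers whose definitions are not shown. Judging only by how they are called here, plausible sketches would be (assumptions, not the project's code):

def hex2rgb(hex_colors):
    # Convert an iterable of '#rrggbb' strings into (r, g, b) tuples.
    return [tuple(int(h.lstrip('#')[i:i + 2], 16) for i in (0, 2, 4))
            for h in hex_colors]


def mean_rgb(rgb_colors):
    # Average a list of (r, g, b) tuples channel by channel.
    return tuple(int(sum(channel) / len(rgb_colors))
                 for channel in zip(*rgb_colors))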
Code example #12
def main():
    # or call logging.basicConfig()
    logging.info('Starting logger for monitoringHisto')
    LOG = logging.getLogger(__name__)

    # Loads and manages the input arguments
    args = arg_parser()

    # Config file
    if args.config_file is not None:
        config_file = args.config_file
    else:
        config_file = "config.ini"

    if args.start_day is None or args.end_day is None:
        # Setup default temporal period in which to work
        # TODO: Print on logger we are using default dates
        start = utils.get_yesterday_midnight_datetime()
        end = utils.get_today_midnight_datetime()
    else:
        # Setup user temporal period in which to work
        # TODO: Print on logger we are using user dates
        start = utils.get_datetime_from_args(args.start_day)
        end = utils.get_datetime_from_args(args.end_day)
    start_timestamp = utils.get_timestamp(start)
    end_timestamp = utils.get_timestamp(end)

    # Read config file
    if not os.path.isfile(config_file):
        print("Configuration file not found: {}").format(config_file)
        sys.exit(-1)
    try:
        config = ConfigParser()
        config.optionxform = str
        config.read(config_file)
    except Exception as e:
        print("Problem parsing config file: {}").format(e)
        sys.exit(-1)

    # Read main config file
    mainconfig_file = config.get("mainconfig", "path")
    if not os.path.isfile(mainconfig_file):
        print("Main configuration file not found: {}").format(mainconfig_file)
        sys.exit(-1)
    try:
        main_config = ConfigParser()
        # Preserve case when reading configfile
        main_config.optionxform = str
        main_config.read(mainconfig_file)
    except Exception as e:
        print("Problem parsing main config file: {}").format(e)
        sys.exit(-1)
    # Set logger level, if any
    l_level = config.get("logger", "log_level")
    if l_level:
        LOG.setLevel(l_level)
    LOG.debug("Prova")

    # Get excluded regions, if any
    excluded_regions = json.loads(config.get("regionexclude", "regions"))

    # Get included services, if any
    included_services = json.loads(config.get("servicesinclude", "services"))

    # Setup monasca collector
    CONF_M_SECTION = 'monasca'
    keystone_endpoint = config.get('keystone', 'uri')
    monasca_endpoint = config.get('monasca', 'uri')
    user = config.get('profile', 'user')
    password = config.get('profile', 'password')
    collector = CollectorMonasca(
        user, password, monasca_endpoint, keystone_endpoint)

    # Setup mysql persister
    persister = PersisterMysql(config, start, end)

    # Get regions to work on
    if args.region_id is None:
        regions = utils.get_regions(main_config)
    else:
        regions = [args.region_id]

    # Loop each region
    for region in regions:
        # Skip excluded regions
        if region in excluded_regions:
            continue

        # Retrieve sanity checks aggregation
        day_agg = model.Aggregation('d', 86400, 'avg')
        sanities_data = collector.get_sanities_avg(
            region, day_agg.period, start_timestamp, end_timestamp)
        if sanities_data is not None:
            # Adapt and persist daily average of sanity checks aggregation into
            # hourly base
            sanities = []
            for sanity_data in sanities_data:
                sanity = model_adapter.from_monasca_sanity_to_sanity(
                    sanity_data, day_agg)
                sanities.append(sanity)
            persister.persist_sanity(sanities)
        else:
            # No sanities data available on the specified range, rise a warning
            LOG.warning("No sanities data available on the specified range "
                        "for region: %s", region)
        # Retrieve processes aggregation
        hour_agg = model.Aggregation('h', 3600, 'avg')
        services_processes = collector.get_services_processes_avg(
            region, hour_agg.period, start_timestamp,
            end_timestamp, included_services)

        # Calculate and map processes aggregation
        processes = []
        for service in services_processes:
            if services_processes[service] is None:
                LOG.warning("No services process data available on the "
                            "specified range for region: %s and service: %s",
                            region, service)
                continue
            for process_name in services_processes[service].keys():
                process_values = services_processes[service][process_name]
                process = model_adapter.from_monasca_process_to_process(
                    process_values, hour_agg)
                processes.append(process)

        # Adapt and persist processes aggregation
        persister.persist_process(processes)

    # Calculate and persist host_service daily aggregation
    persister.persist_host_service_daily_avg(start, end)

    # Calculate and persist host_service monthly aggregation
    if start.month != end.month:
        s_month, e_month = utils.get_range_for_daily_agg(start, end)
        persister.persist_host_service_monthly_avg(s_month, e_month)
Code example #13
File: application.py Project: rajkorde/covidtrends
from utils import read_data, get_data, get_regions
from charts import get_confirmed_trend, get_recovered_trend, get_death_trend
from update import update_data
from constants import DATA_SOURCE, CODE_SOURCE

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

covidtrends_flask = flask.Flask(__name__)
app = dash.Dash(__name__,
                server=covidtrends_flask,
                external_stylesheets=external_stylesheets)

# Data processing
update_data()
confirmed_df, recovered_df, death_df = read_data()
regions = get_regions(confirmed_df)

# data_text = f'Data Source: <a href="{DATA_SOURCE}">Johns Hopkins CSEE</a>'
# code_text = f'Code Source: <a href="{CODE_SOURCE}">Github</a>'

footer_text_md = dcc.Markdown(f"""
    Data Source: [Johns Hopkins CSEE]({DATA_SOURCE})
    Code Source: [Github]({CODE_SOURCE})
    """)

dropdown_layout = html.Div([
    html.Div([
        html.B('Country'),
        dcc.Dropdown(id='countries-dd',
                     options=[{
                         'label': k,
Code example #14
File: repair-floating-ips.py Project: canarie/dair
logger = logging.getLogger('repair-floating-ips')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('/var/log/dair/repair-floating-ips.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)

region_rc_filename = sys.argv[1]
region = utils.get_regions(region_rc_filename)[0]

url_parsed = urlparse(region.url)
novaRegion = boto.ec2.regioninfo.RegionInfo(name="nova",
                                            endpoint=url_parsed.hostname)
conn = boto.connect_ec2(aws_access_key_id=region.access_key,
                        aws_secret_access_key=region.secret_access_key,
                        is_secure=True,
                        region=novaRegion,
                        port=url_parsed.port,
                        path=url_parsed.path)

addresses = conn.get_all_addresses()
used_floating_ips = set()

for address in addresses:
Code example #15
def get_region_plot(pipe, data, layout, node_elements, colorscale):
    '''Function to generate a figure of the mapper graph colored by identified
    regions
    
    Parameters
    ----------
    pipe : MapperPipeline
        The Mapper pipeline to compute the mapper-graph
    data : ndarray (n_samples x n_dim)
        Data used for mapper
    layout : igraph.layout.Layout
        Layout of graph
    node_elements : tuple
        Tuple of arrays where array at position x contains the data points for
        node x
    colorscale : list
        List of colors to use for each region

    Returns
    -------
    fig : plotly figure object
    '''

    regions = utils.get_regions()

    # set node color:
    # 1. assign to each node of a region its color (zip())
    # 2. convert zip elements to list (map())
    # 3. flatten list (itertools.chain())
    # 4. sort values by keys
    # 5. convert to ordered dictionary
    # 6. extract values and convert to list
    node_color = np.array(
        list(
            collections.OrderedDict(
                sorted(
                    itertools.chain(*map(list, [
                        zip(regions[region],
                            itertools.repeat(colorscale[region]))
                        for region in range(len(regions))
                    ])))).values()))
    # set plotly arguments:
    # 1. set uniform node size
    # 2. hide scale of marker color
    plotly_kwargs = {
        'node_trace_marker_size': [1] * len(node_elements),
        'node_trace_marker_showscale': False,
        'node_trace_hoverlabel': node_color,
        'node_trace_marker_color': node_color
    }

    fig = plot_static_mapper_graph(pipe,
                                   data,
                                   layout,
                                   layout_dim=2,
                                   color_by_columns_dropdown=False,
                                   plotly_kwargs=plotly_kwargs)
    # update colors to fig
    fig._data[1]['marker'][
        'color'] = node_color  # hack around with the new api
    return fig