Ejemplo n.º 1
0
def eu4(coord: GeoCoord) -> MapCoord:
    """Project a geographic coordinate onto the EU4 game map.

    The EU4 map is mostly a Miller projection, with several
    region-specific distortions applied to the latitude/longitude first
    (see the numbered comments below).

    Args:
        coord: geographic coordinate with ``lat``/``lon`` in radians.

    Returns:
        MapCoord with x and y scaled to the map; ``MapCoord(1, 1)`` is
        returned as an out-of-map sentinel for trimmed latitudes.
    """
    lat, lon = coord.lat, coord.lon
    # the eu4 map is mostly miller, but:
    # (1) the poles are trimmed.  Return a MapCoord sentinel instead of
    # a bare tuple so the return type matches the annotation and the
    # normal return path below.
    if not radians(-56) < lat < radians(72):
        return MapCoord(1, 1)
    # (2) east siberia is stretched
    if radians(50) < lat and radians(154) < lon:
        lat += (lon - radians(154)) / 3
    # (3) australia is shrunken and moved northward
    if lat < radians(-11) and radians(112) < lon < radians(154):
        lat = remap(lat, radians(-39), radians(-11), radians(-35),
                    radians(-11))
        lon = remap(lon, radians(112), radians(154), radians(116),
                    radians(151))
    # (4) new zealand is moved northward
    if lat < radians(-33) and radians(165) < lon:
        lat += radians(8)
    # (5) greenland and iceland and jan mayen are moved northward
    if radians(-57) < lon < radians(-8) and radians(59) < lat:
        lat += radians(5)
    # (6) the americas are moved northward
    elif lon < radians(-34):
        lat += radians(13)
        # (7) in addition, the bottom of south america is squished
        if lat < radians(-31):
            lat = remap(lat, radians(-45), radians(-31), radians(-37),
                        radians(-31))
    y, x = miller(clamp(lat, -pi / 2, pi / 2), lon)
    # rescale the slice of the miller y-range the map actually uses to [-1, 1]
    y = remap(y, -28 / 90, 61 / 90, -1, 1)
    return MapCoord(x, y)
Ejemplo n.º 2
0
def makeHeighmap(path, name, size, points, heights, tile):
    """Rasterize sampled heights into a ``size`` x ``size`` PNG heightmap.

    Each sample point is mapped into pixel space, a Voronoi diagram is
    built over the sample pixels with OpenCV, and every cell is filled
    with its site's height encoded across two 8-bit channels.

    Args:
        path: output directory.
        name: output file name (without the ``.png`` extension).
        size: output image width and height in pixels.
        points: sequence of (x, y) sample coordinates (mercator).
        heights: per-point height values, same length as ``points``.
        tile: unused here (the tile-based bbox call is commented out);
            kept for interface compatibility.
    """
    # bail if the inputs don't line up
    total_samples = len(points)
    if total_samples != len(heights):
        print("Lengths don't match")
        return

    # convert mercator to pixels and map pixels to height values
    # bbox = getTileMercatorBoundingBox(tile[0], tile[1], tile[2])
    bbox = getBoundingBox(points)

    point_heights = {}
    for i in range(total_samples):
        x = int(remap(points[i][0], bbox[0], bbox[1], 0, size - 1))
        # note the flipped vertical axis: image rows grow downward
        y = int(remap(points[i][1], bbox[2], bbox[3], size - 1, 0))
        point_heights[(x, y)] = heights[i]

    # subdivision from opencv, can do voronoi and its dual the delaunay triangulation
    subdiv = cv2.Subdiv2D((0, 0, size, size))
    # py3 fix: dicts iterate their keys directly (iterkeys() was removed)
    for p in point_heights:
        subdiv.insert(p)
    (facets, centers) = subdiv.getVoronoiFacetList([])

    # an image where we will rasterize the voronoi cells
    image = numpy.zeros((size, size, 3), dtype='uint8')
    # py3 fix: range replaces the removed xrange
    for i in range(len(facets)):
        # numpy.int was removed in numpy 1.24; the builtin int is equivalent
        ifacet = numpy.array(list(facets[i]), int)
        # the color is the height at the voronoi site for this cell, offset to bring to unsigned 16 bits
        height = point_heights[(centers[i][0], centers[i][1])] + 32768
        # to pack them into a standard texture we split the high and low order bytes, note the order is G B R
        # NOTE(review): byte packing normally uses 256, not 255 — kept
        # as-is to stay compatible with the existing decoder.
        color = (int(math.floor(height % 255)), int(math.floor(height / 255) % 255), 0)
        # we exploit the fact that voronoi cells are convex polygons for faster rasterization
        # cv2.CV_AA was removed in OpenCV 3.x; LINE_AA is the replacement
        cv2.fillConvexPoly(image, ifacet, color, cv2.LINE_AA, 0)

    # we'll keep the result here
    cv2.imwrite(path + '/' + name + '.png', image)
Ejemplo n.º 3
0
def victoria2(coord: GeoCoord) -> MapCoord:
    """Project a geographic coordinate onto the Victoria 2 game map.

    Identical to the EU4 projection except that Australia and South
    America are left unchanged and New Zealand moves less.

    Args:
        coord: geographic coordinate with ``lat``/``lon`` in radians.

    Returns:
        MapCoord with x and y scaled to the map; ``MapCoord(1, 1)`` is
        returned as an out-of-map sentinel for trimmed latitudes.
    """
    lat, lon = coord.lat, coord.lon
    # the victoria 2 map is identical to the eu4 map, except AUS and south america are left unchanged.
    # (1) the poles are trimmed.  Return a MapCoord sentinel instead of
    # a bare tuple so the return type matches the annotation and the
    # normal return path below.
    if not radians(-56) < lat < radians(72):
        return MapCoord(1, 1)
    # (2) east siberia is stretched
    if radians(50) < lat and radians(154) < lon:
        lat += (lon - radians(154)) / 3
    # (3) new zealand is moved northward, but less than eu4
    if lat < radians(-33) and radians(165) < lon:
        lat += radians(4)
    # (4) greenland and iceland and jan mayen are moved northward
    if radians(-57) < lon < radians(-8) and radians(59) < lat:
        lat += radians(5)
    # (5) the americas are moved northward
    elif lon < radians(-34):
        lat += radians(13)
    y, x = miller(clamp(lat, -pi / 2, pi / 2), lon)
    # rescale the slice of the miller y-range the map actually uses to [-1, 1]
    y = remap(y, -32 / 90, 61 / 90, -1, 1)
    return MapCoord(x, y)
def main(args):
    """Grid-search QAOA angle parameters for max-cut on a graph.

    Enumerates per-round (beta, gamma) angle assignments via
    ``common.dfs``, simulates each circuit on the local QASM simulator,
    tracks the configuration with the highest expected cut, and writes
    that best configuration to a JSON file derived from the graph path.
    """
    print('')
    print('working on: {}'.format(args.graph))

    graph = common.load_graph(args.graph)
    print('graph ({},{})'.format(len(graph.nodes), len(graph.edges)))

    if args.remap:
        print('')
        print('remapping:')
        graph, mapping = common.remap(graph)
        for k, v in mapping.items():
            print('  {} -> {}'.format(v, k))

    print('rounds: {}'.format(args.rounds))
    print('steps: {}'.format(args.steps))
    print('shots: {}'.format(args.shots))

    # candidate angles: beta in (0, scale*pi], gamma in (0, scale*2*pi]
    beta_vals = list(
        common.frange(0.0, args.sample_range_scale * pi, args.steps,
                      include_start=False))
    gamma_vals = list(
        common.frange(0.0, args.sample_range_scale * 2.0 * pi, args.steps,
                      include_start=False))

    print('beta: {}'.format(beta_vals))
    print('gamma: {}'.format(gamma_vals))

    # one beta and one gamma variable per round, each ranging over the
    # sampled values above
    names = []
    values = {}
    for r in range(args.rounds):
        bn = beta_template.format(r)
        names.append(bn)
        values[bn] = beta_vals

        gn = gamma_template.format(r)
        names.append(gn)
        values[gn] = gamma_vals

    best_config = None
    best_config_value = 0

    for config in common.dfs(names, values, {}):
        num_bits = graph.max_node

        qp = QuantumProgram()
        qr = qp.create_quantum_register('qr', num_bits)
        cr = qp.create_classical_register('cr', num_bits)
        qc = qp.create_circuit('qaoa', [qr], [cr])

        # uniform superposition over all bit strings
        for i in range(num_bits):
            qc.h(qr[i])

        for r in range(args.rounds):
            beta = config[beta_template.format(r)]
            gamma = config[gamma_template.format(r)]

            # mixing operator
            for i in range(num_bits):
                qc.u3(2 * beta, -pi / 2, pi / 2, qr[i])

            # cost operator, one term per graph edge
            for e in graph.edges:
                qc.x(qr[e.fr])
                qc.u1(-gamma / 2.0, qr[e.fr])
                qc.x(qr[e.fr])
                qc.u1(-gamma / 2.0, qr[e.fr])
                qc.cx(qr[e.fr], qr[e.to])
                qc.x(qr[e.to])
                qc.u1(gamma / 2.0, qr[e.to])
                qc.x(qr[e.to])
                qc.u1(-gamma / 2.0, qr[e.to])
                qc.cx(qr[e.fr], qr[e.to])

        qc.measure(qr, cr)

        result = qp.execute(['qaoa'],
                            backend='local_qasm_simulator',
                            shots=args.shots)

        data = result.get_data('qaoa')
        ec = common.expected_cut(graph, data['counts'])

        if ec > best_config_value:
            best_config = config
            best_config_value = ec

            print('')
            print('new best: {}'.format(best_config))
            print('expected cut: {}'.format(best_config_value))
            print('counts: {}'.format(data['counts']))
        else:
            # progress indicator for non-improving configurations
            sys.stdout.write('.')
            sys.stdout.flush()

    if best_config is None:
        # no configuration produced a positive expected cut; avoid
        # subscripting None below and write nothing
        print('')
        print('no improving configuration found')
        return

    json_config = {
        'steps': args.steps,
        'expected_cut': best_config_value,
        'rounds': []
    }
    rounds = json_config['rounds']
    for r in range(args.rounds):
        # BUG FIX: read the angles from best_config, not from the loop
        # variable `config`, which holds the *last* configuration tried
        # and would not match the reported expected_cut.
        beta = best_config[beta_template.format(r)]
        gamma = best_config[gamma_template.format(r)]
        rounds.append({'beta': beta, 'gamma': gamma})

    config_file = args.graph.replace('.qx',
                                     '_config_{:02d}.json'.format(args.rounds))
    print('write: {}'.format(config_file))
    with open(config_file, 'w') as file:
        file.write(json.dumps(json_config, **common.json_dumps_kwargs))
Ejemplo n.º 5
0
def contingency_table(wave):
    """Build a census-style contingency table from one UKHLS survey wave.

    Reads the household response file for the given wave, recodes the
    survey variables onto census category codes, then cross-tabulates
    household type against tenure, rooms, household size and bedrooms.

    Args:
        wave: 1-based wave number; 1 maps to file prefix "a", 2 to "b", etc.

    Returns:
        pandas.DataFrame with columns
        ['tenure', 'rooms', 'occupants', 'bedrooms', 'hhtype', 'frequency'].
    """
    waveletter = chr(96+wave) # 1 -> "a" etc
    # data_root_dir is a module-level path defined elsewhere in this file
    data = pd.read_csv(data_root_dir / ("UKDA-6614-tab/tab/ukhls_w" + str(wave)) / (waveletter + '_hhresp.tab'), sep = '\t')
    #data = pd.read_csv(data_root_dir / (waveletter+'_hhresp.tab'), sep ='\t')
    # hhsamp = pd.read_csv(data_root_dir / (waveletter+'_hhsamp.tab'), sep ='\t')

    # need to remove cases with one or more missing rooms/beds values *before* aggregating rooms
    # (negative values appear to be missing-data codes in the survey — TODO confirm)
    data = data[(data[waveletter+'_hsrooms'] > 0) & (data[waveletter+'_hsbeds'] >= 0)]
    # sanity checks: no rows remain where exactly one of the two values is missing
    assert len(data[(data[waveletter+'_hsrooms'] > 0) & (data[waveletter+'_hsbeds'] < 0)]) == 0
    assert len(data[(data[waveletter+'_hsrooms'] < 1) & (data[waveletter+'_hsbeds'] >= 0)]) == 0

    # Rooms excl. bedrooms -> to rooms incl. beds, i.e. total 
    data[waveletter+'_hsrooms'] = data[waveletter+'_hsrooms'] + data[waveletter+'_hsbeds']
    # Census automatically turns 0 beds into 1 bed (do this without impacting total)
    data[waveletter+'_hsbeds'] = np.maximum(data[waveletter+'_hsbeds'], 1)

    # mapping to census category values
    tenure_map = { 1: 0, # 2 (owned) in census
               2: 1, # 3 (mortgaged) in census
               3: 2, 4: 2, # 5 (rented social) in census
               5: 3, 6: 3, 7: 3 # 6 (rented private) in census
             }
    # remap/constrain are helpers defined elsewhere in this file
    data = remap(data, waveletter+'_tenure_dv', tenure_map)
    # constrain within range then shift (shift=-1 makes categories zero-based)
    data = constrain(data, waveletter+'_hsrooms', 1, 6, shift=-1)
    data = constrain(data, waveletter+'_hsbeds', 1, 4, shift=-1)
    data = constrain(data, waveletter+'_hhsize', 1, 4, shift=-1)

    # survey household-type codes collapsed onto census categories
    hhtype_map = {
        1: 0, 2: 0, 3: 0, # single occ
        4: 3, 5: 3, # single parent
        6: 1, 8: 1, 10: 1, 11: 1, 12: 1, 19: 1, 20: 1, 21: 1, # couples
        16: 4, 17:4, 18: 4, 22: 4, 23: 4 # mixed
    }
    data = remap(data, waveletter+'_hhtype_dv', hhtype_map)

    # """ randomly assigning couples to married or cohabiting couples """
    # couples = data.index[data[waveletter+'_hhtype_dv'] == 1].tolist()
    # np.random.seed(9238456) # set seed to always get the same "random" numbers
    # to_change = np.random.choice(couples, size = round(0.25*len(couples)), replace=False)
    # data.loc[to_change, waveletter+'_hhtype_dv'] = 2

    # check whether couples are married or cohabiting: look up the household
    # reference person's marital status in the individual-level file and
    # recode cohabiting couples (mastat code 10 — TODO confirm) from 1 to 2
    marital_data = pd.read_csv(data_root_dir / ("UKDA-6614-tab/tab/ukhls_w" + str(wave)) / (waveletter + '_indall.tab'), sep = '\t')[['pidp', waveletter+'_mastat_dv']]
    couples = data.loc[data[waveletter+'_hhtype_dv'] == 1, [waveletter+'_hhtype_dv', waveletter+'_hidp', waveletter+'_hrpid']]
    # keep the original row index so .loc below targets rows in `data`
    couples = couples.merge(marital_data, how='left', left_on=waveletter+'_hrpid', right_on='pidp').set_index(couples.index)
    to_change = couples.index[couples[waveletter+'_mastat_dv']==10.0].to_list()
    data.loc[to_change, waveletter+'_hhtype_dv'] = 2    

    #data[waveletter+'_tenure_dv'].replace(tenure_map, inplace=True)

    # shorthand series for the cross-tabulation below
    a = data[waveletter+'_hhtype_dv']
    b = data[waveletter+'_tenure_dv']
    c = data[waveletter+'_hsrooms']
    d = data[waveletter+'_hhsize']
    e = data[waveletter+'_hsbeds']
    
    # f = hhsamp[waveletter+'_dweltyp']

    ctab = pd.crosstab(a, [b, c, d, e]) #add ',f' after 'e' too include dwelling 
    """ indexing requires unstacking """
    # unstack returns a multiindex Series not a dataframe
    # so construct a dataframe and make the multiindex into columns so we can filter
    ctab_us = pd.DataFrame({"frequency": ctab.unstack()}).reset_index()

    """ rename columns so they are consistent between files """
    ctab_us.columns = ['tenure', 'rooms', 'occupants', 'bedrooms', 'hhtype', 'frequency'] # add 'dwelling' after size if included in data
    return ctab_us
Ejemplo n.º 6
0
def census_map(data, var_name, wave):
    """Recode one survey variable in *data* onto census category values.

    Applies either a value remapping (for tenure / household type) or a
    range constraint with a zero-based shift (for rooms / beds / household
    size), depending on ``var_name``.  Variables not listed in either
    table are returned unchanged.

    Args:
        data: household-level DataFrame containing wave-prefixed columns.
        var_name: variable suffix, e.g. '_hhtype_dv' or '_hsrooms'.
        wave: 1-based wave number; 1 maps to column/file prefix "a", etc.

    Returns:
        The DataFrame with the requested column recoded.
    """
    datadir = Path("data/UKDA-6614-tab/tab/ukhls_w%d" % wave)

    waveletter = chr(96 + wave)  # 1 -> "a" etc

    # survey-code -> census-category lookup tables, keyed by variable suffix
    var_map = {
        '_hhtype_dv': {
            1: 0,
            2: 0,
            3: 0,  # single occ
            4: 3,
            5: 3,  # single parent
            6: 1,
            8: 1,
            10: 1,
            11: 1,
            12: 1,
            19: 1,
            20: 1,
            21: 1,  # couples
            16: 4,
            17: 4,
            18: 4,
            22: 4,
            23: 4  # mixed
        },
        '_tenure_dv': {
            1: 0,  # 2 (owned) in census
            2: 1,  # 3 (mortgaged) in census
            3: 2,
            4: 2,  # 5 (rented social) in census
            5: 3,
            6: 3,
            7: 3  # 6 (rented private) in census
        }
    }

    # [min, max] ranges for the variables that are clamped rather than remapped
    var_con = {'_hsrooms': [1, 6], '_hsbeds': [1, 4], '_hhsize': [1, 4]}

    if var_name in var_map.keys():

        # remap is a helper defined elsewhere in this file
        data = remap(data, waveletter + var_name, var_map[var_name])
        if var_name == '_hhtype_dv':

            # check whether couples are married or cohabiting: look up the
            # household reference person's marital status and recode
            # cohabiting couples (mastat code 10 — TODO confirm) from 1 to 2
            marital_data = pd.read_csv(datadir / (waveletter + '_indall.tab'),
                                       sep='\t')
            marital_data = marital_data[['pidp', waveletter + '_mastat_dv']]
            couples = data.loc[data[waveletter + '_hhtype_dv'] == 1, [
                waveletter + '_hhtype_dv', waveletter + '_hidp', waveletter +
                '_hrpid'
            ]]
            # keep the original row index so .loc below targets rows in `data`
            couples = couples.merge(marital_data,
                                    how='left',
                                    left_on=waveletter + '_hrpid',
                                    right_on='pidp').set_index(couples.index)
            to_change = couples.index[couples[waveletter +
                                              '_mastat_dv'] == 10.0].to_list()
            data.loc[to_change, waveletter + '_hhtype_dv'] = 2

    if var_name in var_con.keys():

        if var_name == '_hsbeds':  # Census automatically turns 0 beds into 1
            data[waveletter + '_hsbeds'] = np.maximum(
                data[waveletter + '_hsbeds'], 1)
        if var_name == '_hsrooms':  # Rooms excl. bedrooms -> to rooms incl. beds, i.e. total
            # NOTE(review): this adds the *raw* _hsbeds column; whether the
            # 0->1 bed adjustment should happen first (as contingency_table
            # orders it) depends on the caller's sequencing — confirm.
            data[waveletter +
                 '_hsrooms'] = data[waveletter +
                                    '_hsrooms'] + data[waveletter + '_hsbeds']

        # constrain is a helper defined elsewhere; shift=-1 makes the
        # resulting categories zero-based
        data = constrain(data,
                         waveletter + var_name,
                         var_con[var_name][0],
                         var_con[var_name][1],
                         shift=-1)

    return data
def main(args):
    """Run a QAOA max-cut circuit for a graph using a saved angle config.

    Loads the graph and a JSON configuration of per-round (beta, gamma)
    angles, builds the QAOA circuit, executes it on the requested backend
    (authenticating against the remote API for non-local backends), and
    prints the state distribution, cut-value distribution, expected cut
    value and a random-cut baseline.
    """
    print('')
    print('working on: {}'.format(args.graph))
    print('with configuration: {}'.format(args.config))

    graph = common.load_graph(args.graph)
    print('graph ({},{})'.format(len(graph.nodes), len(graph.edges)))

    if args.remap:
        print('')
        print('remapping:')
        graph, mapping = common.remap(graph)
        for k, v in mapping.items():
            print('  {} -> {}'.format(v, k))

    # per-round (beta, gamma) angles produced by the search script
    with open(args.config, 'r') as file:
        config = json.load(file)

    print('')
    print('config:')
    print('  rounds: {}'.format(len(config['rounds'])))

    num_bits = graph.max_node

    qp = QuantumProgram()
    qr = qp.create_quantum_register('qr', num_bits)
    cr = qp.create_classical_register('cr', num_bits)
    qc = qp.create_circuit('qaoa', [qr], [cr])

    # uniform superposition over all bit strings
    for i in range(num_bits):
        qc.h(qr[i])

    for r in config['rounds']:
        beta = r['beta']
        gamma = r['gamma']

        # mixing operator
        for i in range(num_bits):
            qc.u3(2 * beta, -pi / 2, pi / 2, qr[i])

        # cost operator, one term per graph edge
        for e in graph.edges:
            qc.x(qr[e.fr])
            qc.u1(-gamma / 2.0, qr[e.fr])
            qc.x(qr[e.fr])
            qc.u1(-gamma / 2.0, qr[e.fr])
            qc.cx(qr[e.fr], qr[e.to])
            qc.x(qr[e.to])
            qc.u1(gamma / 2.0, qr[e.to])
            qc.x(qr[e.to])
            qc.u1(-gamma / 2.0, qr[e.to])
            qc.cx(qr[e.fr], qr[e.to])

    qc.measure(qr, cr)

    print('')
    print('execute:')
    backend_name = args.backend

    # remote backends need API credentials from a local '_config' file
    if not 'local' in backend_name:
        # NOTE(review): this rebinds `config`, clobbering the loaded angle
        # configuration; harmless here because the rounds were already
        # consumed above, but a distinct name would be clearer.
        with open('_config') as data_file:
            config = json.load(data_file)
        assert ('qx_token' in config)
        assert ('qx_url' in config)

        qp.set_api(config['qx_token'], config['qx_url'])
        backends = qp.available_backends()
        print('  backends found: {}'.format(backends))
        assert (backend_name in backends)
    print('  backend: {}'.format(backend_name))

    print('  shots: {}'.format(args.shots))
    result = qp.execute(['qaoa'], backend=backend_name, shots=args.shots)

    # Show the results
    #print(result)
    data = result.get_data('qaoa')

    print('')
    print('data:')
    for k, v in data.items():
        if k != 'counts':
            print('{}: {}'.format(k, v))

    # measured states sorted by frequency, with each state's cut value
    print('')
    print('result:')
    print('  state dist.:')
    for i, state in enumerate(
            sorted(data['counts'].keys(),
                   key=lambda x: data['counts'][x],
                   reverse=True)):
        assignment = common.str2vals(state)
        cv = common.cut_value(graph, assignment)
        print('  {} - {} - {}'.format(cv, state, data['counts'][state]))
        if i >= 50:
            print('first 50 of {} states'.format(len(data['counts'])))
            break

    # distribution over cut values implied by the measurement counts
    print('')
    print('  cut dist.:')
    cut_dist = common.cut_dist(graph, data['counts'])
    for i, cv in enumerate(sorted(cut_dist.keys(), reverse=True)):
        print('  {} - {}'.format(cv, cut_dist[cv]))
        if i >= 20:
            print('first 20 of {} cut values'.format(len(cut_dist)))
            break

    # expectation of the cut value under the measured distribution
    ec = sum([cv * prob for (cv, prob) in cut_dist.items()])
    print('  expected cut value: {}'.format(ec))

    # baseline: cut-value distribution of uniformly random assignments
    samples = 100000
    print('')
    print('  rand cut dist. ({}):'.format(samples))
    rand_cut_dist = common.rand_cut_dist(graph, samples)
    for i, cv in enumerate(sorted(rand_cut_dist.keys(), reverse=True)):
        print('  {} - {}'.format(cv, rand_cut_dist[cv]))
        if i >= 20:
            print('first 20 of {} cut values'.format(len(rand_cut_dist)))
            break

    if args.show_qasm:
        print('')
        print(result.get_ran_qasm('qaoa'))