Example #1
import argparse
import os
import sys


def main():
    parser = argparse.ArgumentParser(description='do symbolic regression')
    parser.add_argument('train_file', help='training data file')
    parser.add_argument('-i', type=int, default=100, help='number of iterations')
    parser.add_argument('-p', type=int, default=1000, help='population size')
    parser.add_argument('-v', action='store_true', help='print detailed information')
    parser.add_argument('-r', action='store_true', help='recover from dump')
    args = parser.parse_args()

    if not os.path.isfile(args.train_file):
        print('main: train file does not exist')
        sys.exit(1)

    with open(args.train_file, 'r') as f:
        lines = f.readlines()
    data, label = parse_data(lines)

    load_data(data, label)
    set_config({
        'ITER_NUM': args.i,
        'POP_SIZE': args.p,
        'PRESERVE_NUM': args.i // 2,
        'CONST': [0, 2, 2.5, 3, 4, 5, 10],
        'PROB_OP': {'+': 2, '-': 2, '*': 2, '/': 2, '^': 0.5, '~': 3,
                    'abs': 0.5, 'sin': 1.5, 'cos': 1.5, 'tan': 1,
                    'asin': 0.5, 'acos': 0.5, 'atan': 0.5,
                    'sinh': 0.5, 'cosh': 0.5, 'tanh': 0.5,
                    'exp': 2, 'sqrt': 2, 'log': 2},
        'VERBOSE': args.v
    })
    result = train(args.r)

    print('MSE: %s' % result[1])
    print('EXPR (postfix): %s' % result[0].to_postfix())
    print('EXPR (infix): %s' % result[0])
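For reference, a minimal parse_data sketch, assuming each line of the training file holds whitespace-separated feature values with the regression target in the last column (the file format is an assumption; the real helper is not shown here):

def parse_data(lines):
    # Hypothetical: last column is the target, the rest are inputs.
    data, label = [], []
    for line in lines:
        fields = line.split()
        if not fields:
            continue
        values = [float(v) for v in fields]
        data.append(values[:-1])
        label.append(values[-1])
    return data, label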
Example #2
import time

import numpy as np

import core


def cvd(data, L, convert_stim, params, num_iters, gamma, tau_r, tau_d):
    '''
    Estimate latent factor activity on held-out test data, keeping static
    params fixed.
    '''

    f, s = core.load_data(data, convert_stim)
    N, T = f.shape
    K = s.shape[0]

    alpha, beta, w, b, sigma = params

    kernel = core.calcium_kernel(tau_r, tau_d, T)
    init_x = np.random.rand(L, T)

    latent_bounds = [(0, None)] * (L * T)
    args = [f, s, kernel, N, T, K, L, gamma, sigma, alpha, beta, w, b]

    t_start = time.time()
    x_hat = core.estimate_latents(init_x, args, latent_bounds, num_iters)
    t_end = time.time()
    elapsed = t_end - t_start
    print('Total elapsed time: %.2fs (%im).' % (elapsed, elapsed // 60))

    return x_hat
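For orientation, core.calcium_kernel presumably builds the convolution kernel from the rise and decay time constants; a common double-exponential form is sketched below (an assumption, not the actual implementation in core):

import numpy as np

def calcium_kernel_sketch(tau_r, tau_d, T):
    # Hypothetical impulse response: fast rise (tau_r), slow decay (tau_d).
    t = np.arange(T)
    return np.exp(-t / tau_d) - np.exp(-t / tau_r)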
Example #3
import time

import numpy as np

import core


def train(data, convert_stim, L, num_iters, iters_per_altern, gamma, tau_r,
          tau_d, imrate):
    '''
    Train the calcium-imaging latent variable analysis model.
    '''

    # Load training data
    f, s = core.load_data(data, convert_stim)
    N, T = f.shape
    K = s.shape[0]

    # Estimate imaging-noise variance
    sigma = core.estimate_noise_sdevs(f, N, T, imrate)

    # Initialise params
    kernel = core.calcium_kernel(tau_r, tau_d, T)
    init_alpha = np.random.normal(1, 1e-2, N)
    init_beta = np.zeros((N, ))
    init_w = core.fit_regressors(f, s, kernel, N, T, K)
    init_b = np.random.rand(N, L)
    init_x = 1e-1 * np.random.rand(L, T)

    initial_params = [init_alpha, init_beta, init_w, init_b, init_x]
    args = [f, s, kernel, N, T, K, L, gamma, sigma]

    # Set non-negative parameter bounds for L-BFGS-B optimisation
    eps = 1e-8
    static_bounds = [(eps, None)] * N  # alpha
    static_bounds += [(0, None)] * (N + N * K + N * L)  # beta, w, b
    latent_bounds = [(0, None)] * (L * T)  # x

    # Alternating optimisation of static parameters and latent variables
    t_start = time.time()
    alpha, beta, w, b, x = core.alternating_minimisation(
        initial_params, args, static_bounds, latent_bounds, num_iters,
        iters_per_altern)
    t_end = time.time()
    elapsed = t_end - t_start
    print('Total elapsed time: %.2fs (%im).' % (elapsed, elapsed // 60))

    # Parameter identification
    param_identification_args = [N, L, s]
    alpha_hat, beta_hat, w_hat, b_hat, x_hat, sigma_hat = core.identify_params(
        alpha, beta, w, b, x, sigma, param_identification_args)

    return [alpha_hat, beta_hat, w_hat, b_hat, x_hat, sigma_hat]
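A plausible end-to-end workflow chains train and cvd for cross-validation; the file paths and hyperparameter values below are hypothetical, for illustration only:

alpha, beta, w, b, x, sigma = train('data/train.npz', convert_stim=False,
                                    L=2, num_iters=500, iters_per_altern=50,
                                    gamma=0.95, tau_r=0.05, tau_d=0.5,
                                    imrate=30.0)
x_test = cvd('data/test.npz', L=2, convert_stim=False,
             params=(alpha, beta, w, b, sigma), num_iters=500,
             gamma=0.95, tau_r=0.05, tau_d=0.5)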
Example #4
import core


def main(model, epochs, batch_size, save_intervals, category):
    my_model = core.create_model(model, category)

    print("Python main program for generating images using {} with "
          "category {}".format(model, category))

    # Preprocess the data images (if init_train) and save them as a pickle file.
    final_images_stacked = core.load_data(my_model.rows)

    my_model.train(data=final_images_stacked,
                   epochs=epochs,
                   batch_size=batch_size,
                   save_intervals=save_intervals,
                   sample_intervals=save_intervals,
                   hi_sample_intervals=save_intervals)
Example #5
import os

import numpy as np

import core


def select_model(folder,
                 data,
                 data_type,
                 return_fit='both',
                 convert_stim=False):
    '''
    Select the model fit with the highest posterior probability among a
    folder of fits.

    Arguments:
        folder:
            Directory containing a set of model fits.

        data:
            Path to the associated data.

        data_type:
            Set to 'train' if the input data is training data, 'test' for
            test data.

        return_fit:
            Set to 'train' to return the fit for training data, 'test' to
            return the fit for test data, or 'both' to return both fits.

        convert_stim:
            Set to True if the stimulus must be converted from a 1d to a 2d
            representation.
    '''

    model_fits = [f for f in os.listdir(folder) if not f.startswith('.')]
    llhs = [None] * len(model_fits)
    f, s = core.load_data(data, convert_stim)
    N, T = f.shape
    for findx, fit in enumerate(model_fits):
        alpha, beta, w, b, x, sigma, tau_r, tau_d, gamma, L = load_fit(
            os.path.join(folder, fit), data_type)
        if L == 1:
            x = np.reshape(x, [int(L), T])
            b = np.reshape(b, [N, int(L)])
        kernel = core.calcium_kernel(tau_r, tau_d, T)
        K = s.shape[0]
        llhs[findx] = -core.log_joint(f, s, kernel, N, T, K, L, gamma, sigma,
                                      alpha, beta, w, b, x)

    return load_fit(os.path.join(folder, model_fits[np.argmax(llhs)]),
                    return_fit)
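A minimal usage sketch (folder and data paths are hypothetical):

best_fit = select_model('fits/session1', 'data/session1_test.npz',
                        data_type='test', return_fit='test')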
Example #6
def get_weather_statistics_view(request):
    """api view to return computed weather statistics"""

    get_data = request.GET

    city = get_data.get("city", "Kampala")
    start_date = get_data.get("start_date", "2020-01-01")
    end_date = get_data.get("end_date", "2020-01-31")

    # load city data for a given period
    weather_data = load_data(city, start_date, end_date)

    # get weather info
    weather_info = get_weather_info(weather_data)

    # generate weather dataframe to be used to
    # compute statistics
    df = generate_df(weather_info)

    # compute required statistics
    computed_df = compute_statistics(df)
    statistics = computed_df.to_dict()

    stat_data = {
        "max_tempC": statistics["max_temp_C"]["max"],
        "max_tempF": statistics["max_temp_F"]["max"],
        "min_tempC": statistics["min_temp_C"]["min"],
        "min_tempF": statistics["min_temp_C"]["min"],
        "avg_tempC": statistics["avg_temp_C"]["mean"],
        "avg_tempF": statistics["avg_temp_F"]["mean"],
        "max_humidity": statistics["max_humidity"]["max"],
        "min_humidity": statistics["min_humidity"]["min"],
        "avg_humidity": statistics["avg_humidity"]["mean"]
    }

    return Response(data=stat_data)
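The shape of stat_data suggests compute_statistics aggregates every column with max, min and mean; a pandas sketch consistent with that access pattern (an assumption, the real helper is not shown):

import pandas as pd

def compute_statistics_sketch(df):
    # Hypothetical: .to_dict() on the result yields
    # {column: {'max': ..., 'min': ..., 'mean': ...}}.
    return df.agg(['max', 'min', 'mean'])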
Example #7
import core
import re

data = core.load_data("5.txt")

seats = []

# Boarding passes are binary space partitions: F/L mean 0, B/R mean 1.
for value in data:
    value = re.sub("[FL]", "0", value)
    value = re.sub("[BR]", "1", value)
    row = int(value[0:7], 2)
    column = int(value[7:], 2)
    seat_id = row * 8 + column
    seats.append(seat_id)

seats.sort()

lowest = seats[0]
highest = seats[-1]

print("Max is %d" % highest)

for i in range(lowest, highest):
    if i not in seats:
        print("Seat %d is missing" % i)
Example #8
def main(m, vec):
    imgs = m.generate_images(vec)
    filename = args.filename.replace('{EP}', str(m.epoch))
    filename = filename.replace(
        '{D}', "{}{}".format(int(args.use_img), int(args.noise)))
    mi, ma = np.min(imgs), np.max(imgs)
    imgs = ((imgs - mi) / (ma - mi) * 255).astype(np.uint8)
    for i, im in enumerate(imgs):
        fn = filename.replace('{N}', str(i))
        utils.save_image(im, os.path.join(args.path, fn))


if "VAE" in args.model:
    if args.use_img:
        data = core.load_data()
        if args.shuffle:
            random.shuffle(data)
        data = data[0:args.n]
        vec = m.get_vector_representation(data)
        vec = vec[2]
    else:
        vec = np.random.normal(0, 1, (args.n, m.latent_dim))
    if args.noise:
        vec = vec + np.random.normal(0, 1, (args.n, m.latent_dim))

    if args.epocycle:
        eps = list(m.available_epochs())
        for ep in eps:
            m.load_epoch(ep)
            main(m, vec)
    else:
Example #9
                return
            l2 += 1
        l1 += 1


def find_3(numbers):
    # Brute-force search over distinct index triples summing to 2020;
    # convert each value once per loop level rather than in the inner loop.
    l1 = 0
    for n1 in numbers:
        n1 = int(n1)
        l2 = 0
        for n2 in numbers:
            n2 = int(n2)
            l3 = 0
            for n3 in numbers:
                n3 = int(n3)
                if (n1 + n2 + n3
                        == 2020) and (l1 != l2) and (l1 != l3) and (l2 != l3):
                    print("%d + %d + %d = 2020. %d x %d x %d = %d" %
                          (n1, n2, n3, n1, n2, n3, n1 * n2 * n3))
                    return
                l3 += 1
            l2 += 1
        l1 += 1


numbers = core.load_data("1.txt")

find_2(numbers)

find_3(numbers)
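The nested loops generalise naturally to itertools.combinations, which enumerates distinct index combinations directly (a sketch of the same search; math.prod needs Python 3.8+):

import math
from itertools import combinations

def find_n(numbers, n):
    # Search all distinct n-element combinations for a sum of 2020.
    for combo in combinations(map(int, numbers), n):
        if sum(combo) == 2020:
            return combo, math.prod(combo)
    return None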
Example #10
import core


def check(trees, dx=3, dy=1):
    # Count trees ('#') hit while descending the grid with slope (dx, dy);
    # the map repeats horizontally, hence the modulo on x.
    total = 0
    x = dx
    width = len(trees[0])
    for y in range(dy, len(trees), dy):
        if trees[y][x] == "#":
            total += 1
        x += dx
        x %= width
    return total


trees = core.load_data("3.txt")

total1 = check(trees, 1, 1)
total2 = check(trees, 3, 1)
print("%d for %d, %d" % (total2, 3, 1))
total3 = check(trees, 5, 1)
total4 = check(trees, 7, 1)
total5 = check(trees, 1, 2)

total = total1 * total2 * total3 * total4 * total5

print("%d trees in all\n" % total)
Example #11
        while l < max:
            matches = re.match(r'^(nop|acc|jmp) \+*(-*\d+)$', data[l])
            done.append(l)
            if matches.group(1) == "acc":
                a += int(matches.group(2))
                l += 1
            elif matches.group(1) == "jmp":
                if l == j:
                    l += 1
                else:
                    l += int(matches.group(2))
            elif matches.group(1) == "nop":
                l += 1
            if l in done:
                if alter:
                    l = 999999
                else:
                    print("Line #%d has already run. Accumulator is %d" %
                          (l, a))
                    return
        if l < 999999:
            print("Code complete. Accumulator is %d" % a)
            return


data = core.load_data("8.txt")

test(data, False)

test(data, True)
Example #12

import core


def check_1(data):
    # Count the distinct questions answered 'yes' by anyone in each group.
    total = 0
    for group in data:
        total += len(set("".join(group)))
    print(total)


def check_2(data):
    # Count the questions answered 'yes' by every member of each group:
    # intersect the character sets of the group's answer lines.
    total = 0
    for group in data:
        shared = set(group[0])
        for answers in group[1:]:
            shared &= set(answers)
        total += len(shared)
    print(total)


data = core.load_data("6.txt")

data = get_groups(data)

check_1(data)

check_2(data)
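get_groups is not shown; assuming the usual input format of groups separated by blank lines, a sketch could be:

def get_groups(lines):
    # Hypothetical: split raw lines into groups at blank lines.
    groups, current = [], []
    for line in lines:
        line = line.strip()
        if line:
            current.append(line)
        elif current:
            groups.append(current)
            current = []
    if current:
        groups.append(current)
    return groups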
Example #13
import core


def total_bags(bag, bags):
    # Recursively count the bags inside `bag`: each child contributes
    # itself plus everything nested within it.
    total = 0
    for child in bag:
        if child in bags:
            number = bag[child]
            total += number + (number * total_bags(bags[child], bags))
    return total


def check_1(bags):
    total = 0
    for colour in bags:
        can = can_contain(bags[colour], bags)
        if can:
            total += 1
    print("%d matching" % total)


def check_2(bags):
    total = total_bags(bags["shiny gold"], bags)
    print("%d in total" % total)


data = core.load_data("7.txt")

bags = get_bags(data)

check_1(bags)

check_2(bags)
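A tiny worked example of the recursion, with hypothetical rules: a shiny gold bag holding 2 red bags, each holding 3 blue bags, contains 2 + 2 * 3 = 8 bags.

bags_example = {
    "shiny gold": {"red": 2},
    "red": {"blue": 3},
    "blue": {},
}
print(total_bags(bags_example["shiny gold"], bags_example))  # -> 8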
Example #14
		if matches is None:
			continue
		else:
			count = matches.group(4).count(matches.group(3))
			valid = (count >= int(matches.group(1))) and (count <= int(matches.group(2)))
			if valid:
				total += 1
	print("%s are valid" % total)

def validate_2(numbers):
	total = 0
	for line in numbers:
		matches = re.search(r'^(\d+)-(\d+) (\w): (\w+)$', line)
		if matches is None:
			continue
		else:
			char1 = matches.group(4)[int(matches.group(1)) - 1]
			char2 = matches.group(4)[int(matches.group(2)) - 1]
			valid1 = char1 == matches.group(3)
			valid2 = char2 == matches.group(3)
			valid = valid1 ^ valid2
			if valid:
				total += 1
	print("%s are valid" % total)

passwords = core.load_data("2.txt")

validate_1(passwords)

validate_2(passwords)
Example #15
                    m = re.match(r'^(\d+)(cm|in)$', match[1])
                    if (m is None or int(m.group(1)) < hgt[m.group(2)][0]
                            or int(m.group(1)) > hgt[m.group(2)][1]):
                        valid = False
                elif match[0] == "hcl":
                    m = re.match(r'^#([\da-f]{6})$', match[1])
                    if m is None:
                        valid = False
                elif match[0] == "ecl":
                    if not match[1] in [
                            'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'
                    ]:
                        valid = False
                elif match[0] == "pid":
                    m = re.match(r'^\d{9}$', match[1])
                    if m is None:
                        valid = False
            if valid:
                total += 1
    print("%d valid" % total)


data = core.load_data("4.txt")

data = get_records(data)

validate_1(data)

validate_2(data)
Example #16
import sys

import core

league = sys.argv[1]
name, started, data = core.load_data(league)

print('Enter results (Return or 0 to exit).')
while True:
    team_games_wins = input('Enter team (p1/p2-games-wins): ')
    if team_games_wins in ['', '0']:
        break
    # quite painful way to separate the user input into its fields
    # maybe there is a better way
    team, games, wins = team_games_wins.split('-')
    games = int(games)
    wins = int(wins)
    assert wins <= games, 'Cannot have more wins than games.'
    # here we sort the players by alphabet to avoid duplicating entries
    # with first and second player interchanged
    first_player, second_player = sorted(team.upper().split('/'))
    # check whether this combination exists
    data = core.add_entry_to_data(data, first_player, second_player, games, wins)
    core.store_data(name, started, data)
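core.add_entry_to_data is not shown; assuming data maps a sorted (first_player, second_player) pair to cumulative [games, wins] counts, a sketch might be:

def add_entry_to_data_sketch(data, first_player, second_player, games, wins):
    # Hypothetical: accumulate games and wins per sorted player pair.
    entry = data.setdefault((first_player, second_player), [0, 0])
    entry[0] += games
    entry[1] += wins
    return data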