Example No. 1
def calibration():
	t1 = t()
	t2 = 0
	i = 0
	calVal = 0
	while (t2-t1) <20:
		potVal = float(pot.read())
		calVal = potVal + calVal
		i +=1
		t2 = t()
	calVal = (calVal/i)
	lcd.on_lcd('Calibrated: '+str(calVal),3)
	return calVal
Example No. 2
def get_best_model(model, param_grid, x, y, scoring, cv=4, refit=True, n_jobs=-1, verbose=1):
    t0 = t()
    grid = GridSearchCV(model, param_grid=param_grid, scoring=scoring, cv=cv,
                        refit=refit, n_jobs=n_jobs, verbose=verbose)
    grid.fit(x, y)
    best_params = grid.best_params_
    best_model = grid.best_estimator_ if refit else model
    best_score = grid.best_score_

    if verbose:
        for item in grid.grid_scores_:
            print("%s %s %s" % ('\tGRIDSCORES\t', stringify(best_model), item))
        print('BEST SCORE\t%s\t%2.6f in %2.2f seconds' % (stringify2(best_model, best_params), abs(best_score), td(t0)))

    return best_model, dict(best_params=best_params,
                best_score=grid.best_score_,
                grid_scores_=grid.grid_scores_)
Example No. 3
def generate():
    sessionId = sha256(repr(t()).encode()).hexdigest()
    return (sessionId)
Example No. 4
assignments = -torch.ones_like(torch.Tensor(n_neurons))
proportions = torch.zeros_like(torch.Tensor(n_neurons, 10))
rates = torch.zeros_like(torch.Tensor(n_neurons, 10))

# Record spikes during the simulation.
spike_record = torch.zeros(update_interval, time, n_neurons)

# Get data labels.
labels = pipeline.env.labels

# Sequence of accuracy estimates.
accuracy = {'all' : [], 'proportion' : []}

# Train the network.
print('Begin training.\n')
start = t()

for i in range(n_train):
	if i % progress_interval == 0:
		print('Progress: %d / %d (%.4f seconds)' % (i, n_train, t() - start))
		start = t()
	
	if i % update_interval == 0 and i > 0:
		# Get network predictions.
		all_activity_pred = all_activity(spike_record, assignments, 10)
		proportion_pred = proportion_weighting(spike_record, assignments, proportions, 10)

		# Compute network accuracy according to available classification strategies.
		accuracy['all'].append(100 * torch.sum(labels[i - update_interval:i].long() == all_activity_pred) / update_interval)
		accuracy['proportion'].append(100 * torch.sum(labels[i - update_interval:i].long() == proportion_pred) / update_interval)
Example No. 5
def item_average():
    np.random.seed(17)
    # allocate memory for results:
    RMSE_train = np.zeros(nfolds)
    RMSE_test = np.zeros(nfolds)
    MAE_train = np.zeros(nfolds)
    MAE_test = np.zeros(nfolds)
    i_pred_final_train = []
    i_pred_final_test = []
    pred_final = []
    print("Naive Approach_3_:_Item_Average")
    print("_________________________________")

    start = t()
    # for each fold:
    for fold in range(nfolds):
        train, test = Cross_Validation(data=ratings, nfolds=nfolds, fold=fold)

        # Sort training/test set by item
        train = train[train[:, 1].argsort()]
        test = test[test[:, 1].argsort()]

        # Store max number of items and uniq item ids
        num_items = max(np.vstack([train, test])[:, 1])
        uniq = np.unique(train[:, 1])

        # Cumulative sum for the index of the occurrence of each item
        index_per_item_train = np.cumsum(np.bincount(train[:, 1]))
        index_per_item_test = np.cumsum(np.bincount(test[:, 1]))

        # Initialize empty vectors for predictions
        pred_per_item_train = np.empty(len(train))
        pred_per_item_test = np.empty(len(test))

        # Create a list to store predictions for each unique movie
        prediction = np.empty(num_items)

        # Iterate for each movie
        for i in range(num_items):
            item_indices_train = slice(index_per_item_train[i],
                                       index_per_item_train[i + 1])
            item_indices_test = slice(index_per_item_test[i],
                                      index_per_item_test[i + 1])

            # Check if the specific item exists in the training set; if not, fall back to the global mean
            if (i + 1) in uniq:
                prediction[i] = np.mean(train[item_indices_train, 2])
            else:
                prediction[i] = np.mean(train[:, 2])

            # Fill in the vectors with the predictions for each item
            pred_per_item_train[item_indices_train] = prediction[i]
            pred_per_item_test[item_indices_test] = prediction[i]

        # Measure the RMSE for train/test
        RMSE_train[fold] = RMSE(train[:, 2], pred_per_item_train)
        RMSE_test[fold] = RMSE(test[:, 2], pred_per_item_test)

        # Measure the MAE for train/test
        MAE_train[fold] = MAE(train[:, 2], pred_per_item_train)
        MAE_test[fold] = MAE(test[:, 2], pred_per_item_test)

        # Store predictions
        i_pred_final_train.append(pred_per_item_train)
        i_pred_final_test.append(pred_per_item_test)
        pred_final.append(prediction)

        # Print Errors
        print("Fold " + str(fold) + ": RMSE_train=" + str(round(RMSE_train[fold] , 6)) +\
              " RMSE_test=" + str(round(RMSE_test[fold],6)) +" || "+"MAE_train=" +\
              str(round(MAE_train[fold],6)) + " MAE_test=" + str(round(MAE_test[fold],6)))

    elapsed = t() - start
    mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    print("\n")
    print("Average RMSE on Test_set: " + str(round(np.mean(RMSE_test), 5)))
    print("Average MAE on Test_set: " + str(round(np.mean(MAE_test), 5)))
    print("Time: " + str(elapsed % 60) + " seconds")
    print("Memory: " + str(mem_usage) + " kilobytes")
    print("=============================================================")
    print("=============================================================")
    print("\n")

    # Return predictions for each item
    return i_pred_final_train, i_pred_final_test, pred_final, RMSE_test, MAE_test
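
The item_average example above, and the related baselines later in this listing (user_average, MatrixFactorization, user_item_average), call helpers that are not shown here: RMSE, MAE, Cross_Validation, plus a module-level ratings array and nfolds count. A minimal sketch of such helpers, assuming ratings is an integer NumPy array with columns [user_id, item_id, rating] (the fold-splitting rule below is illustrative, not taken from the original code):

import numpy as np

def RMSE(y_true, y_pred):
    # Root mean squared error between observed and predicted ratings.
    return np.sqrt(np.mean((y_true - y_pred) ** 2))

def MAE(y_true, y_pred):
    # Mean absolute error between observed and predicted ratings.
    return np.mean(np.abs(y_true - y_pred))

def Cross_Validation(data, nfolds, fold):
    # Assign each row to a fold by its index modulo nfolds, then split train/test.
    idx = np.arange(len(data))
    test_mask = (idx % nfolds) == fold
    return data[~test_mask], data[test_mask]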
Example No. 6
    for i in a1:
        if a2.count(i) == 0:
            diff.append(i)
    for i in a2:
        if a1.count(i) == 0:
            if diff.count(i) == 0:
                diff.append(i)
    return diff


amount_test = 100
p0 = getPrimeList(amount_test)
p1, p2, p3, = [], [], []


start = t()
for i in range(2,amount_test):
    p1.append(i) if is_prime(i) else None
print(" # # Teste do método 'is_prime' para %d números" % amount_test)
print("     Tempo decorrido:", t()-start)
print("     Resultado -> OK") if np.array_equal(p0,p1) else print("     Resultado -> PROBLEMA",print_diff(p0,p1))

start = t()
for i in range(2,amount_test):
    p2.append(i) if is_prime_fermat(i) else None
print(" # # Teste do método 'is_prime_fermat' para %d números" % amount_test)
print("     Tempo decorrido:", t()-start)
print("     Resultado -> OK") if np.array_equal(p0,p2) else print("     Resultado -> PROBLEMA",print_diff(p0,p2))

start = t()
for i in range(2,amount_test):
Example No. 7
def extract_rubric_handler(upload_file, rubricator, syntax, type, encoding):
    get = lambda node_id: Rubric.objects.get(pk=node_id)

    file_path = filework.save_content_to_file(upload_file, settings.SYSTEM_ROOT + 'appdata/tmp/', file_ext='mrc')
    records = []
    s = t()
    #root = Rubric.add_root(name='root', rubricator=rubricator)
    #    roots = Rubric.get_root_nodes()
    #
    #    if not roots:
    #        root = Rubric.add_root(name='root', rubricator=rubricator)
    #    else:
    #        root = roots[0]
    #    print root


    #node = get(root.id).add_child(name='Memory', rubricator=rubricator)
    #print node
    #node = get(root.id).add_child(name='CPU', rubricator=rubricator)
    #node.add_child(name='PENTIUM', rubricator=rubricator)
    #node = Rubric.objects.get(name=u'PENTIUM', rubricator=rubricator)
    #print node
    #print Rubric.dump_bulk()
    try:
        root = Rubric.objects.get(name=rubricator.name, parent=None)
    except Rubric.DoesNotExist:
        root = Rubric(name=rubricator.name, parent=None)
        root.save()

    #    rubric = Rubric(name=u'dddd')
    #    rubric.insert_at(root)
    #    root.save()
    #    return
    #    rubric = Rubric(name=u'Физика', parent=root, rubricator=rubricator)
    #    rubric.save()
    #
    #    rubric = Rubric(name=u'Ядерная', parent=rubric, rubricator=rubricator)
    #    rubric.save()
    #
    #    return

    if syntax == 'USMARC':
        reader = pymarc.MARCReader(file(file_path), encoding=encoding, to_unicode=True)
    else:
        reader = pymarc.UNIMARCReader(file(file_path), encoding=encoding, to_unicode=True)

        for i, record in enumerate(reader):
        #            rubric = None
        #            if record['606']:
        #                rubric = record['606']['a']
        #
        #            print record['100']['a'], rubric
        #            continue
            fields = record.get_fields('606')
            for field in fields:
                rubric_name = field['a'].strip(' .')

                if rubric_name:
                    try:
                        rubric = Rubric.objects.get(name=rubric_name, parent=root)
                    except Rubric.DoesNotExist:
                        rubric = Rubric(name=rubric_name, parent=root)
                        rubric.save()
                        #rubric = root.add_child(name=rubric_name,rubricator=rubricator)

                    subrubrics = field.get_subfields('x')
                    if len(subrubrics):
                        for subrubric_name in subrubrics:
                            subrubric_name = subrubric_name.strip(' .')

                            try:
                                subrubric = Rubric.objects.get(name=subrubric_name, parent=rubric)
                            except Rubric.DoesNotExist:
                                subrubric = Rubric(name=subrubric_name, parent=rubric)
                                subrubric.save()
                                #rubric.add_child(name=subrubric_name, rubricator=rubricator)

            if i % 50 == 0:
                transaction.commit()
                print 'commit'
            print i

            #                    print 'sb:', subrubric_name, rubric_name

    transaction.commit()
    print Rubric.objects.all().count()
    print 'time:', t() - s
Example No. 8
from numpy.fft import *
import numpy as np
from time import time as t
import cv2
layers=['data', 'pool1','pool2','pool3','pool4','conv5_3']

print '=====   RESIZE  ====='
for layer in layers:
    r = np.load('resize/'+layer+'.npy')
    r = r[0,:,:,:]
    print layer, ': SIZE', r.shape, r.shape[0]*r.shape[1]*r.shape[2]

    t_fft = t()
    fft(r)
    print layer, ': FFT took', t()-t_fft

    t_fft2 = t()
    fft2(r)
    print layer, ': FFT2 took', t()-t_fft2

    t_ifft = t()
    ifft(r)
    print layer, ': IFFT took', t()-t_ifft

    t_ifft2 = t()
    ifft2(r)
    print layer, ': IFFT2 took', t()-t_ifft2

    r = r.transpose((1,2,0))
    t_resize = t()
    cv2.resize(r, (224,224))
Example No. 9
    status_services = restart_services()
    if status_services.get('error') != '':
        # TODO: run_forced_backup(). If it failed here, everything failed,
        # so we should run a backup of the files, not just of the docker image.
        # One option is to keep the files backed up in a .zip and overwrite
        # whatever is needed, but that is not implemented, so we just return
        # the failure message.
        print(f'Everything failed: {status_services}')
        return status_services
    for attempts in range(3):
        services_running = check_health()
        if services_running.get('error'):
            print('error getting running services')
        else:
            for service in services_running:
                if services_running[service] == 'DOWN':
                    restart_services()
                    break
            set_update_status("success")
            return
    set_update_status("error")
    return


if __name__ == '__main__':
    time_start = t()
    update_routine()
    time_end = t()
    delta = f'{time_end-time_start}'
    print(f'Update in {delta[0:5]} S')
Example No. 10
from __future__ import division, print_function
__author__ = 'Euclides Fernandes Filho <*****@*****.**>'
"""
ml_utils
Copyright (C) 2015  Euclides Fernandes Filho <*****@*****.**>
http://www.gnu.org/licenses/gpl-2.0.html#SEC4
"""
import re
import pandas as pd
from time import time as t
from sklearn.grid_search import GridSearchCV
td = lambda t0: t() - t0

def stringify2(model, feature_set):
    return "%s:%s" % (re.sub(r"[a-z]", '', model.__class__.__name__), feature_set)


def stringify(model):
    return "%s" % (re.sub(r"[a-z]", '', model.__class__.__name__))


def plot_fi(model, train):
    dfp = pd.DataFrame([dict(zip(train.columns, model.feature_importances_))]).T
    dfp.sort(0, ascending=0, inplace=1)
    dfp["cumsum"] = dfp[0].cumsum()
    dfp.sort(0, ascending=1, inplace=1)
    hsize = train.shape[1]/3.5
    hsize = 8 if hsize < 8 else hsize
    dfp.plot(kind="barh", figsize=(20, hsize))
    dfp.sort(0, ascending=0, inplace=1)
    return dfp
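
A small, illustrative sketch of how the timing and labeling helpers above can be used (the estimator is an arbitrary choice for demonstration, not part of the original module):

from sklearn.linear_model import LinearRegression

t0 = t()
model = LinearRegression()
print(stringify(model))            # 'LR': the lower-case letters are stripped
print('elapsed: %.6f s' % td(t0))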
Example No. 11
## 23min execution time
from time import time as t
w = t()
n = 120000
l = []
for i in range(n + 1):
    l += [set([])]

for i in range(2, n + 1):
    if len(l[i]) == 0:
        for j in range(i, n + 1, i):
            l[j].add(i)

pr = [1, 1]
for i in range(2, n + 1):
    p = 1
    for j in l[i]:
        p *= j
    pr += [p]


def rad(a, b):
    p = pr[a] * pr[b] * pr[a + b]
    if p < (a + b):
        return True
    return False


def co(a, b):
    if l[a] == l[a] - l[b] - l[a + b]:
        if l[b] == l[b] - l[a + b]:
Example No. 12
import requests
from time import time as t
from time import gmtime, strftime, sleep

num = 24
url = 'https://mipt{}-mihailselezniov.c9users.io/'.format(num)
url_task = 'http://127.0.0.1:5000/'

retry = 0

while 1:
    if retry:
        task = retry
    else:
        r = requests.get(url_task)
        task = r.text
    t1 = t()
    r = requests.get(url + task)
    if r.status_code == 200:
        with open('fib_data{}.txt'.format(num), 'a') as f:
            f.write('{}:{}\n'.format(task, r.text))
        retry = 0
    else:
        retry = task
        print('=(')
    print(strftime("%Y-%m-%d %H:%M:%S", gmtime()), round(t() - t1), 'sec')
    sleep(10)
Example No. 13
def user_average():
    np.random.seed(17)
    # allocate memory for results:
    RMSE_train = np.zeros(nfolds)
    RMSE_test = np.zeros(nfolds)
    MAE_train = np.zeros(nfolds)
    MAE_test = np.zeros(nfolds)
    u_pred_final_train = []
    u_pred_final_test = []

    print("Naive Approach_2_:_User_Average")
    print("_________________________________")

    start = t()
    # for each fold:
    for fold in range(nfolds):
        train, test = Cross_Validation(data=ratings, nfolds=nfolds, fold=fold)
        # Cumulative sum for the index of the occurrence of each user
        index_per_user_train = np.cumsum(np.bincount(train[:, 0]))
        index_per_user_test = np.cumsum(np.bincount(test[:, 0]))

        # Initialize empty vectors for predictions
        pred_per_user_train = np.empty(len(train))
        pred_per_user_test = np.empty(len(test))

        # Store unique user_id
        num_users = max(np.vstack([train, test])[:, 0])
        uniq = np.unique(train[:, 0])

        # Iterate for each user
        # 'i' iterates through [0:num_users]
        for i in range(num_users):
            user_indices_train = slice(index_per_user_train[i],
                                       index_per_user_train[i + 1])
            user_indices_test = slice(index_per_user_test[i],
                                      index_per_user_test[i + 1])

            # Check if the specific user exists in the training set; if not, fall back to the global mean
            if (i + 1) in uniq:
                pred = np.mean(train[user_indices_train, 2])
            else:
                pred = np.mean(train[:, 2])

            # Fill in the vectors with the predictions for each user
            pred_per_user_train[user_indices_train] = pred
            pred_per_user_test[user_indices_test] = pred

        # Measure the RMSE for train/test
        RMSE_train[fold] = RMSE(train[:, 2], pred_per_user_train)
        RMSE_test[fold] = RMSE(test[:, 2], pred_per_user_test)

        # Measure the MAE for train/test
        MAE_train[fold] = MAE(train[:, 2], pred_per_user_train)
        MAE_test[fold] = MAE(test[:, 2], pred_per_user_test)

        # Store predictions
        u_pred_final_train.append(pred_per_user_train)
        u_pred_final_test.append(pred_per_user_test)

        # Print Errors
        print("Fold " + str(fold) + ": RMSE_train=" + str(round(RMSE_train[fold] , 6)) +\
              " RMSE_test=" + str(round(RMSE_test[fold],6)) +" || "+"MAE_train=" +\
              str(round(MAE_train[fold],6)) + " MAE_test=" + str(round(MAE_test[fold],6)))

    elapsed = t() - start
    mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    print("\n")
    print("Average RMSE on Test_set: " + str(round(np.mean(RMSE_test), 5)))
    print("Average MAE on Test_set: " + str(round(np.mean(MAE_test), 5)))
    print("Time: " + str(elapsed % 60) + " seconds")
    print("Memory: " + str(mem_usage) + " kilobytes")
    print("=============================================================")
    print("=============================================================")
    print("\n")

    # Return predictions for each user
    return u_pred_final_train, u_pred_final_test, RMSE_test, MAE_test
Example No. 14
def MatrixFactorization():
    # start timer
    start = t()

    # set the learning rate and the lambda coefficient
    learning_rate = 0.005
    lambda_reg = 0.05
    k = 10  # Num of features for Matrix Factorization
    num_iter = input("Provide number of iterations:  >_")
    num_iter = int(num_iter)

    # initialize the arrays that will contain the errors for each fold
    RMSE_train = np.zeros(nfolds)
    RMSE_test = np.zeros(nfolds)
    MAE_train = np.zeros(nfolds)
    MAE_test = np.zeros(nfolds)
    RMSE_all = np.empty((nfolds, 1, num_iter))
    MAE_all = np.empty((nfolds, 1, num_iter))

    # for each fold
    for fold in range(nfolds):
        # start fold timer
        start_fold = t()
        # Cross Validation, generate train/test set
        train, test = Cross_Validation(data=ratings, nfolds=nfolds, fold=fold)
        # Initialize U and M matrices with random numbers
        U = np.random.rand(max(train[:, 0]), k)
        M = np.random.rand(k, max(train[:, 1]))
        # initialize two lists that will contain the RMSE and MAE of each iteration, respectively
        RMSE_list = []
        MAE_list = []

        # for each iteration:
        for iteration in range(num_iter):
            # print current fold and current iteration
            print("Fold: " + str(fold + 1) + "," + " Iteration: " +
                  str(iteration + 1))
            x_hat = np.empty(len(train))

            # for each record in the train set
            for idx, rating in enumerate(train):
                # create a copy of the user vector #
                u = U[rating[0] - 1, :].copy()
                # calculate the rating (prediction) the user would give to the movie
                x_hat[idx] = np.dot(u, M[:, rating[1] - 1])
                # clip the predicted rating to the range [1, 5]
                if x_hat[idx] < 1:
                    x_hat[idx] = 1
                elif x_hat[idx] > 5:
                    x_hat[idx] = 5
                # calculate the error
                e_ij = rating[2] - x_hat[idx]

                # update matrices U and M
                U[rating[0] -
                  1, :] += learning_rate * (2 * e_ij * M[:, rating[1] - 1] -
                                            lambda_reg * u)
                M[:, rating[1] -
                  1] += learning_rate * (2 * e_ij * u -
                                         lambda_reg * M[:, rating[1] - 1])
            # calculate the RMSE and MAE of this iteration, respectively
            rmse_iter = RMSE(train[:, 2], x_hat)
            mae_iter = MAE(train[:, 2], x_hat)

            print("RMSE: " + str(rmse_iter))
            print("MAE : " + str(mae_iter))
            print("--------------")
            # Add/append the errors to the lists containing the errors of previous iterations
            RMSE_list.append(rmse_iter)
            MAE_list.append(mae_iter)
        # RMSE_all and MAE_all contain the list of errors of all iterations for the current fold.
        RMSE_all[fold] = RMSE_list
        MAE_all[fold] = MAE_list
        # the RMSE and MAE of the current fold are the last calculated RMSE and MAE, respectively
        RMSE_train[fold] = RMSE_list[-1]
        MAE_train[fold] = MAE_list[-1]
        # calculate the number of users
        num_users = max(train[:, 0])
        # calculate the number of times a user appears in the test set
        num_ratings_perUser_test = np.bincount(test[:, 0])
        # the cumulative sum indicates the index in which every new user (user_id)
        # appears in the test set.
        index_perUser_test = np.cumsum(num_ratings_perUser_test)

        # evaluate the model on the test set (make predictions on the test set)
        pred = np.empty(len(test[:, 2]), object)
        for user_id in range(num_users):
            test_subset = test[
                index_perUser_test[user_id]:index_perUser_test[user_id + 1], :]
            pred[index_perUser_test[user_id]:index_perUser_test[
                user_id + 1]] = np.dot(U[user_id, :], M[:,
                                                        test_subset[:, 1] - 1])

        # calculate the RMSE and MAE of the test set
        RMSE_test[fold] = RMSE(test[:, 2], pred)
        MAE_test[fold] = MAE(test[:, 2], pred)
        # stop fold timer
        end_fold = t()
        # print how much time taken (in minutes) for the fold to be executed.
        # Also print the RMSE and MAE of the train and test fold
        print("Time taken for fold " + str(fold + 1) + "(in minutes):" +
              str((end_fold - start_fold) / 60))
        print("Fold " + str(fold + 1) + ": Root Mean Squared Error (RMSE) on train set: "+\
              str(RMSE_train[fold]) + "; Root Mean Squared Error (RMSE) on test set: " +\
              str(RMSE_test[fold]) )

        print("Fold " + str(fold + 1) + ": Mean Absolute Error (MAE) on train set: " +\
              str(MAE_train[fold]) + "; Mean Absolute Error (MAE) on test set: " +\
              str(MAE_test[fold]))
        print("")

    # print the average RMSE of the 5 folds, for the train and test sets.
    print("Mean of Root Mean Squared Error (RMSE) on train sets: " +
          str(np.mean(RMSE_train)))
    print("Mean of Root Mean Squared Error (RMSE) on test sets: " +
          str(np.mean(RMSE_test)))

    # print the average MAE of the 5 folds, for the train and test sets.
    print("Mean of Mean Absolute Error (MAE) on train sets: " +
          str(np.mean(MAE_train)))
    print("Mean of Mean Absolute Error (MAE) on test sets: " +
          str(np.mean(MAE_test)))

    # end timer
    end = t()
    # print how much time (in hours) took for the function to be evaluated
    print("Time taken to evaluate function (in hours): " +
          str(((end - start) / 60) / 60))

    # return matrix U and M, RMSE and MAE for each iteration of the 5 folds, and finally
    # the RMSE and MAE of the 5 test folds
    return (U, M, RMSE_all, MAE_all, RMSE_test, MAE_test)
Example No. 15
def user_item_average(u_train, u_test, I_train, I_test, prediction):

    np.random.seed(17)
    # allocate memory for results:
    RMSE_train = np.zeros(nfolds)
    RMSE_test = np.zeros(nfolds)
    MAE_train = np.zeros(nfolds)
    MAE_test = np.zeros(nfolds)
    y_final = []
    test_final = []
    coef = []

    print("Naive Approach_4_:_User+Movie_Average")
    print("Linear Model: Y = a*User + b*Item + g")
    print("_____________________________________")

    # Start timer
    start = t()

    # For each fold
    for fold in range(nfolds):

        train, test = Cross_Validation(data=ratings, nfolds=nfolds, fold=fold)

        pred_item_train = np.empty(len(train))
        pred_item_test = np.empty(len(test))
        for i in range(len(train)):
            pred_item_train[i] = prediction[fold][train[i, 1] - 1]
        for i in range(len(test)):
            pred_item_test[i] = prediction[fold][test[i, 1] - 1]

        #return pred_item_train
        # calculate parameters a,b,c using the least squares method
        A = np.vstack((u_train[fold], pred_item_train, np.ones(len(train)))).T
        alpha, beta, gamma = np.linalg.lstsq(A, train[:, 2], rcond=1)[0]

        # Calculate y_hat for train/test set
        y_hat_train = alpha * u_train[fold] + beta * pred_item_train + gamma
        y_hat_test = alpha * u_test[fold] + beta * pred_item_test + gamma

        # Measure the RMSE for train/test
        RMSE_train[fold] = RMSE(train[:, 2], y_hat_train)
        RMSE_test[fold] = RMSE(test[:, 2], y_hat_test)

        # Measure the MAE for train/test
        MAE_train[fold] = MAE(train[:, 2], y_hat_train)
        MAE_test[fold] = MAE(test[:, 2], y_hat_test)

        # Store coefficients
        coef.append(np.array([alpha, beta, gamma]))
        del (A)

        y_final.append(y_hat_test)
        test_final.append(test)

        # Print Errors
        print("Fold " + str(fold) + ": RMSE_train=" + str(round(RMSE_train[fold] , 6)) +\
              " RMSE_test=" + str(round(RMSE_test[fold],6)) +" || "+"MAE_train=" +\
              str(round(MAE_train[fold],6)) + " MAE_test=" + str(round(MAE_test[fold],6)))

    elapsed = t() - start
    mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    # print errors:
    print("\n")
    print("Average RMSE on Test_set: " + str(round(np.mean(RMSE_test), 6)))
    print("Average MAE on Test_set: " + str(round(np.mean(MAE_test), 6)))
    print("Time: " + str(elapsed % 60) + " seconds")
    print("Memory: " + str(mem_usage) + " kilobytes")
    print("=============================================================")
    print("=============================================================")
    print("\n")

    flag1 = input("Print Coefficients? [y/n]        >_ ")
    if flag1 == 'Y' or flag1 == 'y' or flag1 == 'yes':
        print("\t Coefficients\n")
        print("Fold || alpha || beta || gamma\n")
        for i in range(5):
            print(str(i) + "    ||" + str(coef[i]))

    return RMSE_test, MAE_test
Example No. 16
	#run = str(input('True or False: '))

	for letter in WRAPS:
		for i in check.ref_dict[letter]:
			out.append(i)
			#out.union(check.ref_dict[letter])

	out = set(out)

		#print(out)

	possible = my_tiles(tiles)

	ok = out.intersection(possible.plays)

	TEST = [ (word,WORD(word).score()) for word in ok]

	TEST= sorted(TEST,key = lambda item:item[1],reverse=True)

	#	print(OK[0])
	for word in TEST:
		print(word)

from time import time as t

if __name__ == "__main__":
	start = t()
	main()
	end = t()
	print(end-start)
Example No. 17
	if sdef is None:
		raise RuntimeError, "Can't get sdef (requires OS 10.4+)."
	return parsestring(sdef, path, style)


######################################################################
# TEST
######################################################################

if __name__ == '__main__':
#	p = '/Users/has/PythonDev/osaterminology_dev/sdef_test/UI Actions.app'
#	p = '/Users/has/PythonDev/OSATerminology_dev/sdef_test/UIActionsSuiteNoDTD.sdef'
#	p = '/Developer/Examples/Scripting Definitions/NSCoreSuite.sdef'
# 	p = '/Applications/Mail.app'
	from time import time as t
	tt=t()
	d = parsefile('/Users/has/PythonDev/appscript/~old/osaterminology_dev/sdefstuff/InDesignCS2.sdef', 'appscript') # 3.3 sec (AS); 4.1 sec (appscript, after adding caching to makeidentifier; was 6 sec)
	print t()-tt
#	p = '/System/Library/CoreServices/Finder.app'
#	p = '/Applications/TextEdit.app'
#	d = parsefile('/Users/has/dictionaryparsers/Automator.sdef')
#	d = parsefile('/Users/has/PythonDev/OSATerminology_dev/sdef_test/UIActionsSuite.sdef', 'applescript')
	
#	d = parseapp(p,'appscript')
#	d = parseapp(p)

	print d

#	print d.classes()
#	print d.classes().byname('document').allproperties().byname('text').types.resolve()
#	print d.classes().byname('document').allproperties().byname('name').types.resolve()
Example No. 18
from numpy.fft import *
import numpy as np
from time import time as t

a = np.load('resize/pool3.npy')
t1 = t()
a = a[0,:,:,:]
a = a.astype(np.complex64)
print t()-t1
print a.shape


t1 = t()
ifft2(a)
print 'NUMPY took', t()-t1

from pyfft.cuda import Plan
import pycuda.driver as cuda
from pycuda.tools import make_default_context
import pycuda.gpuarray as gpuarray

cuda.init()
context = make_default_context()
stream = cuda.Stream()

plan = Plan((128,128), stream=stream)

t1 = t()
gpu_data = gpuarray.to_gpu(a)
print 'togpu took', t()-t1
plan.execute(gpu_data)
Example No. 19
import threading as th
HOST = ''
PORT = 1111
BUFFER_SIZE = 1024
ADDR = (HOST, PORT)

rex = r'\d+\:\d+\:\d+'


def whileloop(sock):
    while (True):
        recv = sock.recv(BUFFER_SIZE)
        if not recv:
            break
        print(">>>>get message from %s : %s" % (addr, recv))
        sock.send(
            bytes(('Server:get your message :%s, TKS' % recv).encode("utf-8")))


server = s.socket(s.AF_INET, s.SOCK_STREAM)
server.bind(ADDR)
server.listen(5)
while (True):
    print("waiting for client to connecting...")
    sock, addr = server.accept()
    print(">>>%s connected currentTime:%s" %
          (addr, re.search(rex, t()).group()))
    tt = th.Thread(target=whileloop, args=(sock,))  # pass the handler as the thread target instead of calling it inline
    tt.start()
    tt.join()
Example No. 20
#ans: 748'317
from time import time as t
from bool_prime_sieve import prime_sieve

ss=t()

limit = int(1e6)
nums = prime_sieve(limit)

def gp(sieve):
	for prime in xrange(len(sieve)):
		if sieve[prime]: 
			pass

def trp(prime):
	#right trunc check
	p=prime
	power=0
	while prime>0:
#		print prime,'rtp'
		if not nums[prime]: return False
		else:
			#print prime
			prime/=10
			power+=1
	
	#left trunc check
	while power>0:
#		print p,'ltp'
		if not nums[p]: return False
		else:
Example No. 21
def ratings_file_to_h5(filepath, h5_obj, h5_path, track=1, test=False, rows=int(1e6)):
    # Pick the right class for track 1v2 data
    if   (track == 1) and (test == False):
        output_format = T1UserRating
    elif (track == 2) and (test == False):
        output_format = T2UserRating
    if   (track == 1) and (test == True):
        output_format = T1Test
    elif (track == 2) and (test == True):
        output_format = T2Test

    # Make a hdf5 table
    rating_table = h5_obj.createTable("/", h5_path, output_format, expectedrows=rows)


    # Start the timer . . .
    global time
    lasttime = t()
    # Open the file to read
    line_count = 0
    training_set = open(filepath, 'r')
    for user_no, line in enumerate(training_set):
        user_number, rating_number = line.strip().split("|")
        user_number = int(user_number)
        rating_number = int(rating_number)
        line_count += 1

        # Catch all the existing/desired ratings for a given user in this list . . . 
        aggregator = []
        for rating_line in range(rating_number):
            rating_data = training_set.next()
            # Diff formats for track 1 vs track 2
            split_up_line = rating_data.strip().split() 
            if   (track == 1) and (test == False):
                # Track one data has date, hh:mm:ss also so. . .
                item_number, rating, date, time = split_up_line
                item_number = int(item_number)
                rating      = int(rating)
                date        = int(date)
                hrs, mins, secs = time.split(":")
                min_time = int(mins) + (60*int(hrs))
                if int(secs) != 0:
                    raise ValueError("WTF")
                row_data = tuple([user_number, item_number, rating, date, min_time])
            elif (track == 1) and (test == True):
                # Track one data has date, hh:mm:ss also so. . .
                item_number, date, time = split_up_line
                item_number = int(item_number)
                date        = int(date)
                hrs, mins, secs = time.split(":")
                min_time = int(mins) + (60*int(hrs))
                if int(secs) != 0:
                    raise ValueError("WTF")
                row_data = tuple([user_number, item_number, date, min_time])
            elif (track==2) and (test==False):
                item_number, rating = rating_data.strip().split()
                item_number = int(item_number)
                rating      = int(rating)
                row_data = tuple([user_number, item_number, rating])
            elif (track==2) and (test==True):
                item_number = rating_data.strip().split()[0]
                item_number = int(item_number)
                row_data = tuple([user_number, item_number])

            aggregator.append(row_data)
            line_count += 1
        rating_table.append(aggregator)

        if ((user_no % 1000) == 0) and (user_no != 0):
            rate =  1000. / (t() - lasttime)
            print "Processed %i users, %i ratings (%f users/second)" % (user_no, line_count, rate)
            lasttime = t()
        # TODO: Debug Only!
        # if user_no > 5000: 
        #     print "TODO!"
        #     break

    print "Creating Index To User and Item"
    start_time = t()
    rating_table.cols.user.createIndex()
    rating_table.cols.item.createIndex()
    print "Done %f seconds. . ." % (t() - start_time)
Example No. 22
from time import time as t
def sundaram3(max_n):
    numbers = list(range(3, max_n+1, 2))
    half = (max_n)//2
    initial = 4
    
    for step in range(3, max_n+1, 2):
        for i in range(initial, half, step):
            numbers[i-1] = 0
        initial += 2*(step+1)
        
        if initial > half:
            return [2] + list(filter(None, numbers))
st = t()
print(sundaram3(1000000))
et = t()
print(et - st)  # elapsed seconds
Example No. 23
def main(seed=0,
         n_neurons=100,
         n_train=60000,
         n_test=10000,
         inhib=100,
         lr=1e-2,
         lr_decay=1,
         time=350,
         dt=1,
         theta_plus=0.05,
         theta_decay=1e-7,
         intensity=1,
         progress_interval=10,
         update_interval=250,
         plot=False,
         train=True,
         gpu=False):

    assert n_train % update_interval == 0 and n_test % update_interval == 0, \
                            'No. examples must be divisible by update_interval'

    params = [
        seed, n_neurons, n_train, inhib, lr, lr_decay, time, dt, theta_plus,
        theta_decay, intensity, progress_interval, update_interval
    ]

    test_params = [
        seed, n_neurons, n_train, n_test, inhib, lr, lr_decay, time, dt,
        theta_plus, theta_decay, intensity, progress_interval, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    n_examples = n_train if train else n_test
    n_sqrt = int(np.ceil(np.sqrt(n_neurons)))
    n_classes = 10

    # Build network.
    if train:
        network = DiehlAndCook2015v2(n_inpt=784,
                                     n_neurons=n_neurons,
                                     inh=inhib,
                                     dt=dt,
                                     norm=78.4,
                                     theta_plus=theta_plus,
                                     theta_decay=theta_decay,
                                     nu=[0, lr],
                                     wmin=0,
                                     wmax=1)

        network.connections['X', 'Y'].update_rule = WeightDependentPostPre(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu)
    else:
        network = load_network(os.path.join(params_path, model_name + '.pt'))
        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'],
            nu=network.connections['X', 'Y'].nu)
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

    # Load MNIST data.
    dataset = MNIST(path=data_path, download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images = images.view(-1, 784)
    images *= intensity

    # Record spikes during the simulation.
    spike_record = torch.zeros(update_interval, time, n_neurons)

    # Neuron assignments and spike proportions.
    if train:
        assignments = -torch.ones_like(torch.Tensor(n_neurons))
        proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
        rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
        ngram_scores = {}
    else:
        path = os.path.join(params_path,
                            '_'.join(['auxiliary', model_name]) + '.pt')
        assignments, proportions, rates, ngram_scores = torch.load(
            open(path, 'rb'))

    # Sequence of accuracy estimates.
    curves = {'all': [], 'proportion': [], 'ngram': []}
    predictions = {scheme: torch.Tensor().long() for scheme in curves.keys()}

    if train:
        best_accuracy = 0

    spikes = {}
    for layer in set(network.layers) - {'X'}:
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=['s'],
                                time=time)
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    inpt_axes = None
    inpt_ims = None
    spike_ims = None
    spike_axes = None
    weights_im = None
    assigns_im = None
    perf_ax = None

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0:
            print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)')
            start = t()

        if i % update_interval == 0 and i > 0:
            if train:
                network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

            if i % len(labels) == 0:
                current_labels = labels[-update_interval:]
            else:
                current_labels = labels[i % len(images) - update_interval:i %
                                        len(images)]

            # Update and print accuracy evaluations.
            curves, preds = update_curves(curves,
                                          current_labels,
                                          n_classes,
                                          spike_record=spike_record,
                                          assignments=assignments,
                                          proportions=proportions,
                                          ngram_scores=ngram_scores,
                                          n=2)
            print_results(curves)

            for scheme in preds:
                predictions[scheme] = torch.cat(
                    [predictions[scheme], preds[scheme]], -1)

            # Save accuracy curves to disk.
            to_write = ['train'] + params if train else ['test'] + params
            f = '_'.join([str(x) for x in to_write]) + '.pt'
            torch.save((curves, update_interval, n_examples),
                       open(os.path.join(curves_path, f), 'wb'))

            if train:
                if any([x[-1] > best_accuracy for x in curves.values()]):
                    print(
                        'New best accuracy! Saving network parameters to disk.'
                    )

                    # Save network to disk.
                    network.save(os.path.join(params_path, model_name + '.pt'))
                    path = os.path.join(
                        params_path,
                        '_'.join(['auxiliary', model_name]) + '.pt')
                    torch.save((assignments, proportions, rates, ngram_scores),
                               open(path, 'wb'))
                    best_accuracy = max([x[-1] for x in curves.values()])

                # Assign labels to excitatory layer neurons.
                assignments, proportions, rates = assign_labels(
                    spike_record, current_labels, n_classes, rates)

                # Compute ngram scores.
                ngram_scores = update_ngram_scores(spike_record,
                                                   current_labels, n_classes,
                                                   2, ngram_scores)

            print()

        # Get next input sample.
        image = images[i % len(images)]
        sample = poisson(datum=image, time=time, dt=dt)
        inpts = {'X': sample}

        # Run the network on the input.
        network.run(inpts=inpts, time=time)

        retries = 0
        while spikes['Y'].get('s').sum() < 5 and retries < 3:
            retries += 1
            image *= 2
            sample = poisson(datum=image, time=time, dt=dt)
            inpts = {'X': sample}
            network.run(inpts=inpts, time=time)

        # Add to spikes recording.
        spike_record[i % update_interval] = spikes['Y'].get('s').t()

        # Optionally plot various simulation information.
        if plot:
            # _input = image.view(28, 28)
            # reconstruction = inpts['X'].view(time, 784).sum(0).view(28, 28)
            _spikes = {layer: spikes[layer].get('s') for layer in spikes}
            input_exc_weights = network.connections[('X', 'Y')].w
            square_weights = get_square_weights(
                input_exc_weights.view(784, n_neurons), n_sqrt, 28)
            # square_assignments = get_square_assignments(assignments, n_sqrt)

            # inpt_axes, inpt_ims = plot_input(_input, reconstruction, label=labels[i], axes=inpt_axes, ims=inpt_ims)
            spike_ims, spike_axes = plot_spikes(_spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)
            weights_im = plot_weights(square_weights, im=weights_im)
            # assigns_im = plot_assignments(square_assignments, im=assigns_im)
            # perf_ax = plot_performance(curves, ax=perf_ax)

            plt.pause(1e-8)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    i += 1

    if i % len(labels) == 0:
        current_labels = labels[-update_interval:]
    else:
        current_labels = labels[i % len(images) - update_interval:i %
                                len(images)]

    # Update and print accuracy evaluations.
    curves, preds = update_curves(curves,
                                  current_labels,
                                  n_classes,
                                  spike_record=spike_record,
                                  assignments=assignments,
                                  proportions=proportions,
                                  ngram_scores=ngram_scores,
                                  n=2)
    print_results(curves)

    for scheme in preds:
        predictions[scheme] = torch.cat([predictions[scheme], preds[scheme]],
                                        -1)

    if train:
        if any([x[-1] > best_accuracy for x in curves.values()]):
            print('New best accuracy! Saving network parameters to disk.')

            # Save network to disk.
            if train:
                network.save(os.path.join(params_path, model_name + '.pt'))
                path = os.path.join(
                    params_path, '_'.join(['auxiliary', model_name]) + '.pt')
                torch.save((assignments, proportions, rates, ngram_scores),
                           open(path, 'wb'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    print('Average accuracies:\n')
    for scheme in curves.keys():
        print('\t%s: %.2f' % (scheme, float(np.mean(curves[scheme]))))

    # Save accuracy curves to disk.
    to_write = ['train'] + params if train else ['test'] + params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save((curves, update_interval, n_examples),
               open(os.path.join(curves_path, f), 'wb'))

    # Save results to disk.
    results = [
        np.mean(curves['all']),
        np.mean(curves['proportion']),
        np.mean(curves['ngram']),
        np.max(curves['all']),
        np.max(curves['proportion']),
        np.max(curves['ngram'])
    ]

    to_write = params + results if train else test_params + results
    to_write = [str(x) for x in to_write]
    name = 'train.csv' if train else 'test.csv'

    if not os.path.isfile(os.path.join(results_path, name)):
        with open(os.path.join(results_path, name), 'w') as f:
            if train:
                f.write(
                    'random_seed,n_neurons,n_train,inhib,lr,lr_decay,time,timestep,theta_plus,theta_decay,intensity,'
                    'progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )
            else:
                f.write(
                    'random_seed,n_neurons,n_train,n_test,inhib,lr,lr_decay,time,timestep,theta_plus,theta_decay,'
                    'intensity,progress_interval,update_interval,mean_all_activity,mean_proportion_weighting,'
                    'mean_ngram,max_all_activity,max_proportion_weighting,max_ngram\n'
                )

    with open(os.path.join(results_path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')

    if labels.numel() > n_examples:
        labels = labels[:n_examples]
    else:
        while labels.numel() < n_examples:
            if 2 * labels.numel() > n_examples:
                labels = torch.cat(
                    [labels, labels[:n_examples - labels.numel()]])
            else:
                labels = torch.cat([labels, labels])

    # Compute confusion matrices and save them to disk.
    confusions = {}
    for scheme in predictions:
        confusions[scheme] = confusion_matrix(labels, predictions[scheme])

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusions, os.path.join(confusion_path, f))
Example No. 24
# sheet.cell("F1").style.font.bold = True
sheet["G1"] = "Title"
# sheet.cell("G1").style.font.bold = True
sheet["H1"] = "Label"
# sheet.cell("H1").style.font.bold = True
sheet["I1"] = "Lead"
# sheet.cell("I1").style.font.bold = True
sheet["J1"] = "Content"
# sheet.cell("J1").style.font.bold = True
sheet["K1"] = "Tags"
# sheet.cell("K1").style.font.bold = True
counter = 2
"""
Executing SQL queries.
"""
start = t()
connection = sqlite3.connect("articles.sqlite")
cursor = connection.cursor()
id_number = 0
for row in cursor.execute(
        """   SELECT * FROM Articles
                                LEFT OUTER JOIN Relations ON Articles.id = Relations.id_article
                                LEFT OUTER JOIN Tags ON Relations.id_tag = Tags.id
                                WHERE (Articles.time BETWEEN ? AND ?)
                                AND (section LIKE '%/dom%' OR section LIKE '%/vrt%')
                                ORDER BY Articles.time DESC""", (begin, end)):
    if row[1] != id_number:
        sheet["A" + str(counter)] = row[1]
        sheet["B" + str(counter)] = row[2]
        sheet["C" + str(counter)] = row[3]
        sheet["D" + str(counter)] = row[4]
Example No. 25
#-------------------- modules import
from time import time as t

#----- start time
ss=t()

#-------------------- functions

#--- check txt if 1-9 pandigital
def is_pan(txt):
	txt=str(txt)
	if len(txt)>9 or len(txt)<9: return False
	else:
		pan=set('123456789')
		return pan==set(txt)


#--- concatenate multiplications
def com(num):
	c=''
	for i in xrange(1,10):
		c+=str(num*i)
		if len(c)>=9: break
	return c


#--------------- main
def main():
	maxi=0;n=0
	for num in xrange(9000,10000):
		a=com(num)
Example No. 26
def ann_to_snn(ann: Union[nn.Module, str],
               input_shape: Sequence[int],
               data: Optional[torch.Tensor] = None,
               percentile: float = 99.9) -> Network:
    # language=rst
    """
    Converts an artificial neural network (ANN) written as a ``torch.nn.Module`` into a near-equivalent spiking neural
    network.

    :param ann: Artificial neural network implemented in PyTorch. Accepts either ``torch.nn.Module`` or path to network
                saved using ``torch.save()``.
    :param input_shape: Shape of input data.
    :param data: Data to use to perform data-based weight normalization of shape ``[n_examples, ...]``.
    :param percentile: Percentile (in ``[0, 100]``) of activations to scale by in data-based normalization scheme.
    :return: Spiking neural network implemented in PyTorch.
    """
    if isinstance(ann, str):
        ann = torch.load(ann)

    assert isinstance(ann, nn.Module)

    if data is not None:
        print()
        print('Example data provided. Performing data-based normalization...')

        t0 = t()
        ann = data_based_normalization(ann=ann,
                                       data=data.detach(),
                                       percentile=percentile)

        print(f'Elapsed: {t() - t0:.4f}')

    snn = Network()

    input_layer = nodes.RealInput(shape=input_shape)
    snn.add_layer(input_layer, name='Input')

    children = []
    for c in ann.children():
        if isinstance(c, nn.Sequential):
            for c2 in list(c.children()):
                children.append(c2)
        else:
            children.append(c)

    i = 0
    prev = input_layer
    while i < len(children) - 1:
        current, nxt = children[i:i + 2]
        layer, connection = _ann_to_snn_helper(prev, current)

        i += 1

        if layer is None or connection is None:
            continue

        snn.add_layer(layer, name=str(i))
        snn.add_connection(connection, source=str(i - 1), target=str(i))

        prev = layer

    current = children[-1]
    layer, connection = _ann_to_snn_helper(prev, current)

    i += 1

    if layer is not None or connection is not None:
        snn.add_layer(layer, name=str(i))
        snn.add_connection(connection, source=str(i - 1), target=str(i))

    return snn
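
A minimal usage sketch for the converter above (the ANN architecture, its layer sizes, and the calibration data are illustrative assumptions, not taken from the original code):

import torch
import torch.nn as nn

# Hypothetical feed-forward ANN to convert; sizes chosen only for illustration.
ann = nn.Sequential(nn.Linear(784, 256), nn.ReLU(), nn.Linear(256, 10))
calibration = torch.rand(32, 784)  # example inputs for data-based normalization

snn = ann_to_snn(ann, input_shape=(784,), data=calibration, percentile=99.9)
# snn is the resulting spiking Network, with an 'Input' layer followed by the converted layers.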
Example No. 27
method = 'sc'
# method = sys.argv[1]
# sentence_path = sys.argv[2]
keywords_path = sys.argv[2:]
# sentence_name = sys.argv[3]
name_list = [item[-13:] for item in keywords_path]
# tmp_path = sys.argv[4]
result_path = sys.argv[1]

def run(keywords_path, name_list, result_path):
	# print sentence_path+sentence_name
	# t1 = t()
	# extract_(sentence_path+sentence_name, tmp_path+sentence_name)
	# t2 = t()
	keywords_list = []
	for keywords in keywords_path:
		with open(keywords) as f:  # open each file in the list, not the list itself
			keywords_list.append(f.read().split('\n'))
	word2vec_(method, keywords_list, name_list, result_path)
	# t3 = t()
	# print t2-t1, t3-t1

# def run_(sentence_path, result_path):
# 	files = os.listdir(sentence_path)
# 	extract_(sentence_path, result_path)

if __name__ == '__main__':
	s = t()
	run(keywords_path, name_list, result_path)  # arguments matching run()'s signature
	e = t()
	print(e-s)
Example No. 28
        keys_pressed = key.get_pressed()
        if keys_pressed[K_LEFT] and self.rect.x > 5:
            self.rect.x -= 10
        if keys_pressed[K_RIGHT] and self.rect.x < 600:
            self.rect.x += 10

    def fire(self):
        bullet1 = Bullet(bullet, self.rect.centerx, self.rect.top, -15)
        bullets.add(bullet1)


ammo = 5
numammo = 0
score = 0
lost = 0
waitseconds = t()
wait = False
direction = "down"
textscore = font.render("Счет: " + str(score), 1, (255, 255, 255))
textlose = font.render("Пропущено: " + str(lost), 1, (255, 255, 255))
finishlose = font.render("YOU LOSE", 1, (255, 0, 0))
finishwin = font.render("YOU WIN", 1, (255, 0, 0))


class Enemy(GameSprite):
    def update(self):
        global direction
        global lost
        speed = 2
        if direction == "down":
            self.rect.y += random.randint(1, 5)
Example No. 29
def main(seed=0, n_train=60000, n_test=10000, inhib=250, kernel_size=(16,), stride=(2,), n_filters=25, n_output=100,
         time=100, crop=0, lr=1e-2, lr_decay=0.99, dt=1, theta_plus=0.05, theta_decay=1e-7, intensity=1, norm=0.2,
         progress_interval=10, update_interval=250, train=True, plot=False, gpu=False):

    assert n_train % update_interval == 0, 'No. examples must be divisible by update_interval'

    params = [
        seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, inhib, time, dt,
        theta_plus, theta_decay, intensity, norm, progress_interval, update_interval
    ]

    model_name = '_'.join([str(x) for x in params])

    if not train:
        test_params = [
            seed, kernel_size, stride, n_filters, crop, lr, lr_decay, n_train, n_test, inhib, time, dt,
            theta_plus, theta_decay, intensity, norm, progress_interval, update_interval
        ]

    np.random.seed(seed)

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    side_length = 28 - crop * 2
    n_inpt = side_length ** 2
    n_examples = n_train if train else n_test
    n_classes = 10

    # Build network.
    if train:
        network = Network()

        conv_size = (
            int((side_length - kernel_size) / stride) + 1,
            int((side_length - kernel_size) / stride) + 1
        )

        input_layer = Input(n=n_inpt, traces=True, trace_tc=5e-2)

        output_layer = DiehlAndCookNodes(
            n=n_filters * conv_size[0] * conv_size[1], traces=True, rest=0, reset=0,
            thresh=1, refrac=0, decay=1e-2, trace_tc=5e-2, theta_plus=theta_plus,
            theta_decay=theta_decay
        )
        input_output_conn = LocallyConnectedConnection(
            input_layer, output_layer, kernel_size=kernel_size, stride=stride, n_filters=n_filters,
            nu=[0, lr], update_rule=WeightDependentPostPre, wmin=0, wmax=1,
            norm=norm, input_shape=(side_length, side_length)
        )

        w = torch.zeros(n_filters, *conv_size, n_filters, *conv_size)
        for fltr1 in range(n_filters):
            for fltr2 in range(n_filters):
                if fltr1 != fltr2:
                    for i in range(conv_size[0]):
                        for j in range(conv_size[1]):
                            w[fltr1, i, j, fltr2, i, j] = -inhib

        w = w.view(n_filters * conv_size[0] * conv_size[1], n_filters * conv_size[0] * conv_size[1])
        recurrent_conn = Connection(output_layer, output_layer, w=w)

        network.add_layer(input_layer, name='X')
        network.add_layer(output_layer, name='Y')
        network.add_connection(input_output_conn, source='X', target='Y')
        network.add_connection(recurrent_conn, source='Y', target='Y')

        output_layer = LIFNodes(
            n=n_output, traces=True, rest=0, reset=0, thresh=1, refrac=0, decay=1e-2, trace_tc=5e-2
        )

        hidden_output_connection = Connection(
            network.layers['Y'], output_layer, nu=[0, 5 * lr],
            update_rule=WeightDependentPostPre, wmin=0,
            wmax=1, norm=norm * n_output
        )

        w = -inhib * (torch.ones(n_output, n_output) - torch.diag(torch.ones(n_output)))
        output_recurrent_connection = Connection(
            output_layer, output_layer, w=w, update_rule=NoOp, wmin=-inhib, wmax=0
        )

        network.add_layer(output_layer, name='Z')
        network.add_connection(hidden_output_connection, source='Y', target='Z')
        network.add_connection(output_recurrent_connection, source='Z', target='Z')
    else:
        network = load_network(os.path.join(params_path, model_name + '.pt'))

        network.connections['X', 'Y'].update_rule = NoOp(
            connection=network.connections['X', 'Y'], nu=network.connections['X', 'Y'].nu
        )

        network.layers['Y'].theta = 0
        network.layers['Y'].theta_decay = 0
        network.layers['Y'].theta_plus = 0

        # del network.connections['Y', 'Y']

        network.connections['Y', 'Z'].update_rule = NoOp(
            connection=network.connections['Y', 'Z'], nu=0
        )

        # network.layers['Z'].theta = 0
        # network.layers['Z'].theta_decay = 0
        # network.layers['Z'].theta_plus = 0

        # del network.connections['Z', 'Z']

    conv_size = network.connections['X', 'Y'].conv_size
    locations = network.connections['X', 'Y'].locations
    conv_prod = int(np.prod(conv_size))
    n_neurons = n_filters * conv_prod

    # Voltage recording for excitatory and inhibitory layers.
    voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
    network.add_monitor(voltage_monitor, name='output_voltage')

    # Load MNIST data.
    dataset = MNIST(path=data_path, download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images *= intensity
    images = images[:, crop:-crop, crop:-crop].contiguous().view(-1, side_length ** 2)

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time)
        network.add_monitor(spikes[layer], name=f'{layer}_spikes')

    # Train the network.
    if train:
        print('\nBegin training.\n')
    else:
        print('\nBegin test.\n')

    spike_ims = None
    spike_axes = None
    weights_im = None
    weights2_im = None

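    # Per-label masks for supervised readout: during training, every 'Z' neuron except the
    # per_class neurons assigned to the current label is held silent via its unclamp mask.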
    unclamps = {}
    per_class = int(n_output / n_classes)
    for label in range(n_classes):
        unclamp = torch.ones(n_output).byte()
        unclamp[label * per_class: (label + 1) * per_class] = 0
        unclamps[label] = unclamp

    predictions = torch.zeros(n_examples)
    corrects = torch.zeros(n_examples)

    start = t()
    for i in range(n_examples):
        if i % progress_interval == 0:
            print(f'Progress: {i} / {n_examples} ({t() - start:.4f} seconds)')
            start = t()

        if i % update_interval == 0 and i > 0:
            if train:
                network.save(os.path.join(params_path, model_name + '.pt'))
                network.connections['X', 'Y'].update_rule.nu[1] *= lr_decay

        # Get next input sample.
        image = images[i % len(images)]
        label = labels[i % len(images)].item()
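        # Encode the cropped image as Bernoulli spike trains over `time` steps; the spike
        # probability scales with pixel intensity, capped at max_prob.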
        sample = bernoulli(datum=image, time=time, dt=dt, max_prob=0.7)
        inpts = {'X': sample}

        # Run the network on the input.
        if train:
            network.run(inpts=inpts, time=time, unclamp={'Z': unclamps[label]})
        else:
            network.run(inpts=inpts, time=time)

        if not train:
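            # If the readout layer stayed nearly silent, re-present the sample with a higher
            # maximum spike probability (up to three retries) before scoring the prediction.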
            retries = 0
            while spikes['Z'].get('s').sum() < 5 and retries < 3:
                retries += 1
                sample = bernoulli(datum=image, time=time, dt=dt, max_prob=0.7 + 0.1 * retries)
                inpts = {'X': sample}

                if train:
                    network.run(inpts=inpts, time=time, unclamp={'Z': unclamps[label]})
                else:
                    network.run(inpts=inpts, time=time)

        output = spikes['Z'].get('s')
        summed_neurons = output.sum(dim=1).view(per_class, n_classes)
        summed_classes = summed_neurons.sum(dim=1)
        prediction = torch.argmax(summed_classes).item()
        correct = prediction == label

        predictions[i] = prediction
        corrects[i] = int(correct)

        # Optionally plot various simulation information.
        if plot:
            _spikes = {
                'X': spikes['X'].get('s').view(side_length ** 2, time),
                'Y': spikes['Y'].get('s').view(n_neurons, time),
                'Z': spikes['Z'].get('s').view(n_output, time)
            }

            spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes)
            weights_im = plot_locally_connected_weights(
                network.connections['X', 'Y'].w, n_filters, kernel_size,
                conv_size, locations, side_length, im=weights_im
            )

            n_sqrt = int(np.ceil(np.sqrt(n_output)))
            side = int(np.ceil(np.sqrt(network.layers['Y'].n)))
            w = network.connections['Y', 'Z'].w
            w = get_square_weights(w, n_sqrt=n_sqrt, side=side)

            weights2_im = plot_weights(
                w, im=weights2_im, wmax=1
            )

            plt.pause(1e-8)

        network.reset_()  # Reset state variables.

    print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')

    if train:
        network.save(os.path.join(params_path, model_name + '.pt'))

    if train:
        print('\nTraining complete.\n')
    else:
        print('\nTest complete.\n')

    accuracy = torch.mean(corrects).item() * 100

    print(f'\nAccuracy: {accuracy}\n')

    to_write = params + [accuracy] if train else test_params + [accuracy]
    to_write = [str(x) for x in to_write]
    name = 'train.csv' if train else 'test.csv'

    if not os.path.isfile(os.path.join(results_path, name)):
        with open(os.path.join(results_path, name), 'w') as f:
            if train:
                f.write(
                    'random_seed,kernel_size,stride,n_filters,crop,lr,lr_decay,n_train,inhib,time,timestep,theta_plus,'
                    'theta_decay,intensity,norm,progress_interval,accuracy\n'
                )
            else:
                f.write(
                    'random_seed,kernel_size,stride,n_filters,crop,lr,lr_decay,n_train,n_test,inhib,time,timestep,'
                    'theta_plus,theta_decay,intensity,norm,progress_interval,update_interval,accuracy\n'
                )

    with open(os.path.join(results_path, name), 'a') as f:
        f.write(','.join(to_write) + '\n')

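    # Trim or repeat the label tensor so its length matches the number of processed examples
    # before computing the confusion matrix.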
    if labels.numel() > n_examples:
        labels = labels[:n_examples]
    else:
        while labels.numel() < n_examples:
            if 2 * labels.numel() > n_examples:
                labels = torch.cat([labels, labels[:n_examples - labels.numel()]])
            else:
                labels = torch.cat([labels, labels])

    # Compute confusion matrices and save them to disk.
    confusion = confusion_matrix(labels, predictions)

    to_write = ['train'] + params if train else ['test'] + test_params
    f = '_'.join([str(x) for x in to_write]) + '.pt'
    torch.save(confusion, os.path.join(confusion_path, f))
# Train the network.
if train:
    print('\nBegin training.\n')
else:
    print('\nBegin test.\n')

inpt_axes = None
inpt_ims = None
spike_ims = None
spike_axes = None
weights_im = None
assigns_im = None
perf_ax = None

start = t()
for i in range(n_examples):
    if i % progress_interval == 0:
        elapsed = t() - start
        print(f'Progress: {i} / {n_examples} ({elapsed:.4f} seconds)')
        start = t()

    if i % update_interval == 0 and i > 0:
        if i % len(labels) == 0:
            current_labels = labels[-update_interval:]
        else:
            current_labels = labels[i % len(images) - update_interval:i % len(images)]

        # Update and print accuracy evaluations.
        curves, preds = update_curves(
            curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
Exemplo n.º 31
0
from time import time_ns as t

cant = 10000000
inicio = t()
lista = [x * 2 if x % 2 == 0 else x * 3 for x in range(cant)]
fin = t()
print("Tiempo por comprension:", (fin - inicio) / 1000000000, "segundos")
inicio = t()
lista2 = []
for x in range(cant):
    if x % 2 == 0:
        lista2.append(x * 2)
    else:
        lista2.append(x * 3)
fin = t()
print("Tiempo por ciclos:", (fin - inicio) / 1000000000, "segundos")
Exemplo n.º 32
0
def main(seed=0,
         n_train=60000,
         n_test=10000,
         kernel_size=16,
         stride=4,
         n_filters=25,
         padding=0,
         inhib=500,
         lr=0.01,
         lr_decay=0.99,
         time=50,
         dt=1,
         intensity=1,
         progress_interval=10,
         update_interval=250,
         train=True,
         plot=False,
         gpu=False):

    if gpu:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
        torch.cuda.manual_seed_all(seed)
    else:
        torch.manual_seed(seed)

    if not train:
        update_interval = n_test

    if kernel_size == 32:
        conv_size = 1
    else:
        conv_size = int((32 - kernel_size + 2 * padding) / stride) + 1

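    # Number of excitatory neurons allotted to each of the 10 CIFAR-10 classes.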
    per_class = int((n_filters * conv_size * conv_size) / 10)

    # Build network.
    network = Network()
    input_layer = Input(n=1024, shape=(1, 1, 32, 32), traces=True)

    conv_layer = DiehlAndCookNodes(n=n_filters * conv_size * conv_size,
                                   shape=(1, n_filters, conv_size, conv_size),
                                   traces=True)

    conv_conn = Conv2dConnection(input_layer,
                                 conv_layer,
                                 kernel_size=kernel_size,
                                 stride=stride,
                                 update_rule=PostPre,
                                 norm=0.4 * kernel_size**2,
                                 nu=[0, lr],
                                 wmin=0,
                                 wmax=1)

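    # Recurrent inhibition: every excitatory neuron inhibits all others with weight -inhib;
    # self-connections are zeroed in the loop below.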
    w = -inhib * torch.ones(n_filters, conv_size, conv_size, n_filters,
                            conv_size, conv_size)
    for f in range(n_filters):
        for i in range(conv_size):
            for j in range(conv_size):
                w[f, i, j, f, i, j] = 0

    w = w.view(n_filters * conv_size**2, n_filters * conv_size**2)
    recurrent_conn = Connection(conv_layer, conv_layer, w=w)

    network.add_layer(input_layer, name='X')
    network.add_layer(conv_layer, name='Y')
    network.add_connection(conv_conn, source='X', target='Y')
    network.add_connection(recurrent_conn, source='Y', target='Y')

    # Voltage recording for excitatory and inhibitory layers.
    voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
    network.add_monitor(voltage_monitor, name='output_voltage')

    # Load CIFAR-10 data.
    dataset = CIFAR10(path=os.path.join('..', '..', 'data', 'CIFAR10'),
                      download=True)

    if train:
        images, labels = dataset.get_train()
    else:
        images, labels = dataset.get_test()

    images *= intensity
    images = images.mean(-1)

    # Lazily encode data as Poisson spike trains.
    data_loader = poisson_loader(data=images, time=time, dt=dt)

    spikes = {}
    for layer in set(network.layers):
        spikes[layer] = Monitor(network.layers[layer],
                                state_vars=['s'],
                                time=time)
        network.add_monitor(spikes[layer], name='%s_spikes' % layer)

    voltages = {}
    for layer in set(network.layers) - {'X'}:
        voltages[layer] = Monitor(network.layers[layer],
                                  state_vars=['v'],
                                  time=time)
        network.add_monitor(voltages[layer], name='%s_voltages' % layer)

    inpt_axes = None
    inpt_ims = None
    spike_ims = None
    spike_axes = None
    weights_im = None
    voltage_ims = None
    voltage_axes = None

    # Train the network.
    print('Begin training.\n')
    start = t()

    for i in range(n_train):
        if i % progress_interval == 0:
            print('Progress: %d / %d (%.4f seconds)' %
                  (i, n_train, t() - start))
            start = t()

            if train and i > 0:
                network.connections['X', 'Y'].nu[1] *= lr_decay

        # Get next input sample.
        sample = next(data_loader).unsqueeze(1).unsqueeze(1)
        inpts = {'X': sample}

        # Run the network on the input.
        network.run(inpts=inpts, time=time)

        # Optionally plot various simulation information.
        if plot:
            # inpt = inpts['X'].view(time, 1024).sum(0).view(32, 32)

            weights1 = conv_conn.w
            _spikes = {
                'X': spikes['X'].get('s').view(32**2, time),
                'Y': spikes['Y'].get('s').view(n_filters * conv_size**2, time)
            }
            _voltages = {
                'Y': voltages['Y'].get('v').view(n_filters * conv_size**2,
                                                 time)
            }

            # inpt_axes, inpt_ims = plot_input(
            #     images[i].view(32, 32), inpt, label=labels[i], axes=inpt_axes, ims=inpt_ims
            # )
            # voltage_ims, voltage_axes = plot_voltages(_voltages, ims=voltage_ims, axes=voltage_axes)

            spike_ims, spike_axes = plot_spikes(_spikes,
                                                ims=spike_ims,
                                                axes=spike_axes)
            weights_im = plot_conv2d_weights(weights1, im=weights_im)

            plt.pause(1e-8)

        network.reset_()  # Reset state variables.

    print('Progress: %d / %d (%.4f seconds)\n' %
          (n_train, n_train, t() - start))
    print('Training complete.\n')
Exemplo n.º 33
0
from time import time as t

t_init = t()

import numpy as np
try:
    import matplotlib.pyplot as plt
except ModuleNotFoundError:
    pass


def round(t1, t2):
    return np.round(t2 - t1, 2)


from Processing import Processing
from Visualization import Visualization
from MNT import MNT
from NWP import NWP
from Observation import Observation
from Data_2D import Data_2D
from MidpointNormalize import MidpointNormalize
from Evaluation import Evaluation
from PRM_predict import create_prm, update_selected_path
from Utils import connect_GPU_to_horovod, select_range, check_save_and_load

"""
Stations
"""
"""
['BARCELONNETTE', 'DIGNE LES BAINS', 'RESTEFOND-NIVOSE',
       'LA MURE-ARGENS', 'ARVIEUX', 'PARPAILLON-NIVOSE', 'EMBRUN',
Exemplo n.º 34
0
def find_best_location(tag, city_id, tag_graph, base_map, all_kde):
    from time import time as t
    start = t()
    # Check if we already have a result
    result_exists = get_tag_status(tag, city_id)
    if result_exists:
        print "\tSkipping", tag
        return

    # Get the nearby tags, and then the lats and lons from them
    ok_photo_locations = get_similar_good_photo_locations(tag, city_id, tag_graph)
    if len(ok_photo_locations) <= 0:
        null_coord = helpers.Coordinate(None, None)
        write_result(tag, null_coord, city_id)
        return
    if len(ok_photo_locations) == 1:
        good_photo_locations = ok_photo_locations
    else:
        good_photo_locations = []
        for coord in ok_photo_locations:
            coord.set_xy(base_map)
            # Prune items in the ocean or bay
            if base_map.is_land(coord.x, coord.y):
                good_photo_locations.append(coord)

    # Set up a KDE of the good photos
    if len(good_photo_locations) <= 0:
        null_coord = helpers.Coordinate(None, None)
        write_result(tag, null_coord, city_id)
        return
    # If only one matching photo, that is the best location
    elif len(good_photo_locations) == 1:
        write_result(tag, good_photo_locations[0], city_id)
        save_plot(base_map, tag, good_photo_locations, good_photo_locations[0])
        return
    else:
        photo_kde = helpers.get_xy_kde(good_photo_locations)
        normalized_kde = make_normalized_kde(photo_kde, all_kde, base_map)

    print "Done getting photos in", t() - start, "seconds"
    start = t()

    # Cluster the points to seed the minimization
    #cluster_points = get_fit_starts(good_photo_locations)
    cluster_points = hand_coded_seed_points(base_map)

    print "Done clustering in", t() - start, "seconds"
    start = t()

    # Find the minimum of normalized KDE starting from the cluster locations
    best_coord = find_the_minimum(good_photo_locations, normalized_kde, cluster_points, base_map)

    print "Done fitting in", t() - start, "seconds"
    start = t()

    # Writing to the database
    write_result(tag, best_coord, city_id)

    # Plot the results
    heat_map = make_heat_map(good_photo_locations, normalized_kde)
    save_plot(base_map, tag, good_photo_locations, best_coord, cluster_points, heat_map)

    print "Done with plotting in", t() - start, "seconds"
    start = t()
Exemplo n.º 35
0
from time import time as t
import subprocess

time = t()
S = 11  # Vertexes
graph_al = [[] for v in range(S)]  # Adjacency List Graph


def read_graph(f):

    file = open(f, 'r')
    lines = file.readlines()
    for i in range(len(graph_al)):
        lines[i] = lines[i].strip('\n')
        graph_al[i] = lines[i].split(' ')
        for j in range(len(graph_al[i])):
            graph_al[i][j] = int(graph_al[i][j])


def path_generate(origin):

    graph_al_bool = [False for v in range(len(graph_al))]  # Verification List
    distance = 0
    i = origin
    new_i = origin
    counter = 0
    print('From origin: {}...'.format(i))

    while (False in graph_al_bool):
        graph_al_bool[i] = True
        minimum = 0
#from time import clock as t
from time import time as t 

t0=t()
import scraperwiki
t1=t()
print 'Scraperwiki import overhead:',t1-t0

t0=t()
g=scraperwiki.utils.swimport('geo_1')
t1=t()
print 'Swimport overhead', t1-t0
# Blank Python

t0=t()
for i in range(0,2):
    print g.Fetchdata('SW1A1AA')
t1=t()
print 'Fetchdata cost', t1-t0

t0=t()
for i in range(0,2):
    print scraperwiki.geo.gb_postcode_to_latlng('SW1A1AA')
t1=t()
print 'scraperwiki.geo cost', t1-t0
Exemplo n.º 37
0
import requests
from time import time as t
from time import gmtime, strftime, sleep
import os

filename = os.path.basename(__file__)
num = filename.split('.')[0].split('worker')[1]

url = 'https://mipt{}-mihailselezniov.c9users.io/'.format(num)
url_task = 'http://127.0.0.1:5000/'

retry = 0
t1 = t()
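# Worker loop: fetch a task id from the local task server, forward it to the remote worker,
# and retry the same task after a network failure; successful results are appended to disk.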
while 1:
    if retry:
        task = retry
    else:
        r = requests.get(url_task)
        task = r.text

    try:
        r = requests.get(url + task)
    except:
        retry = task
        print('X', end='', flush=True)
        sleep(5)
        continue

    if r.status_code == 200 and r.text:
        with open('fib_data{}.txt'.format(num), 'a') as f:
            f.write('{}:{}\n'.format(task, r.text))
Exemplo n.º 38
0
 99 * 999 = 98901	> 10-digits } >>> 2

from 1 and 2:
we can get 'c' when 'b' is within [999 : 9999] and 'a' is within [2 : 9],
and also when 'b' is within [99 : 999] and 'a' is within [10 : 99]

combining the limits on both a and b:

2<a<100
100<b<10000

'''

from time import time as t

s=t()

def ispan(y):
	x='123456789'
	z=''.join(sorted([i for i in y]))
	return z==x
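
# A minimal sketch (an assumed continuation, not the original truncated solution) applying
# the bounds derived above: a in [2, 99] and b in [100, 9999]. A product c = a*b is kept
# when the concatenation of a, b and c is 1-9 pandigital, and distinct products are summed.
products = set()
for a in range(2, 100):
	for b in range(100, 10000):
		c = a * b
		digits = str(a) + str(b) + str(c)
		if len(digits) == 9 and ispan(digits):
			products.add(c)

print 'sum of pandigital products:', sum(products), 'in', t() - s, 'seconds'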

'''
#ispan test

import random as r
x=range(1,10)
f=[i for i in x]
print f
r.shuffle(f)
print f
Exemplo n.º 39
0
    def train(self):
        tic_train = t()
        time_dataloader = 0
        time_backwardpass = 0

        # load_labels
        train_labels = self.bp.train_labels
        val_labels = self.bp.val_labels

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # tensorboard
            self.writer = tf.summary.FileWriter(self.tensorboard_path)
            os.system('rm {}/events*'.format(self.tensorboard_path))
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])
            # restore model weights
            if self.args.restore:
                if tf.train.latest_checkpoint(
                        self.checkpoint_path) is not None:
                    print('OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
                    print('POSE MODEL : Loading weights from %s' %
                          tf.train.latest_checkpoint(self.checkpoint_path))
                    self.saver.restore(
                        sess, tf.train.latest_checkpoint(self.checkpoint_path))
                    self.start_epoch = int(
                        os.path.basename(
                            tf.train.latest_checkpoint(
                                self.checkpoint_path)).split('-')[-1])
                    print('Loaded')
                    print('OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
                else:
                    print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
                    print('No pose checkpoint found at {}'.format(
                        self.checkpoint_path))
                    print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')

            # train
            best_score = int(self.args.best_val_accuracy *
                             len(self.bp.val_labels))  # rt_score (maximum 48)
            print('====================================')
            print('           Train Started            ')
            print('====================================')
            for epoch in range(self.start_epoch, self.max_epoch + 1):
                tic = t()
                # load labels
                train_img_labels, train_cad_labels, train_pose_labels = self.bp.shuffle_and_parse_labels(
                    train_labels)
                # manage duplicate pose
                train_pose_labels = self.bp.manage_duplicate_pose(
                    train_cad_labels, train_pose_labels)

                max_batch_index = len(train_img_labels) // self.args.batch_size
                if len(train_img_labels) % self.args.batch_size != 0:
                    max_batch_index += 1
                for batch_index in range(max_batch_index):
                    # load data
                    tic_data = t()

                    b_img_lab, b_cad_lab, b_pose_lab = self.bp.batch_labels(
                        train_img_labels, train_cad_labels, train_pose_labels,
                        self.args.batch_size, batch_index)
                    view, ptcld = self.bp.return_batch(b_img_lab,
                                                       b_cad_lab,
                                                       mode='train')
                    b_RT_pose = self.bp.return_gt_rt(b_pose_lab)

                    toc_data = t()
                    time_dataloader += toc_data - tic_data
                    # train op
                    tic_trainop = t()

                    fetches = [
                        self.logit, self.quat_pred, self.norms, self.loss1,
                        self.loss2, self.loss3, self.loss4, self.loss,
                        self.optimizer
                    ]
                    feed_dict = {
                        self.X: view,
                        self.Y: ptcld,
                        self.Z: b_RT_pose,
                        self.is_training: True,
                        self.global_step: epoch
                    }
                    logit, quat_pred, norms, loss1, loss2, loss3, loss4, loss, _ = sess.run(
                        fetches, feed_dict=feed_dict)

                    toc_trainop = t()
                    time_backwardpass += toc_trainop - tic_trainop
                toc = t()
                print(
                    'epoch {} loss1 : {:.4} loss2 : {:.4} loss3 : {:.4} loss4 : {:.4} | loss : {:.4}, time per epoch: {:.4} sec, total time : {} sec'
                    .format(epoch, loss1, loss2, loss3, loss4, loss, toc - tic,
                            int(toc - tic_train)))
                print('data loading time : {}, backward pass time : {}'.format(
                    time_dataloader, time_backwardpass))
                print('quaternion[0] : {}, norms[0] : {}, batch_size : {}'.
                      format(quat_pred[0], norms[0], len(quat_pred)))

                # summary
                fetches = [self.summary_op, self.increment_global_step]
                feed_dict = {
                    self.X: view,
                    self.Y: ptcld,
                    self.Z: b_RT_pose,
                    self.is_training: False
                }
                summary, _ = sess.run(fetches, feed_dict=feed_dict)
                self.writer.add_summary(summary, epoch)

                # validation
                if epoch % self.args.val_every == 0:
                    print('-------------- validation -------------')
                    rt_score_list = list()
                    rt_score_dict = OrderedDict()
                    for cad_name in self.bp.classes:
                        rt_score_dict[cad_name] = []

                    # initialize temp.txt
                    f = open('./temp.txt', 'w')
                    f.close()

                    # load labels
                    val_img_labels, val_cad_labels, val_pose_labels = self.bp.shuffle_and_parse_labels(
                        val_labels, shuffle_labels=False)
                    # manage duplicate pose
                    val_pose_labels = self.bp.manage_duplicate_pose(
                        val_cad_labels, val_pose_labels)

                    with open('./temp.txt', 'a') as f:
                        max_val_batch_index = len(
                            val_pose_labels) // self.args.val_batch_size
                        if len(val_pose_labels
                               ) % self.args.val_batch_size != 0:
                            max_val_batch_index += 1
                        for val_batch_index in range(max_val_batch_index):
                            # load batch
                            b_img_lab, b_cad_lab, b_pose_lab = self.bp.batch_labels(
                                val_img_labels, val_cad_labels,
                                val_pose_labels, self.args.val_batch_size,
                                val_batch_index)
                            views, ptcld = self.bp.return_batch(b_img_lab,
                                                                b_cad_lab,
                                                                mode='val')
                            b_RT_pose = self.bp.return_gt_rt(b_pose_lab)

                            # feed forward
                            fetches = [
                                self.ptcld_pred, self.ptcld_pose, self.RT_pred
                            ]  # TODO : quat_pred
                            feed_dict = {
                                self.X: views,
                                self.Y: ptcld,
                                self.Z: b_RT_pose,
                                self.is_training: False
                            }
                            ptcld_pred, ptcld_pose, b_RT_pred = sess.run(
                                fetches, feed_dict=feed_dict)

                            # closest pose
                            closest_rt_idx_list = self.bp.return_closest_pose_candidate(
                                b_cad_lab, b_RT_pred)

                            # write results to "temp.txt"
                            for val_cad_lab, val_pose_lab, RT_gt, RT_pred, closest_rt_idx in zip(
                                    b_cad_lab, b_pose_lab, b_RT_pose,
                                    b_RT_pred, closest_rt_idx_list):
                                print('\nclass name : {},'.format(val_cad_lab),
                                      'pose index : {}'.format(val_pose_lab),
                                      file=f)
                                print(tabulate([[RT_gt, RT_pred]],
                                               headers=['RT_gt', 'RT_pred']),
                                      file=f)
                                print('# expected answer / closest RT', file=f)
                                print(val_pose_lab, closest_rt_idx, file=f)
                                s = 1 if val_pose_lab == closest_rt_idx else 0
                                rt_score_list.append(s)

                            # save pose comparison image
                            if val_batch_index == 0:
                                pose_img_len = self.args.pose_img_len
                                pose_compare_img = self.bp.return_pose_comparison_img(
                                    b_img_lab, b_cad_lab, b_pose_lab,
                                    b_RT_pose, b_RT_pred, closest_rt_idx_list,
                                    pose_img_len)
                                cv2.imwrite(
                                    self.val_path + '/pose_val_' +
                                    str(epoch).zfill(5) + '.png',
                                    pose_compare_img)

                    # write results
                    with open(
                            self.val_path + '/pose_val_' +
                            str(epoch).zfill(5) + '.txt', 'w') as f:
                        for i in range(len(rt_score_list)):
                            rt_score_dict[val_cad_labels[i]].append(
                                rt_score_list[i])
                        filelist = [sys.stdout, f]
                        for file in filelist:
                            total_score = 0
                            total_N = 0
                            for key, values in rt_score_dict.items():
                                print('<{}>'.format(key), file=file)
                                score = sum(values)
                                total_score += score
                                N = len(values)
                                total_N += N
                                try:
                                    print('rt score : {:.4}%  {}/{}'.format(
                                        score / N * 100, score, N),
                                          file=file)
                                except:
                                    pass
                            print('{:.4}% {}/{}'.format(
                                total_score / total_N * 100, total_score,
                                total_N),
                                  file=file)
                            print('-----------------------------', file=file)

                        # append contents of temp.txt
                        with open('./temp.txt', 'r') as g:
                            contents = g.read()
                            f.write(contents)

                    # save model
                    if total_score > best_score:
                        if self.args.save:
                            try:
                                self.saver.save(sess,
                                                self.checkpoint_path +
                                                '/saved_model',
                                                global_step=epoch)
                                print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
                                print('Saved model of epoch {}'.format(epoch))
                                print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
                            except:
                                print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
                                print('   Failed saving model ㅜㅜ   ')
                                print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
                        best_score = np.sum(rt_score_list)

        print('====================================')
        print('           Train Completed          ')
        print('====================================')
Exemplo n.º 40
0
    def test(self):
        tic = t()
        # load labels
        test_labels = self.bp.test_labels
        test_img_labels, test_cad_labels, test_pose_labels = self.bp.shuffle_and_parse_labels(
            test_labels, shuffle_labels=False)
        # duplicate pose handling
        test_pose_labels = self.bp.manage_duplicate_pose(
            test_cad_labels, test_pose_labels)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            # restore model weights
            if tf.train.latest_checkpoint(self.test_model_path) is not None:
                print('OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
                print('POSE MODEL : Loading weights from %s' %
                      tf.train.latest_checkpoint(self.test_model_path))
                self.saver.restore(
                    sess, tf.train.latest_checkpoint(self.test_model_path))
                print('OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO')
            else:
                print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
                print('No pose checkpoint found at {}'.format(
                    self.test_model_path))
                print('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
                sys.exit()
            toc = t()
            print('model loading time :', toc - tic)
            tic = t()
            views, cornerpoints = self.bp.return_testbatch()
            toc = t()
            print('data loading time :', toc - tic)
            tic = t()

            # feed forward
            fetches = [self.RT_pred]
            feed_dict = {
                self.X: views,
                self.Y: cornerpoints,
                self.is_training: False
            }
            [b_RT_pred] = sess.run(fetches, feed_dict=feed_dict)
            toc = t()
            print('feed forward time :', toc - tic)
            tic = t()

            # closest pose
            rt_idx_list = self.bp.return_closest_pose_candidate(
                test_cad_labels, b_RT_pred, dist=False)
            # calculate scores
            rt_score_list = []
            for closest_rt_idx, gt_pose_idx in zip(rt_idx_list,
                                                   test_pose_labels):
                s = 1 if closest_rt_idx == gt_pose_idx else 0
                rt_score_list.append(s)
            if self.args.test_simple:
                total_score = sum(rt_score_list)
                total_N = len(rt_score_list)
                print('{:.4}% {}/{}'.format(total_score / total_N * 100,
                                            total_score, total_N))
                print('-----------------------------')
            else:
                if self.args.save_test_imgs:
                    count = 0
                    LABEL_TO_POSE = {v: k for k, v in POSE_TO_LABEL.items()}
                    for input_img, closest_rt_idx, gt_pose_idx, cad_name in zip(
                            views, rt_idx_list, test_pose_labels,
                            test_cad_labels):
                        # save image
                        plt.clf()
                        fig, ax = plt.subplots(1, 2, sharey=True)
                        input_img = (input_img * 255).astype(np.uint8)
                        class_index = self.bp.classes.index(cad_name)
                        pred_pose_img = cv2.imread(
                            self.bp.rendering_adrs[class_index]
                            [closest_rt_idx])
                        ax[0].imshow(input_img)
                        ax[0].set_title('{}\n{}'.format(
                            cad_name, LABEL_TO_POSE[gt_pose_idx]))
                        ax[1].imshow(pred_pose_img)
                        ax[1].set_title('pred pose : {}'.format(
                            LABEL_TO_POSE[closest_rt_idx]))
                        if closest_rt_idx == gt_pose_idx:
                            plt.savefig(self.test_results_path + '/correct_' +
                                        str(count).zfill(3) + '.png')
                        else:
                            plt.savefig(self.test_results_path + '/wrong_' +
                                        str(count).zfill(3) + '.png')
                        count += 1
                        plt.close()
                rt_score_dict = OrderedDict()
                for cad_name in self.bp.classes:
                    rt_score_dict[cad_name] = []
                for i in range(len(rt_score_list)):
                    rt_score_dict[test_cad_labels[i]].append(rt_score_list[i])
                filelist = [sys.stdout]
                for file in filelist:
                    total_score = 0
                    total_N = 0
                    for key, values in rt_score_dict.items():
                        print('<{}>'.format(key), file=file)
                        score = sum(values)
                        total_score += score
                        N = len(values)
                        total_N += N
                        print('rt score : {:.4}%  {}/{}'.format(
                            score / N * 100, score, N),
                              file=file)
                    print('{:.4}% {}/{}'.format(total_score / total_N * 100,
                                                total_score, total_N),
                          file=file)
                    print('-----------------------------', file=file)
            toc = t()
            print('find closest pose / score calculation time :', toc - tic)
Exemplo n.º 41
0
spikes = {}
for layer in set(network.layers):
    spikes[layer] = Monitor(network.layers[layer], state_vars=["s"], time=time)
    network.add_monitor(spikes[layer], name="%s_spikes" % layer)

voltages = {}
for layer in set(network.layers) - {"X"}:
    voltages[layer] = Monitor(network.layers[layer],
                              state_vars=["v"],
                              time=time)
    network.add_monitor(voltages[layer], name="%s_voltages" % layer)

# Train the network.
print("Begin training.\n")
start = t()

inpt_axes = None
inpt_ims = None
spike_ims = None
spike_axes = None
weights1_im = None
voltage_ims = None
voltage_axes = None

for epoch in range(n_epochs):
    if epoch % progress_interval == 0:
        print("Progress: %d / %d (%.4f seconds)" %
              (epoch, n_epochs, t() - start))
        start = t()
Exemplo n.º 42
0
from time import time as t, sleep

t_start = t()

while t() - t_start < 10:
    print('hi')
    sleep(1)
Exemplo n.º 43
0
def main(pat='*', print_arrays=False, **opts):
    """
    Called when module is run as a script.

    :param pat: Wildcard or regular expression pattern. Only functions whose
        names (stripped of the prefix 'run_ca_') match the pattern are included
        in the benchmark. If a pattern contains only alphanumeric/underscore
        and wildcard ('*' and '?') characters only, it is considered a wildcard
        pattern (which is automatically converted to a regular expression).
    :param print_arrays:
    :param opts:
    :return:
    """

    # if the pattern consists only of alphanumeric characters and/or
    # underscores and/or '*' (wildcard matching zero or more characters)
    # and/or '?' (wildcard matching exactly one character), it is considered
    # a wildcard pattern (which is then converted to a regex pattern) otherwise
    # a regex.
    if re.match(r'^[\w*?]*$', pat):
        if not pat.startswith('*'):
            pat = '^' + pat
        if not pat.endswith('*'):
            pat += '$'
        pat = pat.replace('*', '.*').replace('?', '.')

    print('Function name regex',
          '-------------------',
          pat,
          end='\n\n',
          sep='\n')

    o = options(opts)
    fn_prefix = o.run_func_prefix
    o.fnames = fnames = *filter(
        partial(re.search, pat),
        (fn[len(fn_prefix):]
         for fn in vars(sys.modules[__name__]) if fn.startswith(fn_prefix))),

    print(*fnames, '\nGrade: ' + o.grade, sep='\n', end='\n\n')

    gens = o.gens
    n_fn = len(fnames)
    arg = f'({gens})'
    space = len(max(fnames, key=len)) + len(arg)

    # grids, call strings (for display purposes), execution times
    arrays, call_str, dts = [], [], []
    for fn in fnames:
        f = eval(fn_prefix + fn)
        t0, arr = t(), f()
        dt = t() - t0
        dts.append(dt)
        arrays.append(cp.asnumpy(arr))
        call_str.append(f'{fn}{arg}'.center(space))
        print(f'{f.__name__}:'.ljust(space + 11), f'{dt:.3f} sec')

    print()

    for i in range(n_fn):
        j = (i + 1) % n_fn
        # t0 = t()
        print(call_str[i],
              f'{"!="[int(np.array_equal(arrays[i], arrays[j]))]}=',
              call_str[j])
    print()
    # print(f'Compare-arrays dt: {t()-t0:.3f}')

    if print_arrays:
        print(conv_opts.data.np.u.init, '\n=================')
        for a, c in zip(arrays, call_str):
            print(f'{c}\n{a}\n=================')

    return arrays, call_str
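
# A hypothetical usage sketch (the 'conv*' pattern is an illustrative assumption): run only
# the benchmark functions whose names match 'run_ca_conv*' and print the resulting grids.
#
#     arrays, call_str = main(pat='conv*', print_arrays=True)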
Exemplo n.º 44
0
def rmsf(trajectories, reference, fitselection, rmsfselection, maxChunk=None, refMean=False, verbose=True):
    """
    Compute the RMSF of a number of trajectories to a reference frame
    Input:
        trajectories:    mdtraj.iterload() instance
        reference:       trajectory of which frame 0 is taken as a reference frame
        fitselection:    Selection text for rmsd fit or atom indices
        rmsfselection:   Selection text for rmsf computation or atom indices
        maxChunk:        Maximum number of trajectory chunks to process
        refMean:         If true, take the mean atom positions from trajectory as reference (reference can be set to None)
    Return:
        RMSF:            Root mean square fluctuations
        atomIndices:     Atom indices of the selected atoms
        resIDs:          Residue IDs of the selected atoms
    """

    # timers
    fitt = 0.0
    sst = 0.0
    loadt = 0.0

    nframes = 0  # total number of frames

    loadst = t()

    for ntrj, trajectory in enumerate(trajectories):
        loadt += t() - loadst

        if verbose:
            print("\rProcessing trajectory chunk {} of maximally {}.".format(ntrj, maxChunk), end="")
            sys.stdout.flush()

        if maxChunk is not None and ntrj >= maxChunk:
            break

        st = t()
        _fit_trajectory(trajectory, reference, fitselection, openMP=False)
        fitt += t() - st

        st = t()
        try:
            sumsquares += _sumsquares(trajectory, reference, rmsfselection, refMean=refMean)
        except NameError:
            # first chunk: sumsquares has not been initialized yet
            sumsquares = _sumsquares(trajectory, reference, rmsfselection, refMean=refMean)
        sst += t() - st

        nframes += trajectory.n_frames
        loadst = t()

    if verbose:
        print("\rLoading trajectories with a total of {} frames took {:.2f} sec.".format(nframes, loadt))
        print("RMSD fit took {:.2f} sec.".format(fitt))
        print("RMSF computation took {:.2f} sec.".format(sst))

    RMSF = (sumsquares / (3 * nframes)) ** 0.5

    atomIndices = _atom_indices_from_selection(trajectory.topology, rmsfselection)
    atomIndicesTrj = trajectory.atom_slice(atomIndices)
    resIDs = [atom.residue.resSeq for atom in atomIndicesTrj.topology.atoms]

    return RMSF, atomIndices, resIDs
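
# A hypothetical usage sketch (the file names, chunk size and 'name CA' selections are
# illustrative assumptions, not part of the original):
if __name__ == "__main__":
    import mdtraj as md

    trajs = md.iterload("traj.xtc", top="topology.pdb", chunk=100)  # chunked trajectory iterator
    ref = md.load("topology.pdb")                                   # frame 0 serves as the reference
    RMSF, atom_idx, res_ids = rmsf(trajs, ref, "name CA", "name CA", maxChunk=10)
    for res, value in zip(res_ids, RMSF):
        print("residue {}: RMSF = {:.3f}".format(res, value))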
Exemplo n.º 45
0
voltages = {}
for layer in set(network.layers) - {"X"}:
    voltages[layer] = Monitor(network.layers[layer], state_vars=["v"], time=int(time/dt))
    network.add_monitor(voltages[layer], name="%s_voltages" % layer)

inpt_ims, inpt_axes = None, None
spike_ims, spike_axes = None, None
weights_im = None
assigns_im = None
perf_ax = None
voltage_axes, voltage_ims = None, None

# Train the network.
print("\nBegin training.\n")
start = t()
labels = []
for epoch in range(n_epochs):

    if epoch % progress_interval == 0:
        print("Progress: %d / %d (%.4f seconds)" % (epoch, n_epochs, t() - start))
        start = t()

    # Create a dataloader to iterate and batch data
    dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=n_workers, pin_memory=gpu
    )

    for step, batch in enumerate(tqdm(dataloader)):
        # Get next input sample.
        inputs = {"X": batch["encoded_image"].view(int(time/dt), 1, 1, 28, 28)}
Exemplo n.º 46
0
from time import time as t
from record import Record, UnimarcRecord
from reader import Reader
from marcxml import record_to_xml
#source = open('/home/sergey/PycharmProjects/pymarc2/tmp/ruslan22.mrc', 'r')
source = open('/home/sergey/PycharmProjects/pymarc2/tmp/wrong.mrc', 'r')
#source = open('/home/sergey/projects/PycharmProjects/ermapp/appdata/rusmarc_ebsco.mrc', 'r')
#out = open('tmp/rusmarc_ebsco_out.mrc', 'w')
s = t()
import pprint
pp = pprint.PrettyPrinter(indent=4)
reader = Reader(UnimarcRecord, source, raw_encoding='utf-8')
for record in reader:
    print record
    #record_to_xml(record)
    #print pp.pprint(record.to_dict())
    #out.write(UnimarcRecord(record.as_marc()).as_marc())


print 'time:', t() - s

Exemplo n.º 47
0
from time import time as t
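# Millisecond-resolution timestamps built on time.time().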
time = lambda: int(t() * 1000)
chat.log("JEP start time: " + str(time()))
prStart = time()
chat.log("  Prime (2000) - Start: " + str(prStart))

primes = []
num = 2
while len(primes) < 2000:
    for prime in primes:
        if not num % prime:
            break
    else:
        primes.append(num)
    num += 1

num = None
primes = None

prEnd = time()
chat.log("  Prime (2000) - End: " + str(prEnd) + ", duration: " +
         str(prEnd - prStart))
gbStart = time()
chat.log("  getBlock (4 full chunk) - Start: " + str(gbStart))

blocks = []
pla = player.getPlayer()
chunk = [int(pla.getX()) >> 4, int(pla.getZ()) >> 4]
start = [chunk[0] << 4, chunk[1] << 4]
end = [start[0] + 32, start[1] + 32]
for x in range(start[0], end[0]):
Exemplo n.º 48
0
if __name__ == '__main__':
	#from osaterminology.getterminology import getaete
	#p = '/Applications/TextEdit.app'
	#p = '/Applications/GraphicConverter US/GraphicConverter.app'
	#p = '/Applications/Smile269/Smile.app'
	#p = '/Applications/Smile/Smile.app'
	#p = '/System/Library/CoreServices/Finder.app'
	#terms = buildtablesforaetes(getaete(p))
	
	f = file('/Users/has/PythonDev/appscript/~old/osaterminology_dev/InDesign_CS2_raw_terms/idcs2.aete')
	a=f.read()
	f.close()
	from time import time as t
	import terminology
	tt=t()
	terms = buildtablesforaetes([a])
	print t()-tt
#	classes, enums, properties, elements, commands = terms
#	tt=t()
#	terminology._makeTypeTable(classes,enums,properties)
#	terminology._makeReferenceTable(properties,elements,commands)
#	print t()-tt
#	print terms
	if 0:
		from pprint import pprint
		for i in terms:
			pprint(i)
			print

Exemplo n.º 49
0
from random import randrange
from time import time as t
question_count = int(input('Enter the number of questions'))
max_number = int(input('Enter the max range'))
score = 0
answer_list = []
start = t()
for n in range(question_count):
    rand_a, rand_b = int(randrange(2, max_number + 1)), int(
        randrange(2, max_number + 1))
    answer = rand_a * rand_b

    user_answer = int(input(f'What is {rand_a} * {rand_b} = '))
    answer_list.append(
        f'{n}. for {rand_a} * {rand_b} you answered {user_answer}, the correct answer {answer}'
    )
    if user_answer == answer:
        score += 1
end = t()
correct_ans = score / question_count
correct_perc = correct_ans * 100
print(f'Thank you for playing Math tutor')
print(
    f'You got {score} answers correct out of {question_count} questions with {round(correct_perc,1)} % accuracy'
)
print(f'you took {round(end-start,1)} sec to answer all the questions')

for item in answer_list:
    print(item)
Exemplo n.º 50
0
		# check for if this is the right answer
		if type(rtrnValue) is list:
			return rtrnValue
		walkList.remove(a[1])
		hasList.remove(a[0])

	return False

# tris, sqs, pens, hexs, heps, octs = [], [], [], [], [], []

numListList = [[], [], [], [], [], []]
conDictList = [{}, {}, {}, {}, {}, {}]

limit = 10000

start = t()

for i in range(limit):
	tri, sq, pen, Hex, hep, Oct = makeNums(i)
	
	if tri > 999 and tri < 10000:
		numListList[0].append(str(tri))

	if sq > 999 and sq < 10000:
		numListList[1].append(str(sq))

	if pen > 999 and pen < 10000:
		numListList[2].append(str(pen))

	if Hex > 999 and Hex < 10000:
		numListList[3].append(str(Hex))
Exemplo n.º 51
0
albums = set([])
artists = set([])
genres = set([])
tracks = set([])

track_info = open(os.path.join(path_to_raw_data, "trackData%i.txt" % comp_track_number))
tax_group = h5.createGroup("/", "tax", "Item Taxonomy information")

t_al  = h5.createTable(tax_group, "track_album",  TrackAlbum , expectedrows=1000000)
t_ar  = h5.createTable(tax_group, "track_artist", TrackArtist, expectedrows=1000000)
t_g   = h5.createTable(tax_group, "track_genre",  TrackGenre , expectedrows=1000000)
al_ar = h5.createTable(tax_group, "album_artist", AlbumArtist, expectedrows=1000000)
al_g  = h5.createTable(tax_group, "album_genre",  AlbumGenre , expectedrows=1000000)
ar_g  = h5.createTable(tax_group, "artist_genre", ArtistGenre, expectedrows=1000000)

lasttime = t()
for n, track in enumerate(track_info):
    track_data = track.strip().split("|") 

    track, album, artist = track_data[0:3]
    gens = [int(g) for g in track_data[3:] if g != "None"]

    assert (track != "None"), "Wtf . . . no track?"    
    tracks.add(int(track))

    # This is really confusing.  My apologies:
    # If there is no genre data, the list comprehensions just don't run
    # Doing it this way minimizes the amount of non-compiled conditional logic

    # Track -> Genres
    [t_g.append([tuple([int(track), g])]) for g in gens]