示例#1
0
async def serve(q: Q):
    """Handle one UI request: manage per-user stopwatch state and refresh the page.

    Initializes app/user state on first use, starts or stops the user's
    stopwatch depending on which button was pressed, and saves the page.
    `U` is presumably a project helper module with the UI/update routines —
    its exact side effects are not visible here.
    """
    # App-wide leaderboard store, created once and shared by all users.
    if not q.app.lb_dict:
        q.app.lb_dict = dict()

    # One stopwatch per user, lazily created on the user's first request.
    if not q.user.stop_watch:
        q.user.stop_watch = StopWatch()

    if q.args.start and not q.user.stop_watch.active:
        # Start button pressed while idle: start the clock, then refresh
        # the streak history and leaderboard views.
        await U.start_clock(q.user.stop_watch, q)

        q.user.stop_watch.update_df()
        await U.update_streak_history(q.user.stop_watch, q)

        await U.update_leaderboard(q)
    elif q.args.stop and q.user.stop_watch.active:
        # Stop button pressed while running: stop the clock and record the streak.
        q.user.stop_watch.stop()
        await U.update_stop_streak(q.user.stop_watch, q)

    # First request from this browser tab: build the layout and show the board.
    if not q.client.initialized:
        q.client.initialized = True
        await U.responsive_layout(q)
        await U.update_leaderboard(q)
    await q.page.save()
示例#2
0
from utils.StopWatch import StopWatch
from utils.RawDataProcessing import PregeneratedProcessor
from utils.DataProviderLight import DataProviderLight
from utils.DatasetGeneration import DeterministicGenerator
from classification.ClassificationTest import ClassificationTest
from classification.KNNClassification import KNNClassifier
# script for corresponding test case
# most test cases should be able to be executed without any further changes, if data is available

# Benchmark script: train and test a KNN classifier on pregenerated
# embeddings at several sample sizes.
SAMPLE_SIZE = [5000, 10000, 20000, 50000]
N_NEIGHBORS = 1
# change device to "cpu" if cuda not available
DEVICE = "cuda"
# NOTE(review): stopwatch is created but never started/read in this snippet —
# confirm whether timing was intended here.
stopwatch = StopWatch()
# pregenerated embedding and labels
for sample_size in SAMPLE_SIZE:
    # NOTE(review): these file handles are reopened every iteration and never
    # closed — consider `with open(...)`; confirm first that DataProviderLight
    # reads the files eagerly in its constructor.
    file_words = open("../tests/embedding7.json")
    file_labels = open("../data/unique_labels.json")
    data_provider = DataProviderLight(file_words, file_labels, sample_size=sample_size)
    # embedding data, splitting up into train and test set
    processor = PregeneratedProcessor(data_provider)
    generator = DeterministicGenerator(data_provider, processor)
    dataset = generator.generate_dataset()
    print("Sample Size: " + str(sample_size))
    # creating classifier, overwriting parameters
    classifier = KNNClassifier(data_provider, dataset, DEVICE)
    classifier.n_neighbours = N_NEIGHBORS
    # train classifier and output progress
    classifier.train()
    # testing — NOTE(review): the test object is built but no run/evaluate call
    # is visible here; the snippet may be truncated.
    test = ClassificationTest(dataset, classifier)
示例#3
0
# change device to "cpu" if cuda not available
DEVICE = "cuda"

# Benchmark script: train a Word2Vec embedding, feed its weights into a KNN
# classifier, and test it at several sample sizes. SAMPLE_SIZE, FEATURES,
# EPOCHS, BATCH_SIZE and N_NEIGHBORS are defined outside this excerpt.
for sample_size in SAMPLE_SIZE:
    # raw words and labels
    # NOTE(review): these handles are reopened every iteration and never
    # closed — consider `with open(...)`; confirm DataProviderLight reads
    # them eagerly before changing.
    file_words = open("../data/unique_equations.json")
    file_labels = open("../data/unique_labels.json")
    # pre calculated weight matrix
    file_weights = open("../data/weights_0.json")
    data_provider = DataProviderLight(file_words,
                                      file_labels,
                                      sample_size=sample_size,
                                      file_weights=file_weights)
    # embedding data, splitting up into train and test set
    processor = VectorProcessor(data_provider)
    generator = DeterministicGenerator(data_provider, processor)
    stopwatch = StopWatch()

    # training the word2vec net
    word2vec = Word2Vec(data_provider, FEATURES, DEVICE)
    word2vec.train(EPOCHS, BATCH_SIZE)
    # extracting weights and injecting them into the data provider
    data_provider.weights = torch.tensor(word2vec.get_weights())
    # generate dataset
    dataset = generator.generate_dataset()
    # train knn classifier
    classifier = KNNClassifier(data_provider, dataset, DEVICE)
    classifier.n_neighbours = N_NEIGHBORS
    classifier.train()
    # test the classifier
    test = ClassificationTest(dataset, classifier)
    # NOTE(review): the stopwatch is started only after training and testing
    # are done, so nothing above is being timed — confirm placement; the
    # snippet may be truncated before the matching stop/read call.
    stopwatch.start()
示例#4
0
def stopwatch() -> StopWatch:
    """Build and hand back a brand-new StopWatch."""
    fresh_watch = StopWatch()
    return fresh_watch