Code Example #1
def offline_only():
    start_time = time.time()
    # x, y, x_s, y_s, x_test, y_test = load_datasets(path='datasets/svhn/', static_data_ratio=0.1)
    x, y, x_s, y_s, x_test, y_test = loadmnist(static_data_ratio=0.1)

    model = NeuralNetworkClassifier()
    sgd = StochasticGradientDescent(model, x, y)
    sgd.train(num_iterations=50, learning_rate=0.001)

    error = []
    test_increment = math.floor(len(y_s) / len(y_test))
    i = 0
    idx = 2
    while i <= len(y_s):
        end_of_batch = i + test_increment
        if i + test_increment >= len(y_s):
            end_of_batch = len(y_s)
        # offline-only baseline: the model is never updated on the streamed data
        # sgd.incremental(x_s[i: end_of_batch, ], y_s[i: end_of_batch], 0.001)
        if idx < len(y_test):
            error_rate = model.calculate_loss(x_test[0:idx], y_test[0:idx])
        else:
            error_rate = model.calculate_loss(x_test, y_test)
        error += [error_rate]
        print("{} items process".format(i))
        i += test_increment
        idx += 1

    error_rate = model.calculate_loss(x_test, y_test)
    error += [error_rate]

    running_time = time.time() - start_time
    return error, running_time
Code Example #2
def velox():
    start_time = time.time()
    # x, y, x_s, y_s, x_test, y_test = load_datasets(path='datasets/svhn/', static_data_ratio=0.1)
    x, y, x_s, y_s, x_test, y_test = loadmnist(static_data_ratio=0.1)

    model = NeuralNetworkClassifier()
    sgd = StochasticGradientDescent(model, x, y)
    sgd.train(num_iterations=50, learning_rate=0.001)
    buffer_size = 10000
    error = []
    test_increment = math.floor(len(y_s) / len(y_test))
    i = 0
    idx = 2
    while i < len(y_s):
        sgd.incremental(np.array([x_s[i]]), np.array([y_s[i]]), 0.001)
        sgd.update_buffer(x_s[i], y_s[i])
        # print("{} items process".format(i))
        i += 1
        if i % test_increment == 0:
            if idx < len(y_test):
                error_rate = model.calculate_loss(x_test[0:idx], y_test[0:idx])
            else:
                error_rate = model.calculate_loss(x_test, y_test)
            error += [error_rate]
        if i % buffer_size == 0:
            print("{} items process ... scheduling new iteration".format(i))
            sgd.retrain()
        idx += 1

    error_rate = model.calculate_loss(x_test, y_test)
    error += [error_rate]

    running_time = time.time() - start_time
    return error, running_time
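The offline_only and velox snippets above both return an error trace together with the wall-clock running time, so their results can be compared directly. Below is a minimal sketch of such a comparison; it is illustrative only and assumes the two functions (and their dependencies: time, math, numpy, NeuralNetworkClassifier, StochasticGradientDescent, loadmnist) are already in scope, with matplotlib added purely for plotting.

import matplotlib.pyplot as plt

def compare_classifier_strategies():
    # Each strategy returns (error trace, running time in seconds).
    offline_error, offline_time = offline_only()
    velox_error, velox_time = velox()

    # Both traces are checkpointed roughly once per test_increment stream items,
    # so plotting them against the checkpoint index keeps them aligned.
    plt.plot(offline_error, label="offline only ({:.0f}s)".format(offline_time))
    plt.plot(velox_error, label="velox-style ({:.0f}s)".format(velox_time))
    plt.xlabel("checkpoint")
    plt.ylabel("test loss")
    plt.legend()
    plt.show()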
Code Example #3
def simple():
    x, y, x_s, y_s, x_test, y_test = loadsvhn(path='../datasets/svhn/',
                                              static_data_ratio=1.0)
    model = NeuralNetworkClassifier()
    sgd = StochasticGradientDescent(model, x, y)
    sgd.train(num_iterations=50, learning_rate=0.001)
    print(model.calculate_loss(x_test, y_test))
Code Example #4
def continuous():
    start_time = time.time()
    # ratings, stream, n_items, n_users = load_datasets('datasets/ml-100k/u.data', static_data_ratio=0.1)
    ratings, stream, n_items, n_users = load_datasets(
        'datasets/ml-1m/ratings.dat', sep='::', static_data_ratio=0.1)
    n_items = 3952
    model = MatrixFactorizationModel(item_count=n_items,
                                     user_count=n_users,
                                     sum_rating=np.sum(ratings[:, 2]),
                                     count_rating=len(ratings[:, 2]))
    sgd = StochasticGradientDescent(model, ratings[:, (0, 1)], ratings[:, 2])
    sgd.train()
    error = []
    i = 0
    cum_error = 0
    buffer_size = 50000
    for item in stream:

        l = np.abs(model.loss(item[0:2], item[2]))
        cum_error += l
        sgd.incremental(item[0:2], item[2], 0.001)
        sgd.update_buffer(item[0:2], item[2])
        if i % buffer_size == 0 and i != 0:
            print("{} items processed ... scheduling new iteration".format(i))
            sgd.run_iter()
        i += 1
        if i % 1000 == 0:
            error += [cum_error / i]
    running_time = time.time() - start_time
    return error, running_time
Code Example #5
def full():
    start_time = time.time()
    # x, y, x_s, y_s, x_test, y_test = load_datasets(path='datasets/svhn/', static_data_ratio=0.1)
    x, y, x_s, y_s, x_test, y_test = loadmnist(static_data_ratio=1.0)

    model = NeuralNetworkClassifier()
    sgd = StochasticGradientDescent(model, x, y)
    sgd.train(num_iterations=50, learning_rate=0.001)
    error = []
    error_rate = model.calculate_loss(x_test, y_test)
    error += [error_rate]
    running_time = time.time() - start_time
    return error, running_time
Code Example #6
def full():
    start_time = time.time()
    # ratings, stream, n_items, n_users = load_datasets('datasets/ml-100k/u.data', static_data_ratio=0.1)
    ratings, stream, n_items, n_users = load_datasets(
        'datasets/ml-1m/ratings.dat', sep='::', static_data_ratio=1.0)
    n_items = 3952
    model = MatrixFactorizationModel(item_count=n_items,
                                     user_count=n_users,
                                     sum_rating=np.sum(ratings[:, 2]),
                                     count_rating=len(ratings[:, 2]))
    sgd = StochasticGradientDescent(model, ratings[:, (0, 1)], ratings[:, 2])
    sgd.train()
    error = model.calculate_loss(ratings[:, (0, 1)], ratings[:, 2])
    running_time = time.time() - start_time
    return error, running_time
Code Example #7
def offline_online():
    start_time = time.time()
    # ratings, stream, n_items, n_users = load_datasets('datasets/ml-100k/u.data', static_data_ratio=0.1)
    ratings, stream, n_items, n_users = load_datasets(
        'datasets/ml-1m/ratings.dat', sep='::', static_data_ratio=0.1)
    n_items = 3952
    model = MatrixFactorizationModel(item_count=n_items,
                                     user_count=n_users,
                                     sum_rating=np.sum(ratings[:, 2]),
                                     count_rating=len(ratings[:, 2]))
    sgd = StochasticGradientDescent(model, ratings[:, (0, 1)], ratings[:, 2])
    sgd.train()
    error = []
    i = 0
    cum_error = 0
    for item in stream:
        l = np.abs(model.loss(item[0:2], item[2]))
        cum_error += l
        sgd.incremental(item[0:2], item[2], 0.001)
        i += 1
        if i % 1000 == 0:
            print "{} items processed".format(i)
            error += [cum_error / i]
    running_time = time.time() - start_time
    return error, running_time
Code Example #8
def naive():
    start_time = time.time()
    # ratings, stream, n_items, n_users = load_datasets('datasets/ml-100k/u.data', static_data_ratio=0.1)
    ratings, stream, n_items, n_users = load_datasets(
        'datasets/ml-1m/ratings.dat', sep='::', static_data_ratio=0.0)
    n_items = 3952
    model = MatrixFactorizationModel(item_count=n_items, user_count=n_users)
    sgd = StochasticGradientDescent(
        model, np.empty([0, stream.shape[1]], dtype=np.uint64),
        np.empty([0, 1], dtype=np.uint64))
    error = []
    i = 0
    cum_error = 0
    for item in stream:
        l = np.abs(model.loss(item[0:2], item[2]))
        cum_error += l
        sgd.incremental(item[0:2], item[2], 0.001)
        i += 1
        if i % 1000 == 0:
            error += [cum_error / i]

    running_time = time.time() - start_time
    return error, running_time
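The MovieLens variants above (continuous, offline_online, naive) all return the cumulative mean absolute error sampled every 1000 stream items plus a running time, so they can be compared with the same pattern. The sketch below is illustrative only: it assumes those three functions and their data/model dependencies are in scope and uses matplotlib just for plotting.

import matplotlib.pyplot as plt

def compare_recommender_strategies():
    # Each strategy returns (cumulative MAE trace, running time in seconds);
    # the trace is appended to every 1000 stream items.
    results = {
        "continuous": continuous(),
        "offline + online": offline_online(),
        "naive (online only)": naive(),
    }
    for name, (error, running_time) in results.items():
        items_processed = [1000 * (k + 1) for k in range(len(error))]
        plt.plot(items_processed, error,
                 label="{} ({:.0f}s)".format(name, running_time))
    plt.xlabel("stream items processed")
    plt.ylabel("cumulative mean absolute error")
    plt.legend()
    plt.show()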