Example #1
import os

import numpy

import data  # project-local; `calc_shapley_values` is also defined in this project


def calc_n_shapley_values(n_feats, n_samples, n_iter, data_type, cf_name, overwrite=False, data_dir="result_data_sunnies"):
    """
    Return a nested list of Shapley values (one value per player) per iteration:
    [[v1, ..., vn], [v1, ..., vn], ...]
    i.e. the outer list has length n_iter. Note that cached results loaded
    from disk come back as a numpy array rather than a list.
    """
    players = list(range(n_feats))  # one player per feature (unused in this excerpt)

    filename = f"{data_dir}/{n_feats}_feats_{n_samples}_samples_{n_iter}_iter_{cf_name}.npy"
    if not overwrite and os.path.exists(filename):
        return numpy.load(filename)

    all_shaps = []
    for _i in range(n_iter):
        x, y = data.make_data(n_feats, n_samples, data_type)

        _shapley_values = calc_shapley_values(x, y, cf_name)
        all_shaps.append(_shapley_values)

    numpy.save(filename, all_shaps)

    return all_shaps
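
A minimal usage sketch; the `data_type` and `cf_name` values below are hypothetical placeholders for whatever `data.make_data` and the project's characteristic-function lookup actually accept:

shaps = calc_n_shapley_values(
    n_feats=4, n_samples=500, n_iter=100,
    data_type="linear", cf_name="R2",  # assumed example values, not from the source
)
print(len(shaps))  # n_iter entries, one Shapley value per feature in each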
Example #2
import shelve

import data      # project-local module
import emulator  # project-local module providing the dict helpers

my_array = {}  # name -> URL mapping (a dict, despite the name)
my_url = input("Link to shorten: ")
my_new_name = input("New name for the link: ")

emulator.adding_to_dict(my_new_name, my_url, my_array)

while True:
    new_link_request = input("Add another link? (y/n): ")
    if new_link_request == "y":
        my_url = input("Link to shorten: ")
        my_new_name = input("New name for the link: ")
        emulator.adding_to_dict(my_new_name, my_url, my_array)
    elif new_link_request == "n":
        break

data.make_data(my_new_name, my_url)  # note: called with only the most recently entered pair

while True:
    link_request = input("Retrieve your link? (y/n): ")
    if link_request == "y":
        name = input("Name: ")
        emulator.get_linc_from_dict(name, my_array)
    elif link_request == "n":
        break

while True:
    ans = input('Show the DB? (y/n): ')
    if ans == 'y':
        with shelve.open('data') as db:
            db_contents = dict(db.items())  # renamed from `data` to avoid shadowing the module
            print(db_contents)
    elif ans == 'n':
        break  # without this branch the loop never terminates
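
The `emulator` helpers are not shown in this example; a minimal sketch of what they plausibly do, assuming `adding_to_dict` records a name/URL pair and `get_linc_from_dict` looks one up (names and behavior are guesses, not the project's actual code):

def adding_to_dict(name, url, storage):
    # Store the URL under the chosen short name.
    storage[name] = url

def get_linc_from_dict(name, storage):
    # Print the URL registered under `name`, if any.
    print(storage.get(name, "No link stored under this name"))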
Example #3
# Number of irrelevant features
N_IRRELEVANT = 0

# Models to compare (N_DATASETS and N_SAMPLES are defined elsewhere in this file)
MODELS = [Ridge, KNeighborsRegressor]

# Per-model complexity parameter, presumably alpha for Ridge and
# n_neighbors for KNeighborsRegressor
COMPLEXITIES = [1.0, 5]

########
# Main #
########

if __name__ == '__main__':
    # Generate data
    X, y = make_data(N_DATASETS * N_SAMPLES, N_IRRELEVANT)

    # Plot data
    scatter_plot(X[:, 0], y, 'Q3d_data')

    # Calculate the expected error and its terms for each model
    for model, complexity in zip(MODELS, COMPLEXITIES):
        # Create the protocol
        p = Protocol(X, y)

        # Train models
        p.train(model, complexity, N_DATASETS)

        # Get error and its terms
        noise, s_bias, var, exp_error = p.eval()
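
The four returned quantities presumably follow the standard bias-variance decomposition of the expected squared error. A sanity check one could append inside the loop, assuming `s_bias` is the squared bias (the exact contract of the project-local `Protocol.eval` is not shown here):

        # exp_error should equal noise + squared bias + variance
        residual = exp_error - (noise + s_bias + var)
        print(f"{model.__name__}: decomposition residual = {residual:.3e}")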
Example #4
from data import make_data
import plotly
import plotly.graph_objs as go

# Close the file promptly once make_data has consumed it
with open('FRvideos.csv', 'r', encoding="utf8") as file:
    dataset = make_data(file)
print(dataset)

# Plot a bar chart of the number of videos per date
new_dict = dict()
for category_id in dataset:
    for date in dataset[category_id]:
        if date in new_dict:
            new_dict[date] += len(dataset[category_id][date].values())
        else:
            new_dict[date] = len(dataset[category_id][date].values())

diagram = go.Bar(x=list(new_dict.keys()), y=list(new_dict.values()))
fig = go.Figure(data=[diagram])
plotly.offline.plot(fig, filename='graph1.html')

# Plot a pie chart of the number of videos in each category
new_new_dict = dict()
for category_id in dataset:
    for date in dataset[category_id]:
        if category_id in new_new_dict:
            new_new_dict[category_id] += len(
                dataset[category_id][date].values())
        else:
            new_new_dict[category_id] = len(
                dataset[category_id][date].values())
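
The pie chart announced in the comment above is never actually drawn in this excerpt; a plausible completion mirroring the bar-chart code (the output filename is a guess):

diagram2 = go.Pie(labels=list(new_new_dict.keys()),
                  values=list(new_new_dict.values()))
fig2 = go.Figure(data=[diagram2])
plotly.offline.plot(fig2, filename='graph2.html')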
Example #5
import tensorflow as tf  # TF 1.x API throughout this example

# ZeroCenter, LinearSymplecticTwoByTwo, SymplecticAdditiveCoupling,
# IrrotationalMLP and Chain are project-local bijector classes; the
# excerpt begins mid-block, so `stack` is assumed to start empty.
stack = []
stack.extend([
    ZeroCenter(),
    LinearSymplecticTwoByTwo(),
    SymplecticAdditiveCoupling(shift_model=IrrotationalMLP())
    # SymplecticAdditiveCoupling(shift_model=MLP())  # alternative shift model
])
T = Chain(stack)

step = tf.get_variable("global_step", [],
                       tf.int64,
                       tf.zeros_initializer(),
                       trainable=False)

# A temporary session, used only to materialize the training data
with tf.Session() as sess:
    z = make_data(settings, sess)

loss = make_loss(settings, T, z)

train_op = make_train_op(settings, loss, step)

# No explicit sess.run(tf.global_variables_initializer()) is needed:
# tf.contrib.training.train initializes variables in its managed session.

# Set the ZeroCenter bijectors to training mode:
for i, bijector in enumerate(T.bijectors):
    if hasattr(bijector, 'is_training'):
        T.bijectors[i].is_training = True

# tf.contrib was removed in TensorFlow 2; this call requires TF 1.x.
tf.contrib.training.train(train_op,
                          logdir=settings['log_dir'],
                          save_checkpoint_secs=60)
Example #6
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
from data import make_data
from plot import plot

# Ordinary least squares via the normal equation:
#   beta = (X.T @ X)^-1 @ X.T @ y

X_train, y_train, X_test, y_test = make_data(n_samples=6)

intercept = np.ones(shape=y_train.shape).reshape(-1, 1)
print("intercept:", intercept)
X_train = np.concatenate((X_train, intercept), 1)
print("\nX_train\n", X_train)
print(X_train.shape)

a = X_train.T
print("\nX_train.T\n", a)
print(a.shape)
a = a.dot(X_train)
print("\nX_train.T.dot(X_train)\n", a)
print(a.shape)
a = inv(a)
print("\nX_train.T.dot(X_train)*-1\n", a)
Example #7
def get_dataset(sample_number):
    """Generate `sample_number` samples with a reproducible random state."""
    # make_data and get_random_state are project-local helpers.
    X, y = make_data(sample_number, random_state=get_random_state())
    return X, y
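
A trivial usage sketch (the sample count is arbitrary):

X, y = get_dataset(200)
print(X.shape, y.shape)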
Example #8
    print('Cost : ', cost(C, X, Z))  # tail of a function defined above this excerpt


if __name__ == "__main__":

    # Loading the existing data
    if real_data:
        temp_X, temp_Y = load_file(load_data)
        # Shuffle samples and labels together so each label stays paired
        # with its sample; shuffling the two lists independently would
        # scramble the correspondence.
        paired = list(zip(temp_X, temp_Y))
        random.shuffle(paired)
        temp_X, temp_Y = map(list, zip(*paired))

        U, y = removeDups(temp_X, temp_Y)

    # Synthetic Data
    else:
        U, y, C_, Z_, ids_ = make_data(5, 0, 8, 50)


    # # X_train, X_test, y_train, y_test = train_test_split(np.array(temp_X), np.array(temp_Y), test_size=0.33, random_state=42)
    # # print(X_test.shape)

    # # data is finally in U and labels in y
    # print('u shape ', len(U),',',len(U[0]))
    # print(U[0][0])
    # print(U[1][0])
    # print(U[2][0])
    # # print(LS(U, [U[0]], 1)[0])
    # # print(cost_km([U[1]], U))


    if LSAlgo in RunAlgos:
        ...  # the algorithm run itself is elided in this excerpt
Example #9
        Z = [U[x[0]] for x in dists[-z:]]  # the z farthest points, set aside as outliers
        X = [x[0] for x in dists[:-z]]  # storing index of point in U
        cNum = [0 for _ in range(k)]  # number of points assigned to each cluster
        C = np.zeros((k, len(U[0])))  # per-cluster running sums, then means

        for i in X:
            cNum[cIds[i]] += 1  # update no of points in cluster
            C[cIds[i]] = C[cIds[i]] + U[i]

        for j in range(k):
            if cNum[j] != 0:
                C[j] = C[j] / cNum[j]  # centroid = mean of assigned points
            else:
                print('empty')  # cluster j received no points this round

    return C, Z


if __name__ == "__main__":

    random.seed(0)
    np.random.seed(0)
    U, y, C_, Z_, ids_ = make_data(5, 10, 10, 100, num_points=10)
    C, Z = kmeans_minus(U, 3, 5)
    plotGraph(U, C, Z, "./Plots/KMeans_")
'''
4 -> make_data(5, 0, 10, 50)
3 -> make_data(5, 10, 10, 50)
2 -> make_data(5, 0, 8, 50)
1 -> make_data(5, 0, 10, 100)
'''
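
This looks like the k-means-- algorithm of Chawla and Gionis: on each iteration the z points farthest from their centroids are set aside as outliers before the centroids are recomputed. A compact vectorized sketch of one such iteration, under that assumption (not the project's actual implementation):

import numpy as np

def kmeans_minus_step(U, C, z):
    # Squared distance of every point to every centroid, then nearest assignment.
    U, C = np.asarray(U, float), np.asarray(C, float)
    d2 = ((U[:, None, :] - C[None, :, :]) ** 2).sum(-1)
    cIds = d2.argmin(1)
    dist = d2[np.arange(len(U)), cIds]
    # Set aside the z farthest points as outliers for this iteration.
    order = dist.argsort()
    keep, out = order[:-z], order[-z:]
    # Recompute each centroid from the kept points only.
    for j in range(C.shape[0]):
        members = U[keep][cIds[keep] == j]
        if len(members):
            C[j] = members.mean(0)
    return C, U[out]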