Esempio n. 1
0
def respond():
    """Answer every ready client's request exactly once.

    Selected clients receive the full training job (model architecture,
    weights, training hyper-parameters and the current version); all
    other clients receive a rejection message.

    Relies on module-level state: ``model`` (Keras-style), ``version``,
    ``ready_clients``, ``selected_clients`` and the ZMQ-style REP socket
    ``responder`` (strict recv/send alternation).
    """
    # Plain dict literals — the original dict({...}) wrapping was redundant.
    train_request = {
        "selected": True,
        "model": {
            "arch": model.to_json(),
            "weights": model.get_weights(),
            "loss": model.loss,
            "optimizer": model.optimizer,
            "metrics_names": ['accuracy'],
        },
        "version": version,
        "hparam": {
            "epochs": 1,
            "batch_size": 32,
        },
    }

    reject_message = {"selected": False}

    # Work on a copy so the shared ready_clients collection is untouched.
    ready_clients_copy = ready_clients.copy()
    while ready_clients_copy:
        request = responder.recv_pyobj()
        client_id = request["client_id"]
        if client_id in selected_clients:
            responder.send_pyobj(train_request)
        else:
            responder.send_pyobj(reject_message)

        # Guard: a duplicate or unknown client_id previously made
        # remove() raise and killed the reply loop mid-round.
        if client_id in ready_clients_copy:
            ready_clients_copy.remove(client_id)
Esempio n. 2
0
def aggregate():
    """Collect one update per selected client, FedAvg the weights,
    bump the global model version and evaluate on the test set.

    Relies on module-level state: ``selected_clients``, the ZMQ-style
    socket ``aggregator``, ``model``, ``version`` and the test data
    ``x_test``/``y_test``.

    Returns:
        ``{"success": False}`` when no clients were selected, otherwise
        ``{"success": True, "loss": ..., "accuracy": ...}``.
    """
    if not selected_clients:
        return {"success": False}

    print("Waiting for updates...")
    updates = []

    global version

    # Track which selected clients still owe an update for this round.
    pending = selected_clients.copy()
    while pending:
        update = aggregator.recv_pyobj()

        client_id = update["client_id"]
        model_version = update["version"]
        client_metrics = update["metrics"]

        # Accept at most one update per selected client, and only for
        # the current model version.
        if client_id in pending and model_version == version:
            updates.append(update)
            pending.remove(client_id)
            # Fixed progress counter: the original printed the number of
            # *remaining* clients (and before validation) as "received".
            print("Received update %d/%d" %
                  (len(updates), len(selected_clients)))
            print("Received update on version %s from client %s" %
                  (model_version, client_id))
            print("Metrics: %s" %
                  json.dumps(client_metrics, indent=4, sort_keys=True))

    total_points = sum(u["points"] for u in updates)
    print(total_points)

    # Weighted (FedAvg) average, layer by layer. A plain Python list is
    # used instead of np.array([...]) over the layers: layers generally
    # have different shapes, and modern NumPy rejects building a ragged
    # object ndarray from them.
    weighted_avg = [np.zeros_like(layer) for layer in model.get_weights()]
    for update in updates:
        # NOTE(review): total_points == 0 would divide by zero here —
        # assumes every client reports a positive point count; verify.
        fraction = update["points"] / total_points
        weighted_avg = [
            acc + fraction * np.asarray(layer_weights)
            for acc, layer_weights in zip(weighted_avg, update["weights"])
        ]

    model.set_weights(weighted_avg)

    version += 1
    print("Current version: %s" % version)
    print("Evaluating...")
    # evaluate() returns [loss, accuracy] given the accuracy metric.
    history = model.evaluate(x=x_test, y=y_test, batch_size=32)
    return {"success": True, "loss": history[0], "accuracy": history[1]}
Esempio n. 3
0
    # Initialize helper objects for further use.
    # NOTE(review): these assignments shadow the `data` and `model`
    # modules with instances — the module names are unusable below this
    # point; rename the locals if module access is ever needed again.
    data = data.Data()
    model = model.Model()

    # Get the ModelNet dataset split into training and testing sets;
    # the dynamic loader is presumably lazy/streaming — TODO confirm
    # against the data module.
    if params.DATASET_LOAD_DYNAMIC:
        dataset = data.get_dynamic_dataset(params.DATASET_PATH,
                                           one_hot=True,
                                           create_labels=True)
    else:
        dataset = data.get_dataset(params.DATASET_PATH,
                                   one_hot=True,
                                   create_labels=True)

    # Fetch the model's initial weight and bias variables.
    weights, biases = model.get_weights()

    if arg_train == "single":
        # Create placeholders for input and output data (TF1-style graph
        # mode): x is a batch of square images with IMAGE_CHANNELS
        # channels, y is the one-hot class label.
        x = tf.placeholder(tf.float32,
                           (None, params.IMAGE_SIZE, params.IMAGE_SIZE,
                            params.IMAGE_CHANNELS),
                           name="x")
        y = tf.placeholder(tf.float32, (None, params.N_CLASSES), name="y")

        # Rough training of the model: single-view input. deepcopy keeps
        # the shared dataset unmodified across training modes.
        model.train(x, y, deepcopy(dataset), weights, biases, arg_ckpt)

    elif arg_train == "multi":
        # Group-module training with multiview input (placeholder
        # definition continues past this excerpt).
        x_mv = tf.placeholder(tf.float32,
Esempio n. 4
0
def model_currency(k=10):
    """
    Creates a neural network to model a currency.

    Trains k models with k-fold cross-validation, combines their weights
    into one model (each fold weighted by the inverse of its test
    error), then predicts revenue over a user-chosen number of steps.

    :param k: The degree of cross-validation to be performed.
    :return: Tuple of (revenue, error) from the combined model.
    """

    coin_dict, data = cr.read_csv()

    data = cr.split_data_coins(coin_dict, data)
    coin = select_currency(data)
    data = data[coin]

    model_weights = []
    model_errors = []

    split_data = cr.split_data(data, k)
    # Columns 2, 5, 3, 4 of each row, as floats (order as the model expects).
    split_data = [[[float(e[2]), float(e[5]), float(e[3]), float(e[4])] for e in s] for s in split_data]

    print("Modeling neural networks with k-fold cross-validation")

    for i in range(k):
        model = m.create_model(4, [8, 8, 2])

        # Fold i is held out for testing; the rest is training data.
        raw_data = split_data[:i] + split_data[i+1:]
        training_data = np.array([s[:-1] for s in raw_data])
        m.train_model(model, training_data, np.array([to_expected(s) for s in raw_data]))
        error = m.test_model(model, np.array([split_data[i][:-1]]), np.array([to_expected(split_data[i])]))
        model_weights.append(np.array(m.get_weights(model)))
        model_errors.append(error[0])

    # Weight each fold's model by the inverse of its error (lower error
    # -> larger share of the combined weights).
    sum_error = sum(1/e for e in model_errors)

    for idx, error in enumerate(model_errors):
        proportion = (1/error)/sum_error
        model_weights[idx] = proportion * model_weights[idx]

    true_weights = sum(model_weights)
    true_model = m.create_model(4, [8, 8, 2])
    m.set_weights(true_model, true_weights)

    while True:
        print("For how long would you like to invest?")
        steps = input("Choice:   ")
        try:
            steps = int(steps)
            # Raise (not assert: asserts vanish under -O) so a
            # non-positive count is handled like a non-numeric one.
            if steps <= 0:
                raise ValueError("steps must be positive")
        except ValueError:
            # Original had two bugs here: `except ValueError or
            # AssertionError` only caught ValueError, and the loop broke
            # unconditionally, so invalid input was never re-prompted.
            print("That was not a valid amount of time.")
        else:
            break

    revenue = m.predict_model(true_model, np.array([[split_data[-1][-1]]]), steps)
    error = m.test_model(true_model, np.array([s[:-1] for s in split_data]), np.array([to_expected(s) for s in split_data]))
    multiply = [1, 1]
    for r in revenue:
        multiply[0] *= r[0][0]
        multiply[1] *= r[0][1]
    print("Expected revenue: {}  with error percentage at: {}%".format(multiply, error[0]*100))

    return revenue, error
Esempio n. 5
0
# Fix the RNG seed for reproducible runs.
seed(2)

#
# s e t u p
#
# Load raw data plus run parameters, normalize, and split into
# train (X, Y) and test (Xtest, ytest) sets.
raw, params = parameters.initialize()
normalized = data.normalize(raw, params)
parameters.summary(params)
X, Y, Xtest, ytest = data.prepare(normalized, params)

#
# t r a i n i n g
#
# NOTE(review): this rebinds the name `model` from the module to the
# built network — the `model` module is unreachable afterwards.
model = model.setup(params)
model.summary()
lstm.stateless_fit(model, X, Y, Xtest, ytest, params)
# model.save(model, params, prefix='5y', additional_epocs=0)

#
# r e b u i l d   &   p r e d i c t
#
# Rebuild the network with batch_size=1 for step-by-step prediction and
# copy over the trained weights.
pred = lstm.build(params, batch_size=1)
pred.set_weights(model.get_weights())
(yhat, rmse, num_errors) = lstm.range_predict(pred, Xtest, ytest, params)

#
# p l o t
#
# plot.history(train_loss)
plot.prediction(ytest, yhat, rmse, num_errors, params)