Example #1
def load_training_and_testing_data():
    print "loading training & testing data..."
    training, testing = load_dataset()

    # process testing and training images -> numpy arrays.
    train_images = process_images(training[0])
    test_images = process_images(testing[0])

    # convert training and testing to one hot vectors.
    train_labels = one_hot(training[1], num_classes=6)
    test_labels = one_hot(testing[1], num_classes=6)

    # shuffle training data in sync for better training.
    state = np.random.get_state()
    np.random.shuffle(train_images)
    np.random.set_state(state)
    np.random.shuffle(train_labels)

    # partition the training data 80/20 (80% -> training, 20% -> testing).
    # note: ~part (not -part) negates a boolean mask, and the held-out rows
    # must be taken before train_images is overwritten.
    r = np.random.rand(train_images.shape[0])
    part = r < np.percentile(r, 80)
    test_images = train_images[~part]
    test_labels = train_labels[~part]
    train_images = train_images[part]
    train_labels = train_labels[part]

    # optionally show images and labels.
    # show_image(train_images, train_labels)
    # show_image(test_images, test_labels)
    return train_images, train_labels, test_images, test_labels
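
Every example on this page calls a one_hot helper that the excerpts never define. A minimal NumPy sketch of what such a helper plausibly looks like, given how it is called here (the body is an assumption; only the name and signature come from the snippets):

import numpy as np

def one_hot(labels, num_classes):
    # np.eye row-indexing broadcasts over scalars and arrays alike:
    # a single label yields shape (num_classes,), a length-n sequence
    # yields (n, num_classes), matching both call styles on this page.
    return np.eye(num_classes, dtype=np.float32)[np.asarray(labels, dtype=int)]
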
Example #2

def generate_text(seed, len_test_txt=500):
    global model_predicting
    # copy weights from trained model to predicting model
    trained_weights = model_training.get_weights()
    model_predicting.set_weights(trained_weights)

    seed = seed.lower()
    gen_str = seed

    # turn seed from letters to numbers so we can then one-hot encode it
    seed = [chars.index(c) for c in seed]
    for i in range(len_test_txt):
        # one hot encode the seed
        seed_oh = one_hot(seed, num_classes=len(chars))

        # reshape the seed into the shape the input layer of lstm needs
        seed_oh = np.reshape(seed_oh, newshape=(1, -1, len(chars)))

        # predict a distribution over characters; with return_sequences the
        # model emits one distribution per timestep, so keep only the last
        char_probabilities = model_predicting.predict(seed_oh, verbose=0)[0][-1]
        # nan_to_num returns a new array (it does not modify in place);
        # renormalize so the probabilities sum to 1 for np.random.choice
        char_probabilities = np.nan_to_num(char_probabilities)
        char_probabilities /= char_probabilities.sum()
        pred_index = np.random.choice(range(len(chars)), p=char_probabilities)
        gen_str += chars[pred_index]
        # the LSTM is stateful, so only the newly predicted character
        # needs to be fed on the next step
        seed = [pred_index]

    model_predicting.reset_states()

    return gen_str
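
generate_text assumes a global model_predicting: a stateful twin of the training network, built for batch size 1 and a variable number of timesteps so that LSTM state carries across single-character predict calls until reset_states(). A minimal Keras sketch of such a model (the layer width is a placeholder, not taken from the source):

from tensorflow import keras

model_predicting = keras.Sequential([
    # batch size pinned to 1, timestep count left variable (None)
    keras.layers.LSTM(256, stateful=True, return_sequences=True,
                      batch_input_shape=(1, None, len(chars))),
    # per-timestep softmax over the character vocabulary
    keras.layers.Dense(len(chars), activation='softmax'),
])

The set_weights call in generate_text only succeeds if this architecture matches model_training layer for layer; typically the same network is built twice, once with the training batch size and once as above.
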
Example #3

    f = hp.File(data_folder + "torsion_pairwise_casp_data.hdf5", "r")
    data_set = f['dataset']

    # Training Loop
    history = []
    best_val_loss = None
    for epoch in range(epochs):
        print("Epoch", epoch, ':')

        # Fit training data
        print('Fitting:')
        train_status = []
        for i in tqdm(range(len(x_train))):
            x = np.array(data_set[x_train[i] + data_type])
            x = np.expand_dims(x, axis=0)
            y = one_hot(y_train[i], num_classes=2)
            y = np.expand_dims(y, axis=0)
            output = model.train_on_batch(x, y)
            train_status.append(output)

        # Calculate training loss and accuracy
        train_status = np.array(train_status)
        train_loss = np.average(train_status[:, 0])
        train_acc = np.average(train_status[:, 1])
        print('Train Loss ->', train_loss)
        print('Train Accuracy ->', train_acc, '\n')

        # Test on validation data
        print('Evaluating:')
        val_status = []
        for i in tqdm(range(len(x_val))):
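
The excerpt is cut off at the top of the validation loop. Assuming it mirrors the fitting loop directly above, the missing body would evaluate each held-out sample with test_on_batch, which returns the same [loss, accuracy] pair as train_on_batch without updating weights; a sketch of that assumed continuation (y_val is assumed to parallel y_train):

        for i in tqdm(range(len(x_val))):
            x = np.array(data_set[x_val[i] + data_type])
            x = np.expand_dims(x, axis=0)
            y = one_hot(y_val[i], num_classes=2)
            y = np.expand_dims(y, axis=0)
            # evaluate only; no weight update
            val_status.append(model.test_on_batch(x, y))
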
Example #4
    model.summary()

    # Training Loop
    history = []
    best_val_loss = None  # start unset; an initial 0.0 could never be improved on
    for epoch in range(epochs):
        print("Epoch", epoch, ':')

        # Fit training data
        print('Fitting:')
        train_status = []
        for i in tqdm(range(len(train))):
            x = np.array(train_set[classes[int(train[i, 1])] + '/' +
                                   train[i, 0]])
            x = np.expand_dims(x, axis=0)
            y = one_hot(train[i, 1], num_classes=len(classes))
            y = np.expand_dims(y, axis=0)
            output = model.train_on_batch(x, y)
            train_status.append(output)

        # Calculate training loss and accuracy
        train_status = np.array(train_status)
        train_loss = np.average(train_status[:, 0])
        train_acc = np.average(train_status[:, 1])
        print('Train Loss ->', train_loss)
        print('Train Accuracy ->', train_acc, '\n')

        # Test on validation data
        print('Evaluating:')
        val_status = []
        for i in tqdm(range(len(val))):
Example #5

        seed = [pred_index]

    model_predicting.reset_states()

    return gen_str


# fitting the model, depending on `num_iterations`, this can take a while
display_step = 50
start_time = time.time()

for i in range(num_iterations):
    # Get a random batch of training examples.
    x_batch, y_true_batch = get_next_batch(batch_size, time_steps, data)
    # we need to one-hot encode inputs and outputs
    x = one_hot(x_batch, num_classes=len(chars))
    y = one_hot(y_true_batch, num_classes=len(chars))

    # ---------------------- TRAIN -------------------------
    # optimize model
    history = model_training.fit(x, y, verbose=0, batch_size=x.shape[0])
    model_training.reset_states()

    # Print status every display_step iterations.
    if (i % display_step == 0) or (i == num_iterations - 1):
        #Message for network evaluation
        msg = "Optimization Iteration: {}, Training Loss: {}"
        print(msg.format(i, history.history['loss'][0]))
        print("Text generated: " + generate_text("We", 60))

        # Ending time.
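
This fitting loop leans on a get_next_batch helper that the excerpt does not show. For a character-level model like this one, such a helper typically slices random windows of time_steps indices from the corpus and shifts each window by one position to form the targets; a sketch under that assumption (only the call signature comes from the snippet):

def get_next_batch(batch_size, time_steps, data):
    # data: 1-D array of integer character indices for the whole corpus
    x_batch = np.zeros((batch_size, time_steps), dtype=int)
    y_batch = np.zeros((batch_size, time_steps), dtype=int)
    for b in range(batch_size):
        start = np.random.randint(0, len(data) - time_steps - 1)
        x_batch[b] = data[start:start + time_steps]
        # target sequence is the input shifted one character ahead
        y_batch[b] = data[start + 1:start + time_steps + 1]
    return x_batch, y_batch
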
Example #6
        # agentR decides its action
        if random.random() > epsilon:
            actionR = np.random.choice(env.act_dim,
                                       p=agentR.policy_action(stateR))
        else:
            actionR = random.randint(0, env.act_dim - 1)

        # log information
        print('Probability for each action:')
        print('L:', agentL.policy_action(stateL))
        print('R:', agentR.policy_action(stateR))
        print('Critic value:')
        print('L: ', end='')
        print(
            agentL.critic.target_predict([
                np.expand_dims(stateL, axis=0),
                np.expand_dims(one_hot(actionL, env.act_dim), axis=0),
                np.expand_dims(one_hot(actionR, env.act_dim), axis=0)
            ]))
        print('R: ', end='')
        print(
            agentR.critic.target_predict([
                np.expand_dims(stateR, axis=0),
                np.expand_dims(one_hot(actionR, env.act_dim), axis=0),
                np.expand_dims(one_hot(actionL, env.act_dim), axis=0)
            ]))
        print('actionL:', actionL, 'actionR:', actionR)
        print()

        # perform actions on the environment
        done, reward_l, reward_r, state_, actions = env.step(actionL, actionR)
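
The selection above is epsilon-style exploration: with probability epsilon the agent takes a uniformly random action, otherwise it samples from the probabilities returned by policy_action. The excerpt never shows how epsilon is set; a common, purely illustrative choice is a linear anneal toward a small floor:

EPS_START, EPS_MIN, DECAY_STEPS = 1.0, 0.05, 10_000  # illustrative values

def epsilon_at(step):
    # linearly anneal from EPS_START down to EPS_MIN over DECAY_STEPS steps
    frac = min(step / DECAY_STEPS, 1.0)
    return EPS_START + frac * (EPS_MIN - EPS_START)
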
Example #7
        # agentR decides its action
        if random.random() > epsilon:
            actionR = np.random.choice(env.act_dim, p=agentR.policy_action(stateR))
        else:
            actionR = random.randint(0, env.act_dim-1)

        # log information
        print('Probability for each action:')
        print('L:', agentL.policy_action(stateL))
        print('R:', agentR.policy_action(stateR))
        print('Critic value:')
        print('L: ', end='')
        print(agentL.critic.target_predict([
            np.expand_dims(stateL, axis=0),
            np.expand_dims(one_hot(actionL, env.act_dim), axis=0),
            np.expand_dims(one_hot(actionR, env.act_dim), axis=0)
        ]))
        print('R: ', end='')
        print(agentR.critic.target_predict([
            np.expand_dims(stateR, axis=0),
            np.expand_dims(one_hot(actionR, env.act_dim), axis=0),
            np.expand_dims(one_hot(actionL, env.act_dim), axis=0)
        ]))
        print('actionL:', actionL, 'actionR:', actionR)
        print()

        # perform actions on the environment
        done, reward_l, reward_r, state_, actions = env.step(actionL, actionR)

        # adjust the state for each agent
Example #8
        best_val_loss = None
        for epoch in range(epochs):
            print("Epoch", epoch, ':', "Score Threshold:", rank)

            # Fit training data
            print('Fitting:')
            train_status = []
            batch_x = []
            batch_y = []
            for j in tqdm(range(len(x_train))):
                try:
                    x = np.array(data_set[x_train[j]])
                except KeyError:
                    # skip samples whose key is missing from the HDF5 file
                    continue
                batch_x.append(x)
                y = one_hot(y_train[j], num_classes=2)
                #y = y.reshape((2))
                batch_y.append(y)
                if len(batch_x) == batch_size or j + 1 == len(x_train):
                    batch_x = np.array(batch_x)
                    batch_y = np.array(batch_y)
                    output = model.train_on_batch(batch_x, batch_y)
                    batch_x = []
                    batch_y = []
                    train_status.append(output)

            # Calculate training loss and accuracy
            train_status = np.array(train_status)
            train_loss = np.average(train_status[:, 0])
            train_acc = np.average(train_status[:, 1])
            print('Train Loss ->', train_loss)
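
Unlike the per-sample loops elsewhere on this page, Example #8 batches manually: it accumulates samples and flushes whenever batch_size items are ready or the data runs out (the `j + 1 == len(x_train)` check). The same pattern reads more cleanly as a generator; a hedged sketch of that refactoring (names are illustrative):

def iter_batches(keys, labels, batch_size):
    batch_x, batch_y = [], []
    for key, label in zip(keys, labels):
        try:
            batch_x.append(np.array(data_set[key]))
        except KeyError:          # skip ids missing from the HDF5 file
            continue
        batch_y.append(one_hot(label, num_classes=2))
        if len(batch_x) == batch_size:
            yield np.array(batch_x), np.array(batch_y)
            batch_x, batch_y = [], []
    if batch_x:                   # flush the final partial batch
        yield np.array(batch_x), np.array(batch_y)

The training loop then reduces to: for bx, by in iter_batches(x_train, y_train, batch_size): train_status.append(model.train_on_batch(bx, by)).
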
Example #9
        # agentL decides its action
        if isDeterministic:
            actionL = np.argmax(agentL.policy_action(stateL))
        else:
            actionL = np.random.choice(env.act_dim, p=agentL.policy_action(stateL))

        # agentR decides its action
        actionR = int(input('Choose an action (0~8): '))

        # log information
        print('Probability for each action:')
        print(agentL.policy_action(stateL))
        print('Critic value:')
        print(agentL.critic.target_predict([
            np.expand_dims(stateL, axis=0),
            np.expand_dims(one_hot(actionL, env.act_dim), axis=0),
            np.expand_dims(one_hot(actionR, env.act_dim), axis=0)
        ]))
        print('actionL:', actionL, 'actionR:', actionR)
        print()

        # perform actions on the environment
        done, reward_l, reward_r, state_, actions = env.step(actionL, actionR)

        state = state_
        stateL, stateR = state_each(normalize(env, state))

        rewardL += reward_l
        rewardR += reward_r

        if done:
Example #10

    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    model.summary()

    # Training Loop
    history = []
    best_val_loss = None
    for epoch in range(epochs):
        print("Epoch", epoch, ':')

        # Fit training data
        print('Fitting:')
        train_status = []
        for i in tqdm(range(len(x_train))):
            x = np.array(data_set[classes[int(y_train[i])] + '/' + x_train[i]])
            x = np.expand_dims(x, axis=0)
            y = one_hot(y_train[i], num_classes=len(classes))
            y = np.expand_dims(y, axis=0)
            output = model.train_on_batch(x, y)
            train_status.append(output)

        # Calculate training loss and accuracy
        train_status = np.array(train_status)
        train_loss = np.average(train_status[:, 0])
        train_acc = np.average(train_status[:, 1])
        print('Train Loss ->', train_loss)
        print('Train Accuracy ->', train_acc, '\n')

        # Test on validation data
        print('Evaluating:')
        val_status = []
        for i in tqdm(range(len(x_val))):