Example #1
    def __init__(self, position, direction, tick_count, configuration, parent=None):
        self.id = random.getrandbits(32)

        self.position = position
        self.direction = direction
        self.health = configuration["Agent_Health"]
        self.birth = tick_count
        self.age = 0
        self.configuration = configuration
        if parent is None:
            self.neural_network = NeuralNetwork(configuration["Neural_Network_Nodes_In_Layers"])
            self.neural_network.mutate(1)
        else:
            self.neural_network = deepcopy(parent.neural_network)
            self.generation = parent.generation + 1
            self.mutate()
Example #2
    def __init__(self, buy_in, n_players, ID=0):
        self.n_opponents = n_players - 1
        self.states = [buy_in, 0, None]   # [current funds, bet amount, action]
        self.earnings = 0
        self.id = ID
        self.evaluator = Evaluator()

        self.num_opp_actions = 5
        self.num_feature_elements = 52 + (self.num_opp_actions * self.n_opponents) + 3
        self.batch_size = 50000 # could have this as a parameter

        # initialize neural network
        self.network = NeuralNetwork(test_mnist=False, input_layer_size=self.num_feature_elements)

        # array of feature vectors
        self.X_train = np.empty([self.batch_size, self.num_feature_elements])
        # array of target values (i.e. Q(feature vector))
        self.y_train = np.empty([self.batch_size, 1])        

        self.prev_state_action_vector = None
        self.e = 0.3 # value for e-greedy
        self.iteration_num = 0
Example #3
def main():

    script = str(sys.argv[1])

    epochs = 3000
    # script = 'example'

    if script == "example":

        x = np.array([[0.05, 0.1]])
        y = np.array([[0.01, 0.99]])

        desired_y = np.array([[0.773, 0.778]])  # After one training step (epoch)

        parameters = {
            "num inputs" : 2,
            "num outputs" : 2,
            "num hidden layers" : 1,
            "num neurons" : 2,
            "activations": ['sigmoid'] * 2,
            "learning rate": 0.5,
            "loss function": 'squared error'
        }

        nn = NeuralNetwork(parameters)

        # In this example, don't use randomly initialized weights; use the ones from the worked example.
        # To keep things simple, just overwrite the randomly initialized weights set by the NeuralNetwork instantiation.
        weights = np.array([[0.15, 0.20, 0.25, 0.30], [0.40, 0.45, 0.50, 0.55]])
        biases = np.array([[0.35, 0.35], [0.60, 0.60]])

        for l in range(0, len(nn.layers)):
            nn.layers[l].weights = weights[l]
            nn.layers[l].biases = biases[l]

        nn.layers[0].neurons[0].weights = weights[0, 0:2]
        nn.layers[0].neurons[1].weights = weights[0, 2::]
        nn.layers[0].neurons[0].bias = biases[0, 0]
        nn.layers[0].neurons[1].bias = biases[0, 1]

        nn.layers[1].neurons[0].weights = weights[1, 0:2]
        nn.layers[1].neurons[1].weights = weights[1, 2::]
        nn.layers[1].neurons[0].bias = biases[1, 0]
        nn.layers[1].neurons[1].bias = biases[1, 1]

        nn.train(x, y, epochs=1)

        new_output = nn.calculate(x)

        # Print to screen
        print('\n')
        print('Class example: single epoch')
        print('--------------------------------------------------------------------------------')
        print('The target output is {}. After one training step, the output should be approximately {} (with sigmoid activation).'.format(y, desired_y))
        print('New output is... {}'.format(new_output))

    elif script == "and":

        x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        y = np.array([0, 0, 0, 1])

        parameters = {
            "num inputs": 2,
            "num outputs": 1,
            "num hidden layers": 0,
            "num neurons": 0,
            "activations": ['sigmoid'],
            "learning rate": 0.1,
            "loss function": 'squared error'
        }

        nn = NeuralNetwork(parameters)

        nn.train(x, y, epochs=epochs)

        # Plotting
        plt.plot(nn.loss_epoch)
        plt.grid()
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.title('AND Neural Network, {} activation and \n {} loss'.format(parameters['activations'][0], parameters['loss function']))
        plt.show()

        # Printing to screen
        print('\n')
        print('AND Gate Perceptron:')
        print('--------------------------------------------------------------------------------')
        print('For y = 1, output should be >= 0.5. For y = 0, output should be < 0.5.')
        for input, output in zip(x, y):
            print("MULTI-LAYER PERCEPTRON: Output for input x = {} should be y = {}. Computed value: {}".format(input,
                                                                                                                output,
                                                                                                                nn.calculate(
                                                                                                                    input)))

    elif script == "xor":

        x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        y = np.array([0, 1, 1, 0])

        parameters_perceptron = {

            "num inputs": 2,
            "num outputs": 1,
            "num hidden layers": 0,
            "num neurons": 0,
            "activations": ['sigmoid'],
            "learning rate": 0.1,
            "loss function": 'squared error'
        }

        parameters_multilayer_perceptron = {

            "num inputs": 2,
            "num outputs": 1,
            "num hidden layers": 1,
            "num neurons": 4,
            "activations": ['sigmoid'] * 2,
            "learning rate": 0.1,
            "loss function": 'squared error'

        }

        nn_single = NeuralNetwork(parameters_perceptron)
        nn_single.train(x, y, epochs=epochs)

        nn_multi = NeuralNetwork(parameters_multilayer_perceptron)
        nn_multi.train(x, y, epochs=epochs)

        # Plotting
        plt.plot(nn_single.loss_epoch, label='Single Perceptron')
        plt.ylabel('Loss')
        plt.plot(nn_multi.loss_epoch, label='Multi-layer Perceptron')
        plt.grid()
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend(loc='upper right')
        plt.title('XOR Neural Network, {} activation and \n {} loss'.format(
            parameters_multilayer_perceptron['activations'][0], parameters_multilayer_perceptron['loss function']))
        plt.show()

        # Printing results to screen
        print('\n')
        print('XOR Gate: Single-layer Perceptron:')
        print('--------------------------------------------------------------------------------')
        print('For y = 1, output should be >= 0.5. For y = 0, output should be < 0.5.')
        for input, output in zip(x, y):
            print("SINGLE PERCEPTRON: Output for input x = {} should be y = {}. Computed value: {}".format(input, output, nn_single.calculate(input)))

        print('\n')
        print('XOR Gate: Multi-layer Perceptron:')
        print('--------------------------------------------------------------------------------')
        print('For y = 1, output should be >= 0.5. For y = 0, output should be < 0.5.')
        for input, output in zip(x, y):
            print("MULTI-LAYER PERCEPTRON: Output for input x = {} should be y = {}. Computed value: {}".format(input, output,
                                                                                                           nn_multi.calculate(
                                                                                                               input)))
Example #4
class NNAgent(Agent):

    def __init__(self, buy_in, n_players, ID=0):
        self.n_opponents = n_players - 1
        self.states = [buy_in, 0, None]   # [current funds, bet amount, action]
        self.earnings = 0
        self.id = ID
        self.evaluator = Evaluator()

        self.num_opp_actions = 5
        self.num_feature_elements = 52 + (self.num_opp_actions * self.n_opponents) + 3
        self.batch_size = 50000 # could have this as a parameter

        # initialize neural network
        self.network = NeuralNetwork(test_mnist=False, input_layer_size=self.num_feature_elements)

        # array of feature vectors
        self.X_train = np.empty([self.batch_size, self.num_feature_elements])
        # array of target values (i.e. Q(feature vector))
        self.y_train = np.empty([self.batch_size, 1])        

        self.prev_state_action_vector = None
        self.e = 0.3 # value for e-greedy
        self.iteration_num = 0
    
    # get output of neural network
    def Q(self, vector):
        return self.network.predict(vector)

    def trainQ(self):
        self.network.updateData(train_data=self.X_train, train_labels=self.y_train, input_layer_size=self.num_feature_elements)
        self.network.train(num_epochs=10)

    def getAction(self, game, call, raise_amt):
        cur_funds = self.states[0]
        cur_bet = self.states[1]
        diff = call - cur_bet
        raise_bet = diff + raise_amt

        action = None

        # can't call 
        if diff > cur_funds:
            action = 'F'

        # can't raise
        if raise_bet > cur_funds:
            action_set = ['F', 'C']

        # can do anything
        else:   
            action_set = ['F', 'C', 'R']        

        # 52 elements to represent the hand, number opponents elements * 3 elements to represent last actions, 3 elements to represent agents action
        state_action_vector = np.zeros(52 + self.num_opp_actions * self.n_opponents + 3)
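        # e.g. with three opponents: 52 + 5 * 3 + 3 = 70 entries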

        # indicating the hand
        state_action_vector[getCardNum(self.hole_cards[0])] = 1
        state_action_vector[getCardNum(self.hole_cards[1])] = 1
        
        other_player_actions = tuple(game.last_player_actions)
        # indicating the opp. actions
        for i in range(self.n_opponents):
            state_action_vector[52 + self.num_opp_actions * i + action_numbers[other_player_actions[i]]] = 1
    
        num_state_features = 52 + (self.num_opp_actions * self.n_opponents)
        
        if not action:
            # net has not been trained yet
            if self.iteration_num < self.batch_size:
                max_action_value = 0
                action = random.choice(action_set)

            else:
                r = random.uniform(0, 1)
                
                max_action_value = -np.inf
                max_legal_action_value = -np.inf # might not be able to do every action

                # choose best possible action
                for (i, a) in enumerate(actions):
                    state_action_vector[num_state_features + i] = 1
                    action_value = self.Q(state_action_vector)

                    if action_value > max_action_value: 
                        max_action_value = action_value
                    
                    if a in action_set:
                        if action_value > max_legal_action_value:
                            max_legal_action_value = action_value
                            action = a

                    state_action_vector[num_state_features + i] = 0

                # e-greedy exploration
                if r < self.e:
                    action = random.choice(action_set)

        else:
            max_action_value = 0
                
        # print action
        state_action_vector[num_state_features + action_numbers[action]] = 1

        if self.prev_state_action_vector is not None:
            self.X_train[self.iteration_num % self.batch_size] = self.prev_state_action_vector
            self.y_train[self.iteration_num % self.batch_size] = np.array(max_action_value)

            self.iteration_num += 1
            if self.iteration_num % self.batch_size == 0:
                self.trainQ()

        self.prev_state_action_vector = state_action_vector

        return action

    # add reward to training set
    def QReward(self, reward):
        if self.prev_state_action_vector is not None:
            self.X_train[self.iteration_num % self.batch_size] = self.prev_state_action_vector
            self.y_train[self.iteration_num % self.batch_size] = np.array(reward)

            self.iteration_num += 1
            if self.iteration_num % self.batch_size == 0:
                self.trainQ()

            self.prev_state_action_vector = None
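
A minimal sketch of the ε-greedy rule that getAction implements, factored out on its own; the Q-value dictionary and action names below are illustrative, since the real agent scores full state-action vectors through its network:

import random

def epsilon_greedy(q_values, legal_actions, epsilon=0.3):
    # Explore with probability epsilon: pick a random legal action.
    if random.uniform(0, 1) < epsilon:
        return random.choice(legal_actions)
    # Otherwise exploit: pick the legal action with the highest Q value.
    return max(legal_actions, key=lambda a: q_values[a])

# Hypothetical Q estimates for fold/call/raise when raising is not allowed:
print(epsilon_greedy({'F': 0.1, 'C': 0.4, 'R': 0.7}, ['F', 'C']))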
Example #5
    ):  # no card of the same suit with a lower value than the current card may have all of its identical copies discarded
        graveyard[i] = 0
    # Mark that every card identical to the current card has been discarded except the
    # current card itself: necessary here, though not in keeping with the other slots.
    graveyard[int(card) - 1] = 1
    graveyard += card.toBinary()
    return graveyard


if __name__ == '__main__':

    for i in range(10):
        seed = random.randint(-65536, 65535)  # -13920 good seed
        random.seed(seed)
        print("seed : ", seed)
        nn = NeuralNetwork(neuronsPerLayer=[31, 31, 1])
        kb = []
        testKB = []
        for i in range(10000):
            kb.append((generateDispensableCard(), [1]))
            kb.append((generateIndispensableCard(), [0]))
        for i in range(1000):
            testKB.append((generateDispensableCard(), [1]))
            testKB.append((generateIndispensableCard(), [0]))

        untrainedErrorOnKB = nn.test(knowledgeBase=kb)
        print("untrainedErrorOnKB : ", untrainedErrorOnKB)

        untrainedErrorOnTest = nn.test(knowledgeBase=testKB)
        print("untrainedErrorOnTest : ", untrainedErrorOnTest)
Example #6
class Agent:
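    # NOTE: these are class attributes; the mutable ones (position, configuration,
    # sensors, output) are shared across instances until an instance assignment shadows them.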
    id = None
    position = []
    direction = None  # 0 to 1 clockwise, 0 is facing up
    position_change_x = 0  # TODO: store as array (causes bug)
    position_change_y = 0
    direction_change = None
    health = None
    birth = None
    age = None
    configuration = {}
    neural_network = None
    sensors = []
    output = []
    marked = False
    generation = 0
    last_attacked_by = None

    def __init__(self, position, direction, tick_count, configuration, parent=None):
        self.id = random.getrandbits(32)

        self.position = position
        self.direction = direction
        self.health = configuration["Agent_Health"]
        self.birth = tick_count
        self.age = 0
        self.configuration = configuration
        if parent is None:
            self.neural_network = NeuralNetwork(configuration["Neural_Network_Nodes_In_Layers"])
            self.neural_network.mutate(1)
        else:
            self.neural_network = deepcopy(parent.neural_network)
            self.generation = parent.generation + 1
            self.mutate()

    def eat(self):
        self.health += self.configuration["Food_Value"]

    def perform_attack(self, agents):
        self.health -= self.configuration["Agent_Attack_Cost"]

        for agent in agents:
            if agent != self:
                distance = self.get_distance(agent.position, self.configuration["Agent_Attack_Range"])
                if distance < self.configuration["Agent_Attack_Range"]:
                    agent.get_attacked(self)

    def get_attacked(self, other_agent):
        self.health -= self.configuration["Agent_Attack_Damage"]

        self.last_attacked_by = other_agent

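    # Cheap bounding-box rejection before computing the exact Euclidean distance;
    # 999999 acts as an "out of range" sentinel.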
    def get_distance(self, position, max_range):
        distance_x = self.position[0] - position[0]
        distance_y = self.position[1] - position[1]

        if abs(distance_x) <= max_range and abs(distance_y) <= max_range:
            distance = math.sqrt(math.pow(distance_x, 2) + math.pow(distance_y, 2))
            return distance
        else:
            return 999999

    def get_information_string(self, tick_count):
        string = "Sensors: [" + ", ".join(str(e) for e in self.sensors) + "]\n"
        string += "Output: [" + ", ".join(str(e) for e in self.output) + "]\n"
        string += "Position: [" + str(round(self.position[0], 2)) + ", " + str(round(self.position[1], 2)) + "]\n"
        string += "Health: " + str(round(self.health, 2)) + "\n"
        string += "Age: " + str(tick_count - self.birth) + "\n"
        string += "Generation: " + str(self.generation) + "\n"

        return string

    def react(self, sensors):
        self.output = self.neural_network.feed(sensors)

    def mutate(self):
        self.neural_network.mutate(self.configuration["Neural_Network_Mutate"])
Example #7
        lg.Logger(parser.h_params.expt_dir + '/log_loss_train.txt',
                  loss_formatter),
        'valid_loss_logger':
        lg.Logger(parser.h_params.expt_dir + '/log_loss_valid.txt',
                  loss_formatter),
        'test_loss_logger':
        lg.Logger(parser.h_params.expt_dir + '/log_loss_test.txt',
                  loss_formatter),
        'train_error_logger':
        lg.Logger(parser.h_params.expt_dir + '/log_error_train.txt',
                  error_formatter),
        'valid_error_logger':
        lg.Logger(parser.h_params.expt_dir + '/log_error_valid.txt',
                  error_formatter),
        'test_error_logger':
        lg.Logger(parser.h_params.expt_dir + '/log_error_test.txt',
                  error_formatter)
    }
else:
    loggers = {}

neural_net = NeuralNetwork(parser.h_params.sizes, loss, activation_function,
                           activation_prime, act_f.softmax, loggers,
                           parser.h_params.expt_dir, parser.h_params.save_dir,
                           parser.h_params.anneal)

neural_net.stochastic_gradient_descent(
    training, validation, testing, parser.h_params.batch_size,
    parser.h_params.epochs, parser.h_params.lr, parser.h_params.momentum,
    parser.h_params.lmbda, nesterov, adam, build_logs)
Example #8
    """
    random.seed(seed)
    fireWork = Card()
    card = Card()
    while not (fireWork.getSuit() == card.getSuit()
               and fireWork.getValue() == card.getValue() - 1):
        fireWork.setSuit(Suit(random.randint(1, 5)))
        fireWork.setValue(random.randint(1, 5))
        card.setSuit(Suit(random.randint(1, 5)))
        card.setValue(random.randint(1, 5))
    return (Suit.toInt(fireWork.getSuit()), fireWork.getValue(),
            Suit.toInt(card.getSuit()), card.getValue())


if __name__ == '__main__':
    nn = NeuralNetwork(neuronsPerLayer=[4, 4, 1])
    trainKB = []
    testKB = []
    for i in range(2000):
        trainKB.append((generateGoodCombo(), [1]))
        trainKB.append((generateBadCombo(), [0]))
        testKB.append((generateGoodCombo(), [1]))
        testKB.append((generateBadCombo(), [0]))

    untrainedErrorOnKB = nn.test(knowledgeBase=trainKB)
    untrainedErrorOnTest = nn.test(knowledgeBase=testKB)
    trainedErrorOnKB1 = nn.train(knowledgeBase=trainKB)
    for _ in range(10):
        nn.train(knowledgeBase=trainKB, doTests=False)
    print()
    trainedErrorOnKB10 = nn.test(knowledgeBase=trainKB)
Example #9
    graveyard[(Suit.toInt(card.getSuit()) - 1) * 5] = random.randint(0, 2)
    for i in range((Suit.toInt(card.getSuit()) - 1) * 5 + 1, int(card) - 1):
        graveyard[i] = random.randint(0, 1)
    # 0: a 5 can never be discarded; 2: this must be the last remaining 1; 1: this must be the last of its kind.
    graveyard[int(card) - 1] = 0 if card.getValue() == 5 else 2 if card.getValue() == 1 else 1
    graveyard.append(Suit.toInt(card.getSuit()))
    graveyard.append(card.getValue())
    return graveyard


if __name__ == '__main__':
    for i in range(10):
        seed = random.randint(-65536, 65535)
        random.seed(seed)
        nn = NeuralNetwork(neuronsPerLayer=[27, 50, 5, 1])
        kb = []
        testKB = []
        for i in range(10000):
            kb.append((generateDispensableCard(), [1]))
            kb.append((generateIndispensableCard(), [0]))

        for i in range(1000):
            testKB.append((generateDispensableCard(), [1]))
            testKB.append((generateIndispensableCard(), [0]))

        untrainedErrorOnKB = nn.test(knowledgeBase=kb)
        untrainedErrorOnTest = nn.test(knowledgeBase=testKB)
        trainedErrorOnKB1 = nn.train(knowledgeBase=kb)
        for _ in range(1000):
            nn.train(knowledgeBase=kb, doTests=False)
Example #10
    """
    Play a game to save the best computed moves and feed the neural network .
    Arg:
        -model : our neural network.
    """
    log = neuralNetAutoMain(neuralNet=net)
    score = log[-1].getScore()
    net.train(log2kb(log, score), doTests=False)
    print(score)
    return score


if __name__ == '__main__':

    for j in range(10):

        # 93 inputs, one hidden layer of 40 nodes, and 1 output
        nn = NeuralNetwork(neuronsPerLayer=[93, 40, 1])

        scores = [0 for _ in range(5000)]
        i = 0
        while mean(scores) < 20:
            scores[i % 5000] = trainOnGame(net=nn)
            if i % 5000 == 0:
                with open("scores" + str(j) + ".csv", mode='a') as file:
                    file.write(str(mean(scores)) + ";")
            i += 1
            print(i, end="\t")
Example #11
from neuralnet import NeuralNetwork
from logistic import LogisticRegression

nn = NeuralNetwork()

nn.test_nn_gradient()
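
test_nn_gradient presumably verifies analytic gradients against finite differences; a minimal sketch of such a check, assuming a scalar loss f over a flat parameter vector w (both names hypothetical):

import numpy as np

def numerical_gradient(f, w, eps=1e-5):
    # Central differences: df/dw_i ≈ (f(w + eps*e_i) - f(w - eps*e_i)) / (2*eps)
    grad = np.zeros_like(w)
    for i in range(w.size):
        old = w[i]
        w[i] = old + eps
        f_plus = f(w)
        w[i] = old - eps
        f_minus = f(w)
        w[i] = old  # restore the parameter
        grad[i] = (f_plus - f_minus) / (2 * eps)
    return grad

# For f(w) = w.w the analytic gradient is 2w, so the two printed vectors should match:
w = np.array([0.3, -1.2, 0.7])
print(numerical_gradient(lambda v: v @ v, w))
print(2 * w)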

Example #12
    test_y['pred_dead'] = [np.rint(x) for x in test_y['pred_dead']]
    cost = ((test_y['pred_survived'] - test_y['Survived'])**2).mean()
    test_y = test_y[['pred_survived']]
    test_y.columns = ['Survived']
    return test_y, cost


if __name__ == '__main__':
    x, y = prep_train_data()
    test_x, test_y = prep_test_data()
    h_layers = [10, 10]
    inputs = x.values
    outputs = y.values
    nn = NeuralNetwork(n_inputs=4,
                       n_outputs=2,
                       h_layers=h_layers,
                       inputs=inputs,
                       expected_outputs=outputs)
    nn.open_session()
    nn.make_model(is_weights_rand=True)
    nn.make_tensor_board()
    s = time.time()
    nn.train(epochs=999, learning_rate=0.01, isliveplot=False)
    raw_outputs, _ = nn.test(test_inputs=test_x.values,
                             test_outputs=test_y.values)
    test_y, cost = categorize_output(raw_outputs, test_y)
    test_y.to_csv('./data/my_submission.csv')
    print('cost: ', cost)
    e = time.time() - s
    print("Training took ", e, "seconds")
    nn.close_session()
Example #13
        [7.68, 2.43],
        [6.45, 1.23],
        [9.56, 0.78],
        [5.34, 2.67],
        [8.21, 4.21],
        [7.54, 1.88],
        [6.77, 3.56],
        [9.0, 2.56],
        [8.0, 2.0],
        [2.31, 11.0],
        [7.0, 4.5]
    ], dtype=float)

    answers = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype=float)

    nn = NeuralNetwork(2, 3, 1, 0.1)

    # training neural network
    for i in range(0, 100000):
        random_index = np.random.randint(0, np.size(answers))
        nn.train(inputs[random_index], answers[random_index])

    print("Training Complete\n")

    # Get insect dimension from end user
    input_width = float(input("Enter insect width:\t\t"))
    input_length = float(input("Enter insect length:\t"))

    # Get a trained guess
    output = nn.feed_forward([input_width, input_length])
Example #14
def main():

    # tr_data: each row is a training sequence of item ids,
    # shaped [batch_size, seqlen + 1]; one-hot encoded it becomes
    # [batch_size, seqlen + 1, onehot_size]
    tr_data = getTrainData()

    seqlen = 9
    onehot_size = 3953
    hidden_dims = [300, 200, 100, 50]

    # parameters: n_step, hidden_size, item_code_size, u_code_size, latent_vec_size
    model = NeuralNetwork(seqlen, onehot_size, hidden_dims)

    # number of training epochs
    n_epoch = 6
    learning_rate = 0.1
    #train_input = tr_data[:,:-1]
    #train_target = tr_data[:,-1]
    #batch_size = train_input.shape[0]
    batch_size = 10000
    total_size = tr_data.shape[0]

    n_batch = total_size // batch_size  # integer number of batches

    optimizer = tf.train.GradientDescentOptimizer(learning_rate)

    begintrain = time.time()
    print "start train"
    #在一个session内完成训练与预测
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        opt = optimizer.minimize(model.loss)
        for epoch in range(n_epoch):
            """
            Run n_epoch rounds of training; each epoch proceeds batch by batch.
            """
            n = 0
            for batch in range(n_batch):
                onehot_input = np.zeros([batch_size, seqlen, onehot_size])
                onehot_target = np.zeros([batch_size, onehot_size])

                n = 100 * batch
                # build the one-hot encoded inputs and targets
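                # e.g. if tr_data[n + i][j] == 42, then onehot_input[i][j][42] = 1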
                for i in range(batch_size):
                    onehot_target[i][tr_data[n + i][-1]] = 1
                    for j in range(seqlen):
                        onehot_input[i][j][tr_data[n + i][j]] = 1

                _, loss = sess.run([opt, model.loss],
                                   feed_dict={
                                       model.X_input: onehot_input,
                                       model.Y_tar: onehot_target
                                   })

            # train one final batch using data from the end of the set, filled in reverse order
            onehot_input = np.zeros([batch_size, seqlen, onehot_size])
            onehot_target = np.zeros([batch_size, onehot_size])
            for i in range(batch_size):
                onehot_target[-i - 1][tr_data[-i - 1][-1]] = 1
                for j in range(seqlen):
                    onehot_input[-i - 1][j][tr_data[-i - 1][j]] = 1
            _, loss = sess.run([opt, model.loss],
                               feed_dict={
                                   model.X_input: onehot_input,
                                   model.Y_tar: onehot_target
                               })
            aveloss = loss.mean()
            print "epoch %d cost is %f" % (epoch, aveloss)

        endtrain = time.time()
        print "train run %d miniutes" % ((endtrain - begintrain) / 60)
        print "start run pred"
        """
        开始预测
        """

        te_input, te_target = getTestData()
        n_te_lines = te_input.shape[0]
        # convert te_input to one-hot encoding
        _te_input = np.zeros([n_te_lines, seqlen, onehot_size])

        for i in range(n_te_lines):
            for j in range(seqlen):
                _te_input[i][j][te_input[i][j]] = 1
        """
        #预测返回的是一个列表,每一项为一个用户的预测,预测结果为一个大小为3953的向量 
        #向量每一项对应一部电影的概率值
        """
        pred_res = model.pred(sess, _te_input)
        print "start evaluate"

        recall = evaluate(pred_res, te_target, seqlen)
        print "recall is %f" % recall