Example #1
import os
import pickle
from collections import deque
from datetime import datetime

import pandas as pd
import tensorflow as tf  # TF 1.x-style graph/session API


def main():
    # code_length, stat_length, cpu_to_use, max_train_time, accuracy_level,
    # get_data, image_to_vector, code_to_vector and create_model are assumed
    # to be defined elsewhere in the module.
    for i, index in enumerate(range(code_length), 1):
        model_path = 'model/%s/' % index
        model_file_name = os.path.join(model_path, 'model')
        nodes_file_name = os.path.join(model_path, 'nodes.pk')
        if not os.path.exists(nodes_file_name):
            create_model(model_path)

        recent_accuracy = deque(maxlen=stat_length)
        graph = tf.Graph()
        config = tf.ConfigProto(intra_op_parallelism_threads=cpu_to_use)
        session = tf.Session(graph=graph, config=config)
        with session.graph.as_default():

            # Import the saved model definition and restore its weights
            saver = tf.train.import_meta_graph(model_file_name + '.meta')
            saver.restore(session, model_file_name)
            # pickle files must be opened in binary mode, not "rU"
            with open(nodes_file_name, 'rb') as f:
                nodes = pickle.load(f)
            x = session.graph.get_tensor_by_name(nodes['x'])
            y = session.graph.get_tensor_by_name(nodes['y'])
            keep_prob = session.graph.get_tensor_by_name(nodes['keep_prob'])
            loss = session.graph.get_tensor_by_name(nodes['loss'])
            accuracy = session.graph.get_tensor_by_name(nodes['accuracy'])
            optimizer = session.graph.get_operation_by_name(nodes['optimizer'])

            # Train the model
            for j, step in enumerate(range(max_train_time), 1):
                begin_time = datetime.now()
                imageList, codeList = get_data(100)
                # materialize as lists: feed_dict needs sequences, and
                # map() returns a lazy iterator on Python 3
                codeList = [code[index] for code in codeList]
                x_data = [image_to_vector(image) for image in imageList]
                y_data = [code_to_vector(code) for code in codeList]
                _, l, a = session.run([optimizer, loss, accuracy],
                                      feed_dict={
                                          x: x_data,
                                          y: y_data,
                                          keep_prob: .75
                                      })
                if step % 10 == 0:
                    saver.save(session, model_file_name)
                end_time = datetime.now()
                dt = end_time - begin_time
                recent_accuracy.append(a)
                mean_of_accuracy = pd.Series(recent_accuracy).mean()
                format_string = '[%d(%d/%d): %d/%d]: loss: %.2f, accuracy: %.2f, accuracy mean: %.2f(<%.2f?), time: %.2f'
                print(format_string % (index, i, code_length, j,
                                       max_train_time, l, a, mean_of_accuracy,
                                       accuracy_level, dt.total_seconds()))
                if (len(recent_accuracy) == stat_length
                        and mean_of_accuracy >= accuracy_level):
                    break

        session.close()  # free the per-character graph before the next index
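
The helpers referenced above (get_data, image_to_vector, code_to_vector, create_model) are not part of this snippet. As a rough sketch only, assuming a fixed charset of digits and greyscale input images (both assumptions, not taken from the source), code_to_vector and image_to_vector could look like this:

import numpy as np

CHARSET = '0123456789'  # assumed charset; the real one is defined elsewhere

def code_to_vector(code):
    # one-hot encode a single captcha character
    vector = np.zeros(len(CHARSET), dtype=np.float32)
    vector[CHARSET.index(code)] = 1.0
    return vector

def image_to_vector(image):
    # flatten a greyscale image and scale pixel values into [0, 1]
    return np.asarray(image, dtype=np.float32).ravel() / 255.0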
print('\n----------------------------')
print("> Total number of words:\t" + str(total_num_words))
print("> Length of vocabulary:\t\t" + str(len_vocab))
print('----------------------------')

word_to_int = dict((c, i) for i, c in enumerate(unique_words))
int_to_word = dict((i, c) for i, c in enumerate(unique_words))
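
A quick round trip through the two lookup tables, which are inverses of each other by construction:

i = word_to_int[unique_words[0]]
assert int_to_word[i] == unique_words[0]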

### ---------- Define Model ----------

num_layers = 1
drop_out_rate = 0.2
learning_rate = 0.01
optimizer = RMSprop(lr=learning_rate)

model = create_model(num_layers, drop_out_rate, len_vocab)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
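
create_model is defined elsewhere in the script. For context, a minimal sketch of a compatible definition, assuming a stacked-LSTM next-word model over one-hot word vectors; the 256 units and the seq_length default are illustrative assumptions, not taken from the source:

from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout

def create_model(num_layers, drop_out_rate, len_vocab, seq_length=10):
    model = Sequential()
    for layer in range(num_layers):
        # only the first layer declares an input shape; every layer but
        # the last returns full sequences so the next LSTM gets 3-D input
        kwargs = {'input_shape': (seq_length, len_vocab)} if layer == 0 else {}
        model.add(LSTM(256, return_sequences=(layer < num_layers - 1), **kwargs))
        model.add(Dropout(drop_out_rate))
    # softmax over the vocabulary predicts the next word
    model.add(Dense(len_vocab, activation='softmax'))
    return model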

### ---------- Predict ----------

def get_random_word(n=1, array=unique_words):
    # draw n distinct random words; random.sample does not mutate array,
    # unlike an in-place random.shuffle, which would reorder unique_words
    # for every caller
    return random.sample(array, n)
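
Usage, e.g. to seed a text-generation loop (the n=3 choice is arbitrary):

seed_words = get_random_word(n=3)
seed_ints = [word_to_int[w] for w in seed_words]
print(seed_words, '->', seed_ints)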