示例#1
0
    def check(self, filepath, lineno):
        """Record a newly detected memory leak for the given source location.

        Only stores a leak when it occurred two or more times, so the test
        code must execute the same command at least twice.
        :param filepath: source file the check was triggered from
        :param lineno: line number within that file
        :return: number of bytes the memory grew by, or 0 when nothing leaked
        """
        if not self.enable:
            return 0

        lineno = int(lineno)
        current_garbage_len = self.gabage_len()
        new_leaks = current_garbage_len - self.last_garbage_len
        self.last_garbage_len = current_garbage_len

        if new_leaks <= 0:  # no new garbage objects since the previous check
            return 0

        grown_bytes = self.memory.increased_bytes()
        print('increased_bytes:', grown_bytes)
        self.total_leaks_count += new_leaks
        self.total_increased_bytes += grown_bytes
        if self.show_lines:
            report = 'leaks: %s bytes(%s)\n' % (NumUtil.comma_str(
                grown_bytes), NumUtil.comma_str(new_leaks))
            for source_lineno in range(lineno - 1, lineno):
                report += '\tincreased:%s bytes\t%s:%s\t%s\n' % (
                    NumUtil.comma_str(grown_bytes), filepath, source_lineno,
                    linecache.getline(filepath, source_lineno).strip())
            self.total_lines.append(report)
        return grown_bytes
示例#2
0
    def dump_urls(mongo_url,
                  db_name,
                  collection_name,
                  urls_file,
                  mongo_query=None,
                  limit=0):
        """Dump the 'url' field of every matching mongodb document, one per line.

        :param mongo_url: mongodb connection url (mongodb://~~~)
        :param db_name: database name of mongodb
        :param collection_name: collection name of mongodb
        :param urls_file: output text file path
        :param mongo_query: mongodb filter, default={}
        :param limit: max documents to read (0 = no limit)
        :return: None
        """
        if mongo_query is None:
            mongo_query = {}

        corpus_mongo = MongodbUtil(mongo_url,
                                   db_name=db_name,
                                   collection_name=collection_name)
        total = corpus_mongo.count()
        log.info('%s total: %s' % (corpus_mongo, NumUtil.comma_str(total)))

        # BUG FIX: create the parent directory with dirname, not basename
        # (basename of the file is the filename itself, not its directory).
        output_dir = os.path.dirname(urls_file)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with open(urls_file, 'wt') as out_f:
            for i, row in enumerate(corpus_mongo.find(mongo_query,
                                                      limit=limit)):
                if i % 1000 == 0:  # progress log only
                    log.info('%s %.1f%% writed.' %
                             (os.path.basename(urls_file), i / total * 100))
                # BUG FIX: writes were nested under the progress check, so only
                # every 1000th url was written; write every row instead.
                out_f.write(row['url'])
                out_f.write('\n')
示例#3
0
 def summary(self):
     """Return a human-readable leak summary; '' when checking is disabled."""
     # Fixed typo in the printed label: 'total_byes' -> 'total_bytes'.
     print('total_bytes:', self.memory.total_memory())
     if self.enable:
         if self.show_lines:
             summary = '[leak summary]\ntotal bytes: %s, total lines: %s, increased: %s bytes\n' % (
                 NumUtil.comma_str(
                     self.memory.total_memory()), len(self.total_lines),
                 NumUtil.comma_str(self.total_increased_bytes))
             return '\n'.join(self.total_lines) + summary
         else:
             summary = '[leak summary]\ntotal bytes: %s, total increased: %s bytes\n' % (
                 NumUtil.comma_str(self.memory.total_memory()),
                 NumUtil.comma_str(self.total_increased_bytes))
             return summary
     else:
         return ''
示例#4
0
    def dump_corpus(mongo_url, db_name, collection_name, sentences_file, mongo_query=None, limit=None):
        """Read documents from mongodb and save them sentence by sentence.

        Sentences without any hangul characters are not extracted.
        :param mongo_url: mongodb://~~~
        :param db_name: database name of mongodb
        :param collection_name: collection name of mongodb
        :param sentences_file: *.sentence file (gzip)
        :param mongo_query: default={}
        :param limit: max documents to read
        :return: None
        """
        if mongo_query is None:
            mongo_query = {}

        corpus_mongo = MongodbUtil(mongo_url, db_name=db_name, collection_name=collection_name)
        total = corpus_mongo.count()
        log.info('%s total: %s' % (corpus_mongo, NumUtil.comma_str(total)))

        # BUG FIX: create the parent directory with dirname, not basename
        # (basename of the file is the filename itself, not its directory).
        output_dir = os.path.dirname(sentences_file)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with gzip.open(sentences_file, 'wt') as out_f:
            for i, row in enumerate(corpus_mongo.find(mongo_query, limit=limit)):
                # BUG FIX: progress check was inside the content loop, printing
                # the same line once per content element; log once per document.
                if i % 1000 == 0:
                    print('%.1f%% writed.' % (i / total * 100))
                for c in row['content']:
                    for s in HangulUtil.text2sentences(c['sentences']):
                        if HangulUtil.has_hangul(s):
                            out_f.write(s)
                            out_f.write('\n')
示例#5
0
                                            if min_valid_epoch == epoch:  # save the lastest best model
                                                saver.save(sess, model_file)

                                    if save_model_each_epochs:
                                        saver.save(sess,
                                                   model_file,
                                                   global_step=epoch)

                                log.info('')
                                log.info(
                                    '"%s" train: min_valid_cost: %.8f, min_valid_epoch: %s,  %.2f secs (batch_size: %s,  total_input_data: %s, total_epochs: %s, total_train_time: %s secs)'
                                    %
                                    (model_name,
                                     min_valid_cost, min_valid_epoch,
                                     watch.elapsed(), batch_size,
                                     NumUtil.comma_str(batch_size * nth_batch),
                                     epoch, total_train_time))
                                log.info('')
                            except:
                                log.info(traceback.format_exc())
                            finally:
                                coordinator.request_stop()
                                coordinator.join(
                                    threads)  # Wait for threads to finish.
                        else:  # testing
                            x, y, learning_rate, use_first_pipeline, W1, b1, y_hat, cost, train_step, summary = create_graph(
                                model_name,
                                scope_name,
                                first_pipeline=test_pipeline,
                                second_pipeline=test_pipeline,
                                verbose=False)
                            log.info('[epoch: %s] rsme (train/valid): %.1f / %.1f model saved' % (epoch, train_rsme, valid_rsme))
                        else:
                            log.info('[epoch: %s] rsme (train/valid): %.1f / %.1f' % (epoch, train_rsme, valid_rsme))
                        if valid_rsme < early_stop_cost or valid_rsme > max_cost or math.isnan(valid_rsme):
                            running = False
                            break
                watch.stop('train')

                if model_file_saved and os.path.exists(model_file + '.index'):
                    restored = saver.restore(sess, model_file)

                    log.info('')
                    log.info('--------TEST----------')
                    watch.start('test')
                    test_rsme, _y_hat = sess.run([rsme, y_hat], feed_dict={x: x_test, y: y_test})

                    log.info('%s rsme (test): %.1f (epoch best/total: %s/%s), activation: %s, n_hiddens: %s, learning_rate: %s, weights_initializer: %s' % (
                        func.__name__, test_rsme, NumUtil.comma_str(best_epoch), NumUtil.comma_str(epoch), activation.__name__,
                        n_hiddens, learning_rate, weights_initializer.__name__))

                    # _y_hat = np.round(_y_hat)
                    for i in range(min(5, _y_hat.shape[0])):
                        log.info('%s\t->\t%.1f\t(label: %d)' % (x_test[i], _y_hat[i], y_test[i]))
                    watch.stop('test')
                    log.info('--------TEST----------')
            log.info(watch.summary())
        except:
            traceback.print_exc()

    log.info('OK.')
示例#7
0
    def learning(cls,
                 sentences_file,
                 batch_size,
                 left_gram,
                 right_gram,
                 model_file,
                 features_vector,
                 labels_vector,
                 n_hidden1=100,
                 max_sentences=0,
                 learning_rate=0.01,
                 layers=2):
        """Train the word-spacing FFNN model and save it to ``model_file``.

        Builds (or loads from cached files) the train/validation/test datasets
        from ``sentences_file``, trains the graph built by
        ``WordSpacing.build_FFNN``, evaluates it on the test set, and saves the
        TensorFlow checkpoint.

        :param sentences_file: gzip text corpus, one sentence per line
        :param batch_size: mini-batch size used during training
        :param left_gram: characters taken to the left of each split point
        :param right_gram: characters taken to the right of each split point
        :param model_file: path the trained TensorFlow model is saved to
        :param features_vector: character vocabulary (one-hot feature space)
        :param labels_vector: label vocabulary
        :param n_hidden1: size of the first hidden layer
        :param max_sentences: max sentences to read from the corpus
        :param learning_rate: optimizer learning rate
        :param layers: number of layers passed to build_FFNN
        """
        ngram = left_gram + right_gram
        n_features = len(
            features_vector) * ngram  # number of features = 17,380 * 4
        n_classes = len(labels_vector) if len(
            labels_vector) >= 3 else 1  # number of classes = 2 but len=1

        log.info('load characters list...')
        log.info('load characters list OK. len: %s\n' %
                 NumUtil.comma_str(len(features_vector)))
        watch = WatchUtil()

        # Dataset split files are cached on disk, keyed by the corpus size and
        # the left/right gram sizes.
        train_file = os.path.join(
            KO_WIKIPEDIA_ORG_DATA_DIR, 'datasets',
            'ko.wikipedia.org.dataset.sentences=%d.left=%d.right=%d.train.gz' %
            (max_sentences, left_gram, right_gram))
        validation_file = train_file.replace('.train.', '.validation.')
        test_file = train_file.replace('.train.', '.test.')
        # Rebuild the datasets only when any of the three split files is missing.
        if not os.path.exists(train_file) or not os.path.exists(
                validation_file) or not os.path.exists(test_file):
            watch.start('create dataset')
            log.info('create dataset...')
            features, labels = [], []
            check_interval = min(10000, math.ceil(max_sentences))
            log.info('total: %s' % NumUtil.comma_str(max_sentences))

            # Read up to max_sentences lines and convert each sentence into
            # (features, labels) pairs for the spacing model.
            with gzip.open(sentences_file, 'rt') as f:
                for i, line in enumerate(f, 1):
                    if max_sentences < i:
                        break

                    if i % check_interval == 0:
                        log.info(
                            'create dataset... %.1f%% readed. data len: %s' %
                            (i / max_sentences * 100,
                             NumUtil.comma_str(len(features))))

                    _f, _l = WordSpacing.sentence2features_labels(
                        line.strip(),
                        left_gram=left_gram,
                        right_gram=right_gram)
                    features.extend(_f)
                    labels.extend(_l)

            dataset = DataSet(features=features,
                              labels=labels,
                              features_vector=features_vector,
                              labels_vector=labels_vector,
                              name='all')
            log.info('dataset: %s' % dataset)
            log.info('create dataset OK.\n')
            watch.stop('create dataset')

            watch.start('dataset save')
            log.info('split to train, test, validation...')
            datasets = DataSets.to_datasets(dataset,
                                            test_rate=0.1,
                                            valid_rate=0.1,
                                            test_max=10000,
                                            valid_max=1000,
                                            shuffle=True)
            train, test, validation = datasets.train, datasets.test, datasets.validation
            log.info(train)
            log.info(test)
            log.info(validation)
            # log.info('%s %s' % (test.features[0], test.labels[0]))
            log.info('split to train, test, validation OK.\n')

            # Train split is saved as text; validation/test splits are saved
            # pre-converted to one-hot vectors.
            log.info('dataset save... %s' % train_file)
            train.save(train_file, verbose=True)  # save as text
            log.info('dataset save OK.\n')

            log.info('dataset save... %s' % validation_file)
            validation = validation.convert_to_one_hot_vector(
                verbose=True)  # save as vector
            validation.save(validation_file, verbose=True)
            log.info('dataset save OK.\n')

            log.info('dataset save... %s' % test_file)
            test = test.convert_to_one_hot_vector(verbose=True)
            test.save(test_file, verbose=True)  # save as vector
            log.info('dataset save OK.\n')
            watch.stop('dataset save')
        else:
            # All three split files exist: load the cached datasets instead.
            watch.start('dataset load')
            log.info('dataset load...')
            train = DataSet.load(train_file, verbose=True)
            validation = DataSet.load(validation_file, verbose=True)
            test = DataSet.load(test_file, verbose=True)
            log.info(train)
            log.info(validation)
            log.info(test)
            log.info('dataset load OK.\n')
            watch.stop('dataset load')

        # Sanity check: decode a few one-hot samples back to characters and
        # log them together with their labels.
        log.info('check samples...')
        for i, (features_batch, labels_batch) in enumerate(
                train.next_batch(batch_size=5, to_one_hot_vector=True), 1):
            if i > 2:
                break
            for a, b in zip(features_batch, labels_batch):
                feature, label = a, b
                _feature = feature.reshape((ngram, len(features_vector)))
                chars = ''.join(features_vector.to_values(_feature))
                has_space = np.argmax(label)
                log.info('[%s] %s -> %s, %s (len=%s) %s (len=%s)' %
                         (i, chars, has_space, feature, len(feature), label,
                          len(label)))
        log.info('check samples OK.\n')

        # Build the feed-forward network graph and pull out the ops needed below.
        graph = WordSpacing.build_FFNN(n_features,
                                       n_classes,
                                       n_hidden1,
                                       learning_rate,
                                       watch,
                                       layers=layers)

        train_step, X, Y, cost, hypothesis, predicted, accuracy = graph[
            'train_step'], graph['X'], graph['Y'], graph['cost'], graph[
                'hypothesis'], graph['predicted'], graph['accuracy']

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())

            # Training loop: one train_step per mini-batch; validation cost is
            # logged after every batch.
            n_input = 0
            log.info('total: %s' % NumUtil.comma_str(train.size))
            log.info('learn...')
            watch.start('learn')
            for step, (features_batch, labels_batch) in enumerate(
                    train.next_batch(batch_size=batch_size), 1):
                n_input += batch_size
                sess.run(train_step,
                         feed_dict={
                             X: features_batch,
                             Y: labels_batch
                         })
                log.info(
                    '[%s][%.1f%%] validation cost: %.4f' %
                    (NumUtil.comma_str(n_input), n_input / train.size * 100,
                     sess.run(cost,
                              feed_dict={
                                  X: validation.features,
                                  Y: validation.labels
                              })))
            watch.stop('learn')
            log.info('learn OK.\n')

            # Evaluate on the held-out test split.
            log.info('evaluate...')
            watch.start('evaluate...')
            _hypothesis, _correct, _accuracy = sess.run(
                [hypothesis, predicted, accuracy],
                feed_dict={
                    X: test.features,
                    Y: test.labels
                })  # Accuracy report
            watch.stop('evaluate...')
            log.info('evaluate OK.')

            # Save the trained model checkpoint, creating its directory first.
            log.info('model save... %s' % model_file)
            watch.start('model save...')
            model_dir = os.path.dirname(model_file)
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
            saver = tf.train.Saver()
            saver.save(sess, model_file)
            watch.stop('model save...')
            log.info('model save OK. %s' % model_file)

        log.info('\n')
        log.info(watch.summary())
        # log.info('hypothesis: %s %s' % (_hypothesis.shape, _hypothesis))
        # log.info('correct: %s %s' % (_correct.shape, _correct))
        log.info('accuracy: %s %s' % (_accuracy.shape, _accuracy))
        log.info('\n')
示例#8
0
            if max_test_sentences < max_sentences:  # leared sentences is smaller than full sentences
                for i, line in enumerate(f, 1):
                    if i <= max_sentences:  # skip learned sentences
                        if i % 100000 == 0:
                            log.info('skip %d th learned sentence.' % i)
                        continue
                    if len(sentences
                           ) >= max_test_sentences:  # read new sentences
                        break

                    s = line.strip()
                    if s.count(' ') > 0 and len(
                            s.replace(' ', '')
                    ) > ngram:  # sentence must have one or more space.
                        sentences.append(s)
        log.info('len(sentences): %s' % NumUtil.comma_str(len(sentences)))
        watch.stop('read sentences')

        watch.start('run tensorflow')
        accuracies, sims = [], []
        with tf.Session() as sess:
            graph = WordSpacing.build_FFNN(n_features,
                                           n_classes,
                                           n_hidden1,
                                           learning_rate,
                                           layers=layers)
            X, Y, predicted, accuracy = graph['X'], graph['Y'], graph[
                'predicted'], graph['accuracy']

            saver = tf.train.Saver()
            try: