Code example #1
File: simple_classifier.py  Project: dodo47/R2D2-1
def main():
    """Entry point: configure logging, load vocabularies, and train a
    SimpleClassifier once per hyper-parameter permutation.

    For every permutation produced by ``create_permutations`` a fresh,
    uniquely named output directory is created, the run configuration is
    dumped there, a per-run log-file handler is attached, and a classifier
    is built and trained in its own TensorFlow session.
    """
    options = read_options()

    # Console logging; a per-run file handler is swapped in inside the loop.
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',
                            '%m/%d/%Y %I:%M:%S %p')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logfile = None
    logger.addHandler(console)

    # The vocabularies are shared by every permutation, so load them once.
    # Context managers close the handles deterministically instead of
    # leaking them until GC (the original used bare open() inside json.load).
    logger.info('reading vocab files...')
    with open(options['vocab_dir'] + '/relation_vocab.json') as vocab_file:
        relation_vocab = json.load(vocab_file)
    with open(options['vocab_dir'] + '/entity_vocab.json') as vocab_file:
        entity_vocab = json.load(vocab_file)
    logger.info('Reading mid to name map')
    # Placeholder: the FB15k mid->name map load is disabled in this version.
    mid_to_word = {}
    logger.info('Done..')
    logger.info('Total number of entities {}'.format(len(entity_vocab)))
    logger.info('Total number of relations {}'.format(len(relation_vocab)))

    # Session configuration. BUG FIX: the original constructed this config
    # but never passed it to tf.Session(), so the settings were ignored.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = False
    config.log_device_placement = False

    for permutation in create_permutations(options):
        # Unique run directory: timestamp + short uuid + key hyper-parameters.
        current_time = datetime.datetime.now().strftime('%y_%b_%d__%H_%M_%S')
        permutation['output_dir'] = (
            options['base_output_dir'] + '/' + current_time
            + '__' + str(uuid.uuid4())[:4]
            + '_' + str(permutation['path_length'])
            + '_' + str(permutation['beta'])
            + '_' + str(permutation['test_rollouts'])
            + '_' + str(permutation['Lambda']))

        permutation['model_dir'] = permutation['output_dir'] + '/' + 'model/'

        # Normalise the 0/1 flag from the options file into a real bool.
        permutation['load_model'] = (permutation['load_model'] == 1)

        ##Logger##
        permutation['path_logger_file'] = permutation['output_dir']
        permutation['log_file_name'] = permutation['output_dir'] + '/log.txt'
        os.makedirs(permutation['output_dir'])
        os.mkdir(permutation['model_dir'])
        with open(permutation['output_dir'] + '/config.txt', 'w') as out:
            pprint(permutation, stream=out)

        # Pretty-print the arguments, right-aligned on the longest key.
        max_key_len = max(len(key) for key in permutation)
        fmt_string = '\t%' + str(max_key_len) + 's : %s'
        print('Arguments:')
        for key_pair in sorted(permutation.items()):
            print(fmt_string % key_pair)

        # Swap the previous run's file handler (removeHandler(None) is a
        # harmless no-op on the first iteration) for this run's log file.
        logger.removeHandler(logfile)
        logfile = logging.FileHandler(permutation['log_file_name'], 'w')
        logfile.setFormatter(fmt)
        logger.addHandler(logfile)

        permutation['relation_vocab'] = relation_vocab
        permutation['entity_vocab'] = entity_vocab

        judge = SimpleClassifier(permutation)
        judge.construct_graph()
        # BUG FIX: actually apply the session config built above.
        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            judge.train(sess)
Code example #2
File: trainer.py  Project: YINGMAgator/MINERVA_tf2
        logger.info("Hits@10: {0:7.4f}".format(all_final_reward_10))
        logger.info("Hits@20: {0:7.4f}".format(all_final_reward_20))
        logger.info("auc: {0:7.4f}".format(auc))

    def top_k(self, scores, k):
        """Return the flattened indices of the k highest-scoring actions per row.

        `scores` is reshaped to [B, k * max_num_actions]; for each row the
        column indices of its k largest entries are kept (in ascending score
        order, since argsort is ascending) and the result is flattened into a
        1-D array of length B * k.
        """
        per_row = k * self.max_num_actions
        reshaped = scores.reshape(-1, per_row)  # [B, k*max_num_actions]
        ranked = np.argsort(reshaped, axis=1)
        # argsort is ascending, so the last k columns are the top-k indices.
        top_indices = ranked[:, -k:]  # [B, k]
        return top_indices.reshape(-1)


if __name__ == '__main__':

    # read command line options
    options = read_options()
    # Set logging
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',
                            '%m/%d/%Y %I:%M:%S %p')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(console)
    logfile = logging.FileHandler(options['log_file_name'], 'w')
    logfile.setFormatter(fmt)
    logger.addHandler(logfile)
    #read dataset

    options['dataset'] = {}
    Dataset_list = ['train', 'test', 'dev', 'graph']
    for dataset in Dataset_list: