Example #1
def test_join_instead_of_expand(join_type):
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset1 = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='dataset',
                             entities_col_name='tweet')\
        .expand(src_col_name='tweet', predicate_list=[('sioc:has_creater', 'tweep', False)])

    dataset2 = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='dataset',
                             entities_col_name='tweet')\
        .expand(src_col_name='tweet', predicate_list=[('sioc:content', 'text', False)])

    dataset2.join(dataset1, 'tweet', 'tweet', 'tweet', join_type)

    sparql_query = dataset2.to_sparql()
    print("SPARQL query with {} =\n{}\n".format(join_type, sparql_query))
Example #2
    def _expandable_expandable_join(join_type, optional1, optional2):
        # create a knowledge graph to store the graph uri and prefixes
        graph = KnowledgeGraph(
            'twitter',
            'https://twitter.com',
            prefixes={
                "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                "sioc": "http://rdfs.org/sioc/ns#",
                "sioct": "http://rdfs.org/sioc/types#",
                "to": "http://twitter.com/ontology/",
                "dcterms": "http://purl.org/dc/terms/",
                "xsd": "http://www.example.org/",
                "foaf": "http://xmlns.com/foaf/0.1/"
            })
        # return all the instances of the tweet class
        dataset = graph.entities(class_name='sioct:microblogPost',
                                 new_dataset_name='dataset1',
                                 entities_col_name='tweet')
        dataset = dataset.expand(src_col_name='tweet',
                                 predicate_list=[
                                     ('sioc:has_creater', 'tweep', False),
                                     ('sioc:content', 'text', optional1)
                                 ]).select_cols(['tweep'])

        dataset2 = graph.entities(class_name='sioct:tweeter',
                                  new_dataset_name='dataset2',
                                  entities_col_name='tweeter')
        dataset2 = dataset2.expand(src_col_name='tweeter',
                                   predicate_list=[('sioc:has_name', 'name',
                                                    optional2)])

        dataset.join(dataset2, 'tweep', 'tweeter', 'tweep', join_type)

        return dataset.to_sparql()
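
    # A hedged usage sketch for the helper above: optional1/optional2 toggle
    # OPTIONAL wrapping of the expanded predicates, and the JoinType member is
    # one used elsewhere on this page.
    print(_expandable_expandable_join(JoinType.InnerJoin, optional1=True, optional2=False))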
Example #3
def test_simple_query():
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter', 'https://twitter.com',
                           prefixes={
                               "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet', predicate_list=[
        RDFPredicate('sioc:has_creater', 'tweep', False),
        RDFPredicate('sioc:content', 'text', False)
    ])
    dataset = dataset.group_by(['tweep']).count(src_col_name='tweet', new_col_name='tweet_count', unique=True)
    sparql_query = dataset.to_sparql()
    print("sparql_query that returns each user and his unique tweet count =\n{}\n".format(sparql_query))

    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioc:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet', predicate_list=[
        RDFPredicate('sioc:has_creater', 'tweep', False),
        RDFPredicate('sioc:content', 'text', False)
    ])
    dataset = dataset.group_by(['tweep']).count('tweet')
    sparql_query = dataset.to_sparql()
    print("sparql_query that returns the number of tweets per user without unique =\n{}\n".format(sparql_query))

    dataset = graph.entities(class_name='sioc:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet', predicate_list=[
        RDFPredicate('sioc:has_creater', 'tweep', False),
        RDFPredicate('sioc:content', 'text', False)
    ])
    dataset = dataset.count(unique=True)
    sparql_query = dataset.to_sparql()
    print("sparql_query that returns the number of tweets =\n{}\n".format(sparql_query))

    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioc:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet', predicate_list=[
        RDFPredicate('sioc:has_creater', 'tweep', False)
    ])
    dataset = dataset.group_by(['tweep']).count(src_col_name='tweet', new_col_name='tweet_count', unique=True)
    dataset = dataset.expand(src_col_name='tweep', predicate_list=[RDFPredicate('sioc:content', 'text', False)])
    sparql_query = dataset.to_sparql()
    print("sparql_query that returns the tweep, tweet_count, text of each tweet =\n{}\n".format(sparql_query))
Example #4
def important_vldb_authors():
    """
    Returns the SPARQL query that finds all authors with at least 20 VLDB papers, using DBLP data.
    """
    graph = KnowledgeGraph(graph_name='dblp',
                           graph_uri='http://dblp.l3s.de',
                           prefixes={
                               "xsd":
                               "http://www.w3.org/2001/XMLSchema#",
                               "swrc":
                               "http://swrc.ontoware.org/ontology#",
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "dc":
                               "http://purl.org/dc/elements/1.1/",
                               "dcterm":
                               "http://purl.org/dc/terms/",
                               "dblprc":
                               "http://dblp.l3s.de/d2r/resource/conferences/"
                           })

    dataset = graph.entities(class_name='swrc:InProceedings',
                             new_dataset_name='papers',
                             entities_col_name='paper')
    dataset = dataset.expand(src_col_name='paper', predicate_list=[
        ('dc:title', 'title'),
        ('dc:creator', 'author'),
        ('swrc:series', 'conference')])\
        .filter(conditions_dict={'conference': ['= <http://dblp.l3s.de/d2r/resource/conferences/vldb>']})
    grouped_dataset = dataset.group_by(['author'])\
        .count('paper', 'papers_count')\
        .filter(conditions_dict={'papers_count': ['>= {}'.format(20)]})

    grouped_dataset = grouped_dataset.select_cols(['author', 'papers_count'])
    print("SPARQL Query = \n{}".format(grouped_dataset.to_sparql()))
Example #5
def test_filter_query():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet', predicate_list=[
        ('sioc:has_creater', 'tweep', False),
        ('sioc:content', 'text', True)])\
        .filter({'text': [' >= \"aa\"']})\
        .select_cols(['tweet', 'text'])
    # TODO: ensure a filter called before a join or optional is applied before the join/optional,
    #  and a filter called after the join or optional is applied after it
    sparql_query = dataset.to_sparql()
    print("sparql_query 1 =\n{}\n".format(sparql_query))
Example #6
def test_filter_after_group_by():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter', 'https://twitter.com',
                           prefixes={
                               "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    # expand each tweet by the following features: text and tweep
    ds = dataset.expand(src_col_name='tweet', predicate_list=[
        ('sioc:has_creater', 'tweep'),
        ('sioc:content', 'text')
    ])
    # return all the tweets of users who tweeted 250-300 tweets
    gds = ds.group_by(groupby_cols_list=['tweep'])\
        .count('tweet', 'tweets_count')\
        .filter(conditions_dict={'tweets_count': ['> {}'.format(250), '< {}'.format(300)]})

    # TODO: Bug. implement filter fully
    gds = gds.filter({'tweep': [' >= "aa"']})
    gds.print_query_structure()
    sparql_query = gds.to_sparql()
    end_transformation = time.time()
    print('Transformed in {} sec'.format(end_transformation-start))
    print("sparql_query 1 =\n{}\n".format(sparql_query))
Example #7
def test_groupby_query():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(
        class_name='sioct:microblogPost',
        new_dataset_name='tweets',
        # class_col_name='tweet_class',
        entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet',
                             predicate_list=[('sioc:has_creater', 'tweep',
                                              True),
                                             ('sioc:content', 'text', False)])
    dataset = dataset.group_by(['tweep'])
    sparql_query = dataset.to_sparql()
    print("sparql_query 1 =\n{}\n".format(sparql_query))
Example #8
def test_groupby_aggregation_query():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioc:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet',
                             predicate_list=[('sioc:has_creater', 'tweep',
                                              False),
                                             ('sioc:content', 'text', False)])
    grouped_dataset = dataset.group_by(['tweep'])\
        .count('tweet', 'tweets_count')\
        .select_cols(['tweep'])
    # TODO: when select after groupby and aggregation, remove the non-selected columns from the select clause
    #  including aggregation columns
    sparql_query = grouped_dataset.to_sparql()
    print("sparql_query 1 =\n{}\n".format(sparql_query))
Example #9
def test_sort_limit_offset_query():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet',
                             predicate_list=[('sioc:has_creater', 'tweep',
                                              True),
                                             ('sioc:content', 'text', False)])
    dataset.sort({'tweep': 'ASC'}).limit(10).offset(5)
    # TODO: do we care about limit after or before an offset? Do we allow one limit in each query?
    sparql_query = dataset.to_sparql()
    print("sparql_query 1 =\n{}\n".format(sparql_query))
Example #10
def test_grouped_expandable_join(join_type):
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com/',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='dataset1',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet',
                             predicate_list=[
                                 RDFPredicate('sioc:has_creater', 'tweep',
                                              False),
                                 RDFPredicate('sioc:content', 'text', False)
                             ])

    dataset2 = graph.entities(class_name='sioct:microblogPost',
                              new_dataset_name='tweets',
                              entities_col_name='tweet')
    dataset2 = dataset2.expand(
        src_col_name='tweet',
        predicate_list=[RDFPredicate('sioc:has_creater', 'tweeter')])\
        .group_by(['tweeter'])\
        .count('tweet', 'tweets_count')\
        .filter(conditions_dict={
            'tweets_count': ['>= {}'.format(200), '<= {}'.format(300)]})
    dataset2 = dataset2.expand(
        src_col_name='tweeter',
        predicate_list=[RDFPredicate('rdf:type', 'sioc:UserAccount')])
    dataset2.join(dataset, 'tweeter', 'tweep', 'user', join_type)
    dataset2.select_cols(['user'])

    sparql_query = dataset2.to_sparql()
    print("SPARQL query with {} =\n{}\n".format(join_type, sparql_query))
Example #11
def test_users_tweets_count():
    """
    In the Twitter dataset, retrieve all users having a tweet count between 250 and 300.
    :return:
    """

    start = time.time()
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')

    ds = dataset.expand(src_col_name='tweet',
                        predicate_list=[
                            RDFPredicate('sioc:has_creater', 'tweep'),
                            RDFPredicate('sioc:content', 'text'),
                            RDFPredicate('dcterms:created', 'date'),
                            RDFPredicate('to:hasmedia', 'multimedia'),
                            RDFPredicate('to:hashashtag', 'hashtag'),
                            RDFPredicate('sioc:mentions', 'users_mentioned')
                        ])

    ds = ds.expand(src_col_name='tweep',
                   predicate_list=[RDFPredicate('sioc:name', 'tweep_name')])

    gds = ds.group_by(groupby_cols_list=['tweep'])
    gds = gds.count('tweet', 'tweets_count')
    gds = gds.filter(conditions_dict={
        'tweets_count': ['> {}'.format(250), '< {}'.format(300)]
    })

    ds = ds.sort({'tweep': 'ASC'}).limit(10).offset(5)

    ds = ds.select_cols([
        'tweet', 'tweep', 'tweep_name', 'text', 'date', 'multimedia',
        'hashtag', 'users_mentioned'
    ])

    sparql = ds.to_sparql()
    end_transformation = time.time()
    print('Transformed in {} sec'.format(end_transformation - start))
    print(sparql)
Example #12
def test_grouped_grouped_join(join_type):
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com/',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='dataset1',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet', predicate_list=[
        ('sioc:has_creater', 'tweep', False),
        ('sioc:content', 'text', False)])\
        .group_by(['tweep']).count('tweet', 'tweets_count')\
        .filter({'tweets_count': ['>= {}'.format(1000)]})

    graph2 = KnowledgeGraph('twitter',
                            'https://twitter.com/',
                            prefixes={
                                "rdf":
                                "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                                "sioc2": "http://rdfs.org/sioc2/ns#",
                                "sioct2": "http://rdfs.org/sioc2/types#",
                            })
    dataset2 = graph2.entities(class_name='sioct2:twitterPost',
                               new_dataset_name='tweets',
                               entities_col_name='tweet')
    dataset2 = dataset2.expand(src_col_name='tweet', predicate_list=[
        ('sioc2:has_creater', 'tweeter')
    ]).group_by(['tweeter']).count('tweet', 'tweets_count2', unique=False)\
        .filter(conditions_dict={'tweets_count2': ['>= {}'.format(200), '<= {}'.format(300)]})
    dataset.join(dataset2, 'tweep', 'tweeter', 'user', join_type)
    dataset.select_cols(['user'])

    sparql_query = dataset.to_sparql()
    print("SPARQL query with {} =\n{}\n".format(join_type, sparql_query))
Example #13
def important_topics():
    """
    Returns the SPARQL query to identify the hot areas of research in a field of databases.
    First, we identify a list of the top conferences of the computer science field of interest.
    We then identify the authors who have published at least 20 papers in these conferences since the year 2000.
    Next, we find the titles of all papers published by these authors in the specified conferences since 2005.
    """
    graph = KnowledgeGraph(graph_name='dblp',
                           graph_uri='http://dblp.l3s.de',
                           prefixes={
                               "xsd":
                               "http://www.w3.org/2001/XMLSchema#",
                               "swrc":
                               "http://swrc.ontoware.org/ontology#",
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "dc":
                               "http://purl.org/dc/elements/1.1/",
                               "dcterm":
                               "http://purl.org/dc/terms/",
                               "dblprc":
                               "http://dblp.l3s.de/d2r/resource/conferences/"
                           })
    endpoint = 'http://10.161.202.101:8890/sparql/'
    port = 8890
    output_format = HttpClientDataFormat.PANDAS_DF
    max_rows = 1000000
    timeout = 12000
    client = HttpClient(endpoint_url=endpoint,
                        port=port,
                        return_format=output_format,
                        timeout=timeout,
                        max_rows=max_rows)

    dataset = graph.entities('swrc:InProceedings', entities_col_name='paper')\
        .expand(src_col_name='paper', predicate_list=[('dc:creator', 'author'), ('dcterm:issued', 'date'),
            ('swrc:series', 'conference'), ('dc:title', 'title')])
    dataset = dataset.cache()

    authors = dataset.filter({'date':['>= 2000'], 'conference': ['IN (dblprc:vldb, dblprc:sigmod)']})\
        .group_by(['author'])\
        .count('paper', 'papers_count')\
        .filter({'papers_count':['>= 20']})

    titles = dataset.join(authors, 'author').filter({
        'date': ['>= 2005']
    }).select_cols(['title'])

    print("SPARQL Query = \n{}".format(titles.to_sparql()))

    df = titles.execute(client, return_format=output_format)
    print(df)
Example #14
def test_expandable_expandable_join_w_selectcols():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='dataset1',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet',
                             predicate_list=[
                                 RDFPredicate('sioc:has_creater', 'tweep',
                                              False),
                                 RDFPredicate('sioc:content', 'text', False)
                             ]).select_cols(['tweep', 'text'])

    dataset2 = graph.entities(class_name='sioct:tweeter',
                              new_dataset_name='dataset2',
                              entities_col_name='tweep')
    dataset2 = dataset2.expand(
        src_col_name='tweep',
        predicate_list=[RDFPredicate('sioc:has_name', 'name',
                                     False)]).select_cols(['tweep', 'name'])

    dataset.join(dataset2, 'tweep', 'tweep', 'tweep', JoinType.InnerJoin)

    sparql_query = dataset.to_sparql()
    print("SPARQL query =\n{}\n".format(sparql_query))
Example #15
def test_join_query():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='dataset1',
                             entities_col_name='tweet')
    dataset = dataset.expand(src_col_name='tweet',
                             predicate_list=[
                                 RDFPredicate('sioc:has_creater', 'tweep',
                                              False),
                                 RDFPredicate('sioc:content', 'text', False)
                             ])

    dataset2 = graph.entities(class_name='sioc:UserAccount',
                              new_dataset_name='dataset2',
                              entities_col_name='tweep')
    dataset2 = dataset2.expand(
        src_col_name='tweep',
        predicate_list=[RDFPredicate('sioc:has_name', 'name', False)])

    # TODO: put the whole first dataset in one optional block. now, its in multiple optional blocks
    dataset.join(dataset2, 'tweep', 'tweep', 'tweep', JoinType.RightOuterJoin)

    sparql_query = dataset.to_sparql()
    print("sparql_query 1 =\n{}\n".format(sparql_query))
Example #16
def test_twitter_query():
    # TODO: remove endpoint URI
    endpoint = 'http://10.161.202.101:8890/sparql/'
    port = 8890
    output_format = HttpClientDataFormat.PANDAS_DF
    max_rows = 1000000
    timeout = 12000
    default_graph_url = 'http://twitter.com'
    client = HttpClient(endpoint_url=endpoint,
                        port=port,
                        return_format=output_format,
                        timeout=timeout,
                        default_graph_uri=default_graph_url,
                        max_rows=max_rows
                        )

    graph = KnowledgeGraph('twitter',
                           'http://twitter.com/',
                           prefixes={
                               "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })

    dataset = graph.entities(class_name='sioct:microblogPost',
                             entities_col_name='tweet')
    ds = dataset.expand(src_col_name='tweet', predicate_list=[RDFPredicate('sioc:has_creater', 'tweep')])\
        .group_by(['tweep'])\
        .count('tweet', 'tweets_count')\
        .filter({'tweets_count': ['>= {}'.format(200), '<= {}'.format(300)]})

    ds = ds.expand('tweep', [RDFPredicate('sioc:has_creater', 'tweet', directionality=PredicateDirection.INCOMING)]).\
        expand('tweet', [
        RDFPredicate('sioc:content', 'text', optional=False),
        RDFPredicate('dcterms:created', 'date', optional=True),
        RDFPredicate('to:hasmedia', 'multimedia', optional=True),
        RDFPredicate('to:hashashtag', 'hashtag', optional=True),
        RDFPredicate('sioc:mentions', 'users_mentioned', optional=True)
    ])

    ds = ds.select_cols(['tweet', 'tweep', 'text', 'date', 'multimedia', 'hashtag', 'users_mentioned', 'tweets_count'])

    print("Sparql Query = \n{}".format(ds.to_sparql()))
Example #17
def explore_dblp():
    graph = KnowledgeGraph(graph_name='dblp',
                           graph_uri='http://dblp.l3s.de',
                           prefixes={
                               "xsd":
                               "http://www.w3.org/2001/XMLSchema#",
                               "swrc":
                               "http://swrc.ontoware.org/ontology#",
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "dc":
                               "http://purl.org/dc/elements/1.1/",
                               "dcterm":
                               "http://purl.org/dc/terms/",
                               "dblprc":
                               "http://dblp.l3s.de/d2r/resource/conferences/"
                           })

    endpoint = 'http://10.161.202.101:8890/sparql/'
    port = 8890
    output_format = HttpClientDataFormat.PANDAS_DF
    max_rows = 1000000
    timeout = 12000
    client = HttpClient(endpoint_url=endpoint,
                        port=port,
                        return_format=output_format,
                        timeout=timeout,
                        max_rows=max_rows)

    classes = graph.classes_and_freq().sort({'frequency': 'DESC'})
    #class_with_max_freq = graph.classes_and_freq().max('frequency').to_sparql()
    attributes_of_papers = graph.features('swrc:InProceedings')
    attributes_of_papers_with_freq = graph.features_and_freq(
        'swrc:InProceedings')
    papers = graph.entities('swrc:InProceedings')
    #papers_with_features = graph.entities_and_features('swrc:InProceedings').to_sparql()
    num_papers = graph.num_entities('swrc:InProceedings')

    print("{}".format(classes.to_sparql()))
    df = classes.execute(client, return_format=output_format)

    #print("{}".format(attributes_of_papers.to_sparql()))
    #df = attributes_of_papers.execute(client, return_format=output_format)

    print(df)
Example #18
def test_convenience_functions():
    graph = KnowledgeGraph(graph_name='dbpedia')
    entities = graph.entities('dbpo:BasketballPlayer',
                              entities_col_name='player')
    print(entities.to_sparql())
    features = graph.features('dbpo:BasketballPlayer',
                              features_col_name='feature_uri')
    print(features.to_sparql())
    entities_feats = graph.entities_and_features(
        'dbpo:BasketballPlayer', [('dbpp:nationality', 'nationality'),
                                  ('dbpp:birthPlace', 'place'),
                                  ('dbpp:birthDate', 'birthDate'),
                                  ('dbpp:team', 'team')])
    print(entities_feats.to_sparql())
    classes_freq = graph.classes_and_freq()
    print(classes_freq.to_sparql())
    feats_freq = graph.features_and_freq('dbpo:BasketballPlayer')
    print(feats_freq.to_sparql())
    n_entities = graph.num_entities('dbpo:BasketballPlayer')
    print(n_entities.to_sparql())
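
    # These datasets generate SPARQL like any other and can be executed; a
    # hedged sketch against the public DBpedia endpoint (the endpoint URL is
    # an assumption, not part of the original example):
    client = HttpClient(endpoint_url='https://dbpedia.org/sparql',
                        return_format=HttpClientDataFormat.PANDAS_DF)
    df = classes_freq.execute(client, return_format=HttpClientDataFormat.PANDAS_DF)
    print(df.head())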
Example #19
def test_simple_query():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com/',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    sparql_query = dataset.to_sparql()
    print("sparql_query to return tweets =\n{}\n".format(sparql_query))

    endpoint = 'http://10.161.202.101:8890/sparql/'
    port = 8890
    output_format = HttpClientDataFormat.PANDAS_DF
    max_rows = 1000000
    timeout = 12000
    default_graph_url = 'http://twitter.com/'
    client = HttpClient(endpoint_url=endpoint,
                        port=port,
                        return_format=output_format,
                        timeout=timeout,
                        default_graph_uri=default_graph_url,
                        max_rows=max_rows)

    #df = dataset.execute(client, return_format=output_format)
    duration = time.time() - start
    print("Done in {} secs".format(duration))
Example #20
  import re
  import pandas as pd
  from rdfframes.client.http_client import HttpClientDataFormat, HttpClient
  from rdfframes.knowledge_graph import KnowledgeGraph
  # JoinType is used below; this import path follows the rdfframes repository
  # layout and may differ across versions.
  from rdfframes.utils.constants import JoinType

  graph = KnowledgeGraph(
          graph_uri='http://dblp.l3s.de',
          prefixes={"xsd": "http://www.w3.org/2001/XMLSchema#",
                    "swrc": "http://swrc.ontoware.org/ontology#",
                    "rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                    "dc": "http://purl.org/dc/elements/1.1/",
                    "dcterm": "http://purl.org/dc/terms/",
                    "dblprc": "http://dblp.l3s.de/d2r/resource/conferences/"})
  # endpoint and port were undefined in the original listing; these are the
  # placeholder values used in the other examples on this page.
  endpoint = 'http://10.161.202.101:8890/sparql/'
  port = 8890
  output_format = HttpClientDataFormat.PANDAS_DF
  client = HttpClient(endpoint_url=endpoint, port=port, return_format=output_format)

  # RDFFrames code for creating the dataframe
  papers = graph.entities('swrc:InProceedings', entities_col_name='paper')
  papers = papers.expand('paper', [('dc:creator', 'author'), ('dcterm:issued', 'date'),
                                   ('swrc:series', 'conference'), ('dc:title', 'title')]).cache()
  authors = papers.filter({'date': ['>= 2005'], 'conference': ['IN (dblprc:vldb, dblprc:sigmod)']})\
      .group_by(['author'])\
      .count('paper', 'n_papers')\
      .filter({'n_papers': ['>= 20']})
  titles = papers.join(authors, 'author', 'author', 'author', JoinType.InnerJoin).select_cols(['title'])
  df = titles.execute(client, return_format=output_format)
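
  # NLTK's stopwords corpus used below must be downloaded once per
  # environment; this setup step is an assumption, not part of the original
  # listing.
  import nltk
  nltk.download('stopwords')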

  # Preprocessing and cleaning
  from nltk.corpus import stopwords
  df['clean_title'] = df['title'].str.replace("[^a-zA-Z#]", " ", regex=True)
  df['clean_title'] = df['clean_title'].apply(lambda x: x.lower())
  df['clean_title'] = df['clean_title'].apply(lambda x: ' '.join([w for w in str(x).split() if len(w)>3])) 
  stop_words        = stopwords.words('english')
  tokenized_doc     = df['clean_title'].apply(lambda x: x.split())
  df['clean_title'] = tokenized_doc.apply(lambda x:[item for item in x if item not in stop_words])
   
  graph = KnowledgeGraph(graph_uri='http://dbpedia.org',
                         prefixes= {'dcterms': 'http://purl.org/dc/terms/',
                                  'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
                                  'dbpprop': 'http://dbpedia.org/property/',
                                  'dbpr': 'http://dbpedia.org/resource/'}) 
  endpoint = 'http://10.161.202.101:8890/sparql/'
  output_format = HttpClientDataFormat.PANDAS_DF
  timeout = 12000
  client = HttpClient(endpoint_url=endpoint, return_format=output_format)

  # RDFFrames code for creating the dataframe
  
  songs = graph.entities('dbpo:Song', entities_col_name='song')\
      .expand('song', [('dbpp:album', 'album'), ('dbpp:artist', 'artist'),
                       ('dbpp:title', 'title'), ('dbpp:lyrics', 'lyrics'), ('dbpp:writer', 'writer'),
                       ('dbpp:studio', 'studio'), ('dbpp:genre', 'genre')])\
      .expand('album', [('dbpp:title', 'Album_title'), ('dbpp:artist', 'Album_artist')])\
      .filter({'artist': ['langMatches(lang(?artist), "en")']})
 
  sparql_query = songs.to_sparql()
  print(sparql_query)
  
  # execute and return a dataframe of songs whose artist has an English name.
 
  df = songs.execute(client, return_format=output_format)

  # Preprocessing and preparation
 
  regex = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
Example #21
def test_expand_after_group_by():
    start = time.time()
    # create a knowledge graph to store the graph uri and prefixes
    graph = KnowledgeGraph('twitter',
                           'https://twitter.com',
                           prefixes={
                               "rdf":
                               "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
                               "sioc": "http://rdfs.org/sioc/ns#",
                               "sioct": "http://rdfs.org/sioc/types#",
                               "to": "http://twitter.com/ontology/",
                               "dcterms": "http://purl.org/dc/terms/",
                               "xsd": "http://www.example.org/",
                               "foaf": "http://xmlns.com/foaf/0.1/"
                           })
    # return all the instances of the tweet class
    dataset = graph.entities(class_name='sioct:microblogPost',
                             new_dataset_name='tweets',
                             entities_col_name='tweet')
    sparql_query = dataset.to_sparql()
    print("sparql_query 1 =\n{}\n".format(sparql_query))

    # expand each tweet by the following features: text and tweep
    ds = dataset.expand(src_col_name='tweet',
                        predicate_list=[
                            RDFPredicate('sioc:has_creater', 'tweep'),
                            RDFPredicate('sioc:content', 'text')
                        ])
    sparql_query = ds.to_sparql()
    print("sparql_query 2 =\n{}\n".format(sparql_query))

    # return all the tweets of users who tweeted 250-300 tweets
    gds = ds.group_by(groupby_cols_list=['tweep'])\
        .count('tweet', 'tweets_count')\
        .filter({'tweets_count': ['> {}'.format(250), '< {}'.format(300)]})
    sparql_query = gds.to_sparql()
    print("sparql_query 3 =\n{}\n".format(sparql_query))

    # expand these tweets by the following features: date, media, hashtags, users mentioned
    gds = gds.expand(src_col_name='tweep',
                     predicate_list=[
                         RDFPredicate(
                             'sioc:has_creater',
                             'tweet',
                             directionality=PredicateDirection.INCOMING)
                     ])
    sparql_query = gds.to_sparql()
    print("sparql_query 3.1 =\n{}\n".format(sparql_query))

    gds = gds.expand(src_col_name='tweet',
                     predicate_list=[
                         RDFPredicate('dcterms:created', 'date'),
                         RDFPredicate('sioc:content', 'text'),
                         RDFPredicate('to:hasmedia', 'multimedia'),
                         RDFPredicate('to:hashashtag', 'hashtag'),
                         RDFPredicate('sioc:mentions', 'users_mentioned')
                     ])
    sparql_query = gds.to_sparql()
    print("sparql_query 4 =\n{}\n\n\n\n".format(sparql_query))
    # select all the tweets and their features
    gds = gds.select_cols([
        'tweet', 'tweep', 'text', 'date', 'multimedia', 'hashtag',
        'users_mentioned'
    ])

    # ds.print_query_structure()
    gds.print_query_structure()
    sparql_query = gds.to_sparql()
    end_transformation = time.time()
    print('Transformed in {} sec'.format(end_transformation - start))
    print("sparql_query 5 =\n{}\n".format(sparql_query))