Example #1
    def test_create_and_update(self):
        delete_all_datasets_by_name(self.api, self.dataset_name)
        filters = {"rp_entity_id": {"$in": ['AAAAAA']}}
        dataset = Dataset(
            name=self.dataset_name,
            filters=filters,  # a dataset with a filter
        )
        dataset = self.api.create_dataset(dataset)

        assert dataset.id is not None
        dataset_id = dataset.id

        # change the dataset
        new_filters = {"rp_entity_id": {"$in": ['BBBBBB']}}
        dataset.filters = new_filters
        dataset.save()

        # get the dataset again
        dataset = self.api.get_dataset(dataset_id)
        assert dataset.filters == new_filters
        new_filters = {"rp_entity_id": {"$in": ['CCCCCC']}}
        dataset.filters = new_filters
        dataset.save()

        dataset.delete()

        assert delete_all_datasets_by_name(self.api, self.dataset_name) == 0
Example #2
def create_dataset(self, dataset):
    # be sure to work on a copy of the dataset attributes
    new_dataset_data = dataset.as_dict()
    # drop the uuid (if any) so the server assigns a new one
    new_dataset_data.pop('uuid', None)
    new_dataset = Dataset(api=self, **new_dataset_data)
    new_dataset.save()
    dataset_id = new_dataset.id
    logger.info("Created dataset %s", dataset_id)
    return new_dataset
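
For context, a hedged usage sketch of this method, using only calls shown elsewhere in these examples (the dataset id 'us30' is illustrative, borrowed from Example #3):

# Sketch: clone an existing dataset through create_dataset.
source = api.get_dataset('us30')    # any existing dataset id
clone = api.create_dataset(source)  # the uuid is dropped, so the server assigns a new id
print(clone.id)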
Example #3
def test_dataset_copy_updated(self):
    # copy an existing dataset's filters into a new custom dataset
    source_dataset = Dataset(api=self.api, id='us30')
    new_dataset = Dataset(
        api=self.api,
        name="copy of the us30 dataset",
        filters=source_dataset.filters,
        fields=['timestamp_utc', 'rp_entity_id', 'avg_sentiment'],
        custom_fields=[{
            "avg_sentiment": {
                "avg": {
                    "field": "EVENT_SENTIMENT_SCORE",
                }
            }
        }],
        frequency='daily',
        tags=['copy', 'test'])
    new_dataset.save()
    new_dataset.delete()
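
As a brief follow-up, a check that could go just before the delete() call above: re-fetch the saved copy and confirm the filters were preserved. A sketch using only calls shown in these examples:

# Sketch: re-fetch the saved copy and verify its filters match the source.
saved = self.api.get_dataset(new_dataset.id)
assert saved.filters == source_dataset.filters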
Example #4
        "count": {
            "field": "RP_ENTITY_ID"
        }
    }
}, {
    "average_news_count_90d": {
        "avg": {
            "field": "news_count_1d",
            "lookback": 90
        }
    }
}]

custom_dataset = Dataset(api=api,
                         name="Us30 indicators",
                         filters=new_filters,
                         fields=new_fields,
                         frequency='daily')
custom_dataset.save()
print(custom_dataset)

# request the datafile and save it to a file
job = custom_dataset.request_datafile(
    start_date='2017-01-01 19:30',
    end_date='2017-01-02 19:30',
    compressed=True,
    time_zone='Europe/London',
)

job.save_to_file('output.csv')

dataset_id = None  # set this to an existing dataset id if you already have one

if dataset_id is None:
    dataset = Dataset(api=api,
                      filters={},
                      name='Average sentiment',
                      frequency='daily',
                      fields=[{
                          'average_ess': {
                              'avg': {
                                  'field': 'EVENT_SENTIMENT_SCORE'
                              }
                          }
                      }])
    dataset.save()
    dataset_id = dataset.id
else:
    dataset = api.get_dataset(dataset_id)

# job = Job(api=api,
#           token='xxx')  # if you already have a job you can use this

# ... or request a new one
job = dataset.request_datafile(
    start_date='2018-01-01 00:00:00',
    end_date='2018-01-02 00:00:00',
)

# iterate over the results row by row (see the ROLLUP-writing sketch below)
for line in job.iterate_results():
    timestamp, entity_id, entity_name, avg_sentiment = line
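
The loop above unpacks each row but writes nothing out. A minimal sketch of the "write only the ROLLUP rows" step, assuming rollup rows can be recognized by an rp_entity_id of 'ROLLUP' (an assumption, not confirmed by these examples):

import csv

# Sketch: stream the results and persist only the rollup rows.
# The 'ROLLUP' marker is an assumption; adjust it to match your datafile.
with open('rollup_only.csv', 'w', newline='') as out:
    writer = csv.writer(out)
    writer.writerow(['timestamp_utc', 'rp_entity_id', 'entity_name', 'average_ess'])
    for row in job.iterate_results():
        timestamp, entity_id, entity_name, avg_sentiment = row
        if entity_id == 'ROLLUP':  # assumption: rollup rows carry this id
            writer.writerow(row)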
# You can then add indicator functions (https://app.ravenpack.com/api-documentation/#indicator-syntax)
# Alternatively, you can create the dataset via the query builder and just use its dataset_uuid
dataset = Dataset(api,
                  name='My Indicator dataset',
                  filters={"relevance": {"$gt": 90}},
                  frequency='daily',
                  fields=[{"avg_1d": {"avg": {"field": "EVENT_SENTIMENT_SCORE", "lookback": 1,
                                              "mode": "granular"}}},
                          {"avg_7d": {
                              "avg": {"field": "avg_1d", "lookback": 1, "mode": "granular"}}},
                          {"buzz_365d": {"buzz": {"field": "RP_ENTITY_ID", "lookback": 365}}},
                          {"newsvolume_1d": {"count": {"field": "RP_ENTITY_ID", "lookback": 1}}},
                          {"newsvolume_365d": {"avg": {"field": "newsvolume_1d", "lookback": 365,
                                                       "mode": "granular"}}}]
                  )
dataset.save()

# you can also change the fields (remember to save afterwards)
dataset.fields = [
    {"avg": {"avg": {"field": "EVENT_SENTIMENT_SCORE", "lookback": 365}}},
]
dataset.save()

# You can then generate a datafile for your desired date range
job = dataset.request_datafile(
    start_date='2018-04-10', end_date='2018-04-11',
    output_format='csv'
)
job.save_to_file('output.csv')  # This will poll until the file is ready for download

# a convenience function to delete all the datasets with a given name
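
The body of that helper is not shown. A minimal sketch matching its use in Example #1, where it returns the number of datasets deleted; api.get_datasets(scope='private') is an assumption about how the client lists the account's datasets, not confirmed by the source:

# Sketch of delete_all_datasets_by_name; get_datasets(scope='private')
# is an assumption about the client API, adjust as needed.
def delete_all_datasets_by_name(api, name):
    deleted = 0
    for dataset in api.get_datasets(scope='private'):
        if dataset.name == name:
            dataset.delete()
            deleted += 1
    return deleted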