Example #1

from bigml.api import BigML
api = BigML()

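# Upload the local CSV file as a source. api.ok() polls until the
# resource reaches the finished state (or reports an error).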
source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1, \
    {'name': u'iris dataset'})
api.ok(dataset1)

anomaly1 = api.create_anomaly(dataset1, \
    {'name': u"iris dataset's anomaly detector"})
api.ok(anomaly1)

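# output_dataset: True makes BigML store the scores in a new dataset,
# whose id is exposed as 'output_dataset_resource' on the batch resource.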
batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'name': u"Batch Anomaly Score of iris dataset's anomaly detector with iris dataset",
     'output_dataset': True})
api.ok(batchanomalyscore1)

dataset2 = api.get_dataset(batchanomalyscore1['object']['output_dataset_resource'])
api.ok(dataset2)

dataset2 = api.update_dataset(dataset2, \
    {'fields': {u'000000': {'name': u'score'}},
     'name': u'my_dataset_from_batch_anomaly_score_name'})
api.ok(dataset2)

Example #2
from bigml.api import BigML
api = BigML()

source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1, \
    {'name': u'iris dataset'})
api.ok(dataset1)

anomaly1 = api.create_anomaly(dataset1, \
    {'name': u"iris dataset's anomaly detector"})
api.ok(anomaly1)

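# No output_dataset here: the scores stay attached to the batch
# resource itself and can be downloaded as a CSV file.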
batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'name': u'my_batch_anomaly_score_name'})
api.ok(batchanomalyscore1)

Example #3

from bigml.api import BigML
api = BigML()

# Assumed preamble: the original snippet starts mid-example at
# api.ok(source2), so the source creation is reconstructed here.
source2 = api.create_source("iris.csv")
api.ok(source2)

args = \
    {u'objective_field': {u'id': u'000004'}}
dataset1 = api.create_dataset(source2, args)
api.ok(dataset1)

# Fixing both seeds makes the anomaly detector deterministic.
args = \
    {u'anomaly_seed': u'bigml', u'seed': u'bigml'}
anomaly1 = api.create_anomaly(dataset1, args)
api.ok(anomaly1)

# fields_map pairs each detector field with the matching dataset field;
# here it is an identity mapping, spelled out explicitly.
args = \
    {u'fields_map': {u'000000': u'000000',
                     u'000001': u'000001',
                     u'000002': u'000002',
                     u'000003': u'000003',
                     u'000004': u'000004'},
     u'output_dataset': True}
batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, args)
api.ok(batchanomalyscore1)

dataset2 = api.get_dataset(batchanomalyscore1["object"]["output_dataset_resource"])
api.ok(dataset2)

# Rename the appended score field (id 100000 here) and make it the
# objective field of the output dataset.
args = \
    {u'fields': {u'100000': {u'name': u'score', u'preferred': True}},
     u'objective_field': {u'id': u'100000'}}
dataset3 = api.update_dataset(dataset2, args)
api.ok(dataset3)

Example #4

from bigml.api import BigML

api = BigML()

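# Minimal version: every resource is created with default settings;
# only the batch score opts into producing an output dataset.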
source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1)
api.ok(dataset1)

anomaly1 = api.create_anomaly(dataset1)
api.ok(anomaly1)

batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'output_dataset': True})
api.ok(batchanomalyscore1)

dataset2 = api.get_dataset(
    batchanomalyscore1['object']['output_dataset_resource'])
api.ok(dataset2)

dataset2 = api.update_dataset(dataset2, \
    {'fields': {u'000000': {'name': u'score'}},
     'name': u'my_dataset_from_batch_anomaly_score_name'})
api.ok(dataset2)

Example #5
from bigml.api import BigML
api = BigML()

source1_file = "iris.csv"
# Declare every field's name and optype explicitly instead of relying
# on BigML's automatic type inference.
args = \
    {u'fields': {u'000000': {u'name': u'sepal length', u'optype': u'numeric'},
                 u'000001': {u'name': u'sepal width', u'optype': u'numeric'},
                 u'000002': {u'name': u'petal length', u'optype': u'numeric'},
                 u'000003': {u'name': u'petal width', u'optype': u'numeric'},
                 u'000004': {u'name': u'species',
                             u'optype': u'categorical',
                             u'term_analysis': {u'enabled': True}}}}
source2 = api.create_source(source1_file, args)
api.ok(source2)

args = \
    {u'objective_field': {u'id': u'000004'}}
dataset1 = api.create_dataset(source2, args)
api.ok(dataset1)

args = \
    {u'anomaly_seed': u'bigml', u'seed': u'bigml'}
anomaly1 = api.create_anomaly(dataset1, args)
api.ok(anomaly1)

args = {}
batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, args)
api.ok(batchanomalyscore1)

Example #6

from bigml.api import BigML
api = BigML()

source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1, \
    {'name': 'iris'})
api.ok(dataset1)

# A fixed anomaly_seed makes the detector's sampling reproducible
# across runs.
anomaly1 = api.create_anomaly(dataset1, \
    {'anomaly_seed': '2c249dda00fbf54ab4cdd850532a584f286af5b6', 'name': 'iris'})
api.ok(anomaly1)

batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'name': 'iris using iris dataset', 'output_dataset': True})
api.ok(batchanomalyscore1)

dataset2 = api.get_dataset(batchanomalyscore1['object']['output_dataset_resource'])
api.ok(dataset2)

dataset2 = api.update_dataset(dataset2, \
    {'name': 'my_dataset_from_batch_anomaly_score_name'})
api.ok(dataset2)

Example #7

from bigml.api import BigML
api = BigML()

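# Same flow as Example #1, but the detector is seeded for
# reproducibility before batch scoring.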
source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1, \
    {'name': u'iris dataset'})
api.ok(dataset1)

anomaly1 = api.create_anomaly(dataset1, \
    {'anomaly_seed': u'2c249dda00fbf54ab4cdd850532a584f286af5b6',
     'name': u"iris dataset's anomaly detector"})
api.ok(anomaly1)

batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'name': u"Batch Anomaly Score of iris dataset's anomaly detector with iris dataset",
     'output_dataset': True})
api.ok(batchanomalyscore1)

dataset2 = api.get_dataset(
    batchanomalyscore1['object']['output_dataset_resource'])
api.ok(dataset2)

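# Rename the output dataset's first field (the score column, id 000000)
# to 'score'.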
dataset2 = api.update_dataset(dataset2, \
    {'fields': {u'000000': {'name': u'score'}},
     'name': u'my_dataset_from_batch_anomaly_score_name'})
api.ok(dataset2)

Example #8
from bigml.api import BigML
api = BigML()

source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1, \
    {'name': u'iris'})
api.ok(dataset1)

anomaly1 = api.create_anomaly(dataset1, \
    {'anomaly_seed': u'2c249dda00fbf54ab4cdd850532a584f286af5b6', 'name': u'iris'})
api.ok(anomaly1)

batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'name': u'iris dataset with iris', 'output_dataset': True})
api.ok(batchanomalyscore1)

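# The id of the dataset holding the scores is stored on the batch
# resource as 'output_dataset_resource'.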
dataset2 = api.get_dataset(
    batchanomalyscore1['object']['output_dataset_resource'])
api.ok(dataset2)

dataset2 = api.update_dataset(dataset2, \
    {'name': u'my_dataset_from_batch_anomaly_score_name'})
api.ok(dataset2)

Example #9

from bigml.api import BigML
api = BigML()

source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1)
api.ok(dataset1)

anomaly1 = api.create_anomaly(dataset1)
api.ok(anomaly1)

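# Same minimal flow as Example #4: default settings everywhere, with
# the scores materialized as a dataset.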
batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'output_dataset': True})
api.ok(batchanomalyscore1)

dataset2 = api.get_dataset(batchanomalyscore1['object']['output_dataset_resource'])
api.ok(dataset2)

dataset2 = api.update_dataset(dataset2, \
    {'fields': {u'000000': {'name': u'score'}},
     'name': u'my_dataset_from_batch_anomaly_score_name'})
api.ok(dataset2)

Example #10

from bigml.api import BigML
api = BigML()

source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1, \
    {'name': u'iris'})
api.ok(dataset1)

anomaly1 = api.create_anomaly(dataset1, \
    {'anomaly_seed': u'2c249dda00fbf54ab4cdd850532a584f286af5b6', 'name': u'iris'})
api.ok(anomaly1)

batchanomalyscore1 = api.create_batch_anomaly_score(anomaly1, dataset1, \
    {'name': u'iris dataset with iris', 'output_dataset': True})
api.ok(batchanomalyscore1)

dataset2 = api.get_dataset(batchanomalyscore1['object']['output_dataset_resource'])
api.ok(dataset2)

dataset2 = api.update_dataset(dataset2, \
    {'name': u'my_dataset_from_batch_anomaly_score_name'})
api.ok(dataset2)