Example #1
import sys

from flask import Flask

from anomaly_detection import log
from anomaly_detection.api.middleware.auth import NoAuthMiddleWare
from anomaly_detection.api import v1beta
from anomaly_detection.api.version import version
from anomaly_detection.utils import config as cfg
from anomaly_detection.common import options  # load configuration, don't remove

CONF = cfg.CONF

api_opts = [
    cfg.StrOpt('listen_ip',
               default='0.0.0.0',
               help='API server listen ip'),
    cfg.StrOpt('listen_port',
               default='8085',
               help='API server listen port'),
    cfg.StrOpt('dbscan_figure_style',
               default='blue_red',
               choices=['blue_red', 'core_border_spectral'],
               help='DBSCAN figure output style')
]

CONF.register_opts(api_opts, "apiserver")


class ServerManager:
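
The options above are registered under the "apiserver" group before the Flask-based ServerManager is defined (the class body is cut off here). A minimal sketch of how those options would typically be read back, assuming the cfg wrapper keeps oslo.config's attribute-style access; run_api_server and its app.run call are illustrative, not the project's code:

def run_api_server(app: Flask):
    # CONF must already be parsed, e.g. CONF(sys.argv[1:]) in the entry point.
    host = CONF.apiserver.listen_ip
    port = int(CONF.apiserver.listen_port)  # listen_port is registered as a string
    app.run(host=host, port=port)
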
Example #2
import copy
import json

import requests
from keystoneauth1 import identity
from keystoneauth1 import session as ks

from anomaly_detection import log
from anomaly_detection.utils import config as cfg

LOG = log.getLogger(__name__)
CONF = cfg.CONF

auth_opts = [
    cfg.StrOpt('auth_url',
               default='http://127.0.0.1/identity',
               help='Authentication URL'),
    cfg.StrOpt('auth_type',
               default="password",
               help='Authentication type'),
    cfg.StrOpt('username',
               default="admin",
               help='User name'),
    cfg.StrOpt('password',
               default="opensds@123",
               help='User password'),
    cfg.StrOpt('project_name',
               default='admin',
               help='Project name'),
    cfg.StrOpt('project_domain_name',
               default='Default',
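
This example is cut off before its CONF.register_opts call, so the option group is unknown. A hedged sketch of how options like these are commonly fed into keystoneauth1; plain arguments stand in for the CONF lookups and the helper name is illustrative:

def build_session(auth_url, username, password, project_name,
                  project_domain_name='Default', user_domain_name='Default'):
    # Build a password-authenticated Keystone session (standard keystoneauth1 usage).
    auth = identity.Password(auth_url=auth_url,
                             username=username,
                             password=password,
                             project_name=project_name,
                             project_domain_name=project_domain_name,
                             user_domain_name=user_domain_name)
    return ks.Session(auth=auth)
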
Example #3
    'minutes': 'm',
    'hours': 'h',
    'days': 'd',
    'weekday': 'w',
    'midnight': 'midnight'
}
_LOG_FILE = "anomaly_detection.log"
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

logging_opts = [
    cfg.BoolOpt('debug',
                default=False,
                help='If set to true, the logging level will be set to '
                'DEBUG instead of the default INFO level.'),
    cfg.StrOpt('log_date_format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               help='Defines the format string for %%(asctime)s in log '
               'records. Default: %(default)s.'),
    cfg.StrOpt('log_file',
               help='(Optional) Name of log file to send logging output to. '
               'If no default is set, logging will go to stderr as '
               'defined by use_stderr. '),
    cfg.StrOpt('log_dir',
               help='(Optional) The base directory used for relative '
               'log_file paths.'),
    cfg.StrOpt(
        'log_rotate_interval_type',
        choices=['Seconds', 'Minutes', 'Hours', 'Days', 'Weekday', 'Midnight'],
        ignore_case=True,
        default='days',
        help='Rotation interval type. The time of the last file '
        'change (or the time when the service was started) is '
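
The abbreviation table at the top of this example mirrors the "when" codes of Python's logging.handlers.TimedRotatingFileHandler, which is presumably how the rotation options are consumed. A hedged sketch of that wiring; the function and its arguments are illustrative, not the project's log.setup:

import logging
from logging import handlers

def build_file_handler(log_path, when, date_format, debug=False):
    # "when" must be one of TimedRotatingFileHandler's codes
    # ('S', 'M', 'H', 'D', 'W0'-'W6', 'midnight').
    handler = handlers.TimedRotatingFileHandler(log_path, when=when)
    handler.setLevel(logging.DEBUG if debug else logging.INFO)
    handler.setFormatter(logging.Formatter(datefmt=date_format))
    return handler
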
Example #4
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading

from anomaly_detection.utils import config as cfg
from anomaly_detection import log
from anomaly_detection import utils

LOG = log.getLogger(__name__)
CONF = cfg.CONF

db_opts = [
    cfg.StrOpt('backend',
               default='sqlalchemy',
               help='The back end to use for the database.'),
    cfg.StrOpt('connection',
               help='The SQLAlchemy connection string to use to connect to '
               'the database.',
               secret=True)
]
CONF.register_opts(db_opts, group='database')


class DBAPI(object):
    """Initialize the chosen DB API backend.
    """
    def __init__(self, backend_name, backend_mapping=None, lazy=False):

        self._backend = None
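
The DBAPI class body is truncated here. For orientation, a minimal stand-in for the usual backend_mapping idea, modeled on similar OpenStack-style loaders rather than on this project's actual implementation:

import importlib

class SimpleDBAPI(object):
    """Hypothetical loader: translate a backend name (e.g. CONF.database.backend)
    to a module path via backend_mapping and delegate attribute access to it."""

    def __init__(self, backend_name, backend_mapping=None):
        backend_mapping = backend_mapping or {}
        self._backend = importlib.import_module(
            backend_mapping.get(backend_name, backend_name))

    def __getattr__(self, key):
        # Forward attribute lookups (query helpers, etc.) to the backend module.
        return getattr(self._backend, key)
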
Example #5
import json

from anomaly_detection import log
from anomaly_detection.context import get_admin_context
from anomaly_detection.db import base
from anomaly_detection.exception import LoopingCallDone
from anomaly_detection.ml import csv
from anomaly_detection.utils import config as cfg
from kafka import KafkaConsumer

LOG = log.getLogger(__name__)
CONF = cfg.CONF

data_parser_opts = [
    cfg.StrOpt('csv_file_name',
               default='performance.csv',
               help='Data receiver source file name'),
    cfg.StrOpt('kafka_bootstrap_servers',
               default='localhost:9092',
               help='Kafka bootstrap servers'),
    cfg.StrOpt('kafka_topic', default='metrics', help='Kafka topic'),
    cfg.IntOpt('kafka_retry_num', default=3, help='Kafka retry count')
]

CONF.register_opts(data_parser_opts, "data_parser")


class LoopingCall(object):
    def __init__(self, interval=60, raise_on_error=False):
        self._interval = interval
        self._raise_on_error = raise_on_error
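
The LoopingCall body is cut off above. A hedged sketch of how the data_parser options registered in this example could drive a Kafka consumer; this is not the project's receiver, and the JSON-encoded message format is an assumption:

def consume_metrics():
    # Reuses the snippet's CONF, LOG, json and KafkaConsumer imports.
    consumer = KafkaConsumer(
        CONF.data_parser.kafka_topic,
        bootstrap_servers=CONF.data_parser.kafka_bootstrap_servers)
    for message in consumer:
        metric = json.loads(message.value)
        LOG.debug("received metric: %s", metric)
        yield metric
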
Example #6
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

from anomaly_detection import log
from anomaly_detection.data_parser import manager
from anomaly_detection.utils import config as cfg
# needed to register global_opts
from anomaly_detection.common import options

CONF = cfg.CONF

data_parser_opts = [
    cfg.StrOpt('receiver_name', default='csv', help='Data receiver name')
]

CONF.register_opts(data_parser_opts, "data_parser")


def main():
    CONF(sys.argv[1:])
    log.setup(CONF, "anomaly_detection")
    mgr = manager.Manager(CONF.data_parser.receiver_name)
    mgr.run()


if __name__ == '__main__':
    sys.exit(main())
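
Assuming the cfg wrapper keeps oslo.config's standard behaviour, the group registered above can be exercised without a real receiver; a hypothetical smoke test (set_override is oslo.config API):

def smoke_test():
    CONF([])                                   # parse with no CLI arguments
    CONF.set_override('receiver_name', 'csv', group='data_parser')
    print(CONF.data_parser.receiver_name)      # -> 'csv'
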
Example #7
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io

from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas

from anomaly_detection.db.base import Base
from anomaly_detection.utils import import_object
from anomaly_detection.utils import config as cfg

CONF = cfg.CONF

training_opts = [
    cfg.StrOpt('dataset_source_type',
               choices=['csv', 'database'],
               default='csv',
               help='Training dataset source type'),
    cfg.StrOpt('dataset_csv_file_name',
               default='performance.csv',
               help='Training dataset csv file name'),
    cfg.IntOpt('dataset_number',
               default=10000,
               help='Number of dataset records used for training')
]

CONF.register_opts(training_opts, "training")


def print_figure(fig, fmt='png'):
    output = io.BytesIO()
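
The example stops inside print_figure; a plausible completion of the Agg pattern it starts, kept as a separate sketch rather than a claim about the original body:

def print_figure_sketch(fig, fmt='png'):
    # Render a Matplotlib figure to raw image bytes with the non-interactive
    # Agg backend, e.g. for returning the picture from an HTTP handler.
    output = io.BytesIO()
    FigureCanvas(fig)              # attach an Agg canvas to the figure
    fig.savefig(output, format=fmt)
    return output.getvalue()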