Code example #1
0
 def __init__(self, data, max_depth, min_samples_split, min_samples_leaf):
     """
     Initialize a decision-tree builder.

     :param data: raw dataset handed to DataProvider
     :param max_depth: maximum depth allowed for the tree
     :param min_samples_split: minimum samples required to split a node
     :param min_samples_leaf: minimum samples required at a leaf node
     """
     self.provider = DataProvider(data)
     self.max_depth = max_depth
     self.min_samples_split = min_samples_split
     self.min_samples_leaf = min_samples_leaf
     # Number of candidate features per split: sqrt(num_features) + 1 —
     # presumably random-forest style feature subsampling; confirm against
     # the splitting code.
     self.random_features_nums = int(1+np.sqrt(self.provider.num_features))
     # The tree itself is built later; None until fitted.
     self.tree = None
Code example #2
0
File: manager.py  Project: Semyazz/nova-stats
    def init_host(self):
        # Service start-up hook: build the admin context, the RRD-backed
        # data provider and the migration algorithm, then subscribe to
        # monitor messages.

        self.topic = HealthMonitorAPI.HEALTH_MONITOR_TOPIC
        self.ctx = context.get_admin_context()
        self.ctx.read_deleted = "no"  # exclude deleted DB rows from queries
        self.dataProvider = DataProvider(self.RRD_ROOT_DIR, self.db, self.ctx)
        self.instances = self.db.instance_get_all_by_host(self.ctx, self.host)
        self.migration_algorithm = AntColonyAlgorithm()

        self._init_monitors_connections()
        # STARTED guards against starting the algorithm twice (see
        # raise_alert, which drops alerts while it is True).
        self.STARTED = False

        # Created lazily by _init_scheduler() on first use.
        self.scheduler_rpc_api = None
Code example #3
0
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Fix the RNG seed so runs are reproducible.
seed = 42
np.random.seed(seed)

# ------------ Data preprocessing ------------

# ------------ Training ------------
# Candidate training files:
# 1. rs_train5_tolerances_10_no_headers.txt
# 2. rs_train5_exhaustive_no_headers.txt
# 3. rs_train3_tolerances_10_no_headers.txt

# Load the train and test sets. DataProvider.split() presumably returns
# (features, labels) — confirm against the DataProvider implementation.
dp_train = DataProvider('data/rs_train3_tolerances_10_no_headers.txt')
x_train, y_train = dp_train.split()

dp_test = DataProvider('data/rs_test3_tolerances_10_no_headers.txt')
x_test, y_test = dp_test.split()


def perform_ova_single_nn():
    # NOTE(review): this function appears truncated in this view — ann and
    # parameter_space are built but not yet used by the visible code.
    ann = ANN((100, ), 5000, 0.01, verbose=False)

    # Hyper-parameter grid to search over.
    parameter_space = {
        'hidden_layer_sizes': [(50, ), (100, ), (150, ), (200, ), (50, 50),
                               (50, 100), (100, 50), (200, 50)],
        'learning_rate_init': [0.005, 0.01, 0.015, 0.02, 0.025],
        'max_iter': [2000, 3000, 4000, 5000]
    }
Code example #4
0
    def load_data(self,
                  p_data_dir,
                  n_data_dir,
                  p_limits,
                  n_limits,
                  test_prob=0.2,
                  valid_prob=0.25):
        """Read positive/negative samples and build the data providers.

        In training mode the data is split into test / validation / train
        subsets; otherwise everything becomes test data.

        :param p_data_dir: directory holding positive samples
        :param n_data_dir: directory holding negative samples
        :param p_limits: limit forwarded to read_data for positive samples
        :param n_limits: limit forwarded to read_data for negative samples
        :param test_prob: fraction of the data reserved for testing
        :param valid_prob: fraction of the remainder used for validation
        """
        # K-fold cross-validation fixes the validation fraction to 1/K.
        if self.K_cross is not None:
            valid_prob = 1.0 / self.K_cross

        # Evaluation-only mode: all data goes to the test set.
        if not self.train:
            test_prob = 1.0
        print("Loading data!!!")
        print("Positive data dir: ", p_data_dir)
        print("Negative data dir: ", n_data_dir)
        p_data = read_data(p_data_dir,
                           self.image_size,
                           normalize=True,
                           limit=p_limits,
                           additional=self.additional)
        n_data = read_data(n_data_dir,
                           self.image_size,
                           normalize=True,
                           limit=n_limits,
                           additional=self.additional)
        random.shuffle(p_data)
        random.shuffle(n_data)
        if self.train:
            # Carve out the test subset first.
            test_p_data, p_data = self.split_data(p_data, test_prob)
            test_n_data, n_data = self.split_data(n_data, test_prob)
            self.test_p_DP = DataProvider(test_p_data)
            self.test_n_DP = DataProvider(test_n_data)

            # Training-data augmentation (vertical flips) — disabled.
            #p_data += list(map(np.flipud, p_data))
            #n_data += list(map(np.flipud, n_data))
            #random.shuffle(p_data)
            #random.shuffle(n_data)

            # Split the remainder into validation and training subsets.
            valid_p_data, train_p_data = self.split_data(p_data, valid_prob)
            valid_n_data, train_n_data = self.split_data(n_data, valid_prob)
            self.train_p_DP = FeaturesDataProvider(
                train_p_data, self.haar_features_extractors)
            self.train_n_DP = FeaturesDataProvider(
                train_n_data, self.haar_features_extractors)
            self.valid_p_DP = FeaturesDataProvider(
                valid_p_data, self.haar_features_extractors)
            self.valid_n_DP = FeaturesDataProvider(
                valid_n_data, self.haar_features_extractors)

            print("train data size: ", "p:", self.train_p_DP.size(), "n:",
                  self.train_n_DP.size())
            print("valid data size: ", "p:", self.valid_p_DP.size(), "n:",
                  self.valid_n_DP.size())
            print("test data size: ", "p:", self.test_p_DP.size(), "n:",
                  self.test_n_DP.size())
        else:
            self.test_p_DP = DataProvider(p_data)
            self.test_n_DP = DataProvider(n_data)
            print("test data size: ", "p:", self.test_p_DP.size(), "n:",
                  self.test_n_DP.size())
Code example #5
0
class Context(object):
    """Training/evaluation context for a cascade of classifiers.

    Owns the data providers (train/valid/test, positive/negative) and the
    bookkeeping needed between cascade stages: the indices of samples that
    survived previous stages and the per-stage/overall rate targets.
    """

    def __init__(self,
                 f=1.0,
                 d=1.0,
                 F_target=0.0,
                 p_data_dir="",
                 p_limits=-1,
                 n_data_dir="",
                 n_limits=-1,
                 image_size=(12, 48),
                 train=True,
                 haar=True,
                 additional=True,
                 K_cross=None):
        self.image_size = image_size
        self.train = train
        self.K_cross = K_cross
        self.additional = additional
        print("Is Train: ", train)

        self.haar_features_extractors = get_all_feature_extractor(
            self.image_size) if haar else []  # all haar feature extractors
        self.load_data(p_data_dir, n_data_dir, p_limits, n_limits)  # load data

        if train:
            # f/d and F/D presumably are per-stage and cumulative rates
            # (cascade-style); F_target is the stopping target — confirm
            # against the training loop.
            self.f, self.d = f, d
            self.F, self.D = 1.0, 1.0
            self.F_target = F_target
            self.valid_predict = []
            self.train_predict = []
            self.remain_train_n_idx = list(range(
                self.train_n_DP.size()))  # surviving training negatives
            self.remain_valid_p_idx = list(range(
                self.valid_p_DP.size()))  # surviving validation positives
            self.remain_valid_n_idx = list(range(
                self.valid_n_DP.size()))  # surviving validation negatives

    def split_data(self, data, split_point=0.8):
        # Split a list at the given fraction; returns (head, tail).
        tmp = int(len(data) * split_point)
        return data[:tmp], data[tmp:]

    def load_data(self,
                  p_data_dir,
                  n_data_dir,
                  p_limits,
                  n_limits,
                  test_prob=0.2,
                  valid_prob=0.25):
        """Read positive/negative samples and build the data providers.

        In training mode the data is split into test / validation / train
        subsets; otherwise everything becomes test data.
        """
        # K-fold cross-validation fixes the validation fraction to 1/K.
        if self.K_cross is not None:
            valid_prob = 1.0 / self.K_cross

        # Evaluation-only mode: all data goes to the test set.
        if not self.train:
            test_prob = 1.0
        print("Loading data!!!")
        print("Positive data dir: ", p_data_dir)
        print("Negative data dir: ", n_data_dir)
        p_data = read_data(p_data_dir,
                           self.image_size,
                           normalize=True,
                           limit=p_limits,
                           additional=self.additional)
        n_data = read_data(n_data_dir,
                           self.image_size,
                           normalize=True,
                           limit=n_limits,
                           additional=self.additional)
        random.shuffle(p_data)
        random.shuffle(n_data)
        if self.train:
            # Carve out the test subset first.
            test_p_data, p_data = self.split_data(p_data, test_prob)
            test_n_data, n_data = self.split_data(n_data, test_prob)
            self.test_p_DP = DataProvider(test_p_data)
            self.test_n_DP = DataProvider(test_n_data)

            # Training-data augmentation (vertical flips) — disabled.
            #p_data += list(map(np.flipud, p_data))
            #n_data += list(map(np.flipud, n_data))
            #random.shuffle(p_data)
            #random.shuffle(n_data)

            # Split the remainder into validation and training subsets.
            valid_p_data, train_p_data = self.split_data(p_data, valid_prob)
            valid_n_data, train_n_data = self.split_data(n_data, valid_prob)
            self.train_p_DP = FeaturesDataProvider(
                train_p_data, self.haar_features_extractors)
            self.train_n_DP = FeaturesDataProvider(
                train_n_data, self.haar_features_extractors)
            self.valid_p_DP = FeaturesDataProvider(
                valid_p_data, self.haar_features_extractors)
            self.valid_n_DP = FeaturesDataProvider(
                valid_n_data, self.haar_features_extractors)

            print("train data size: ", "p:", self.train_p_DP.size(), "n:",
                  self.train_n_DP.size())
            print("valid data size: ", "p:", self.valid_p_DP.size(), "n:",
                  self.valid_n_DP.size())
            print("test data size: ", "p:", self.test_p_DP.size(), "n:",
                  self.test_n_DP.size())
        else:
            self.test_p_DP = DataProvider(p_data)
            self.test_n_DP = DataProvider(n_data)
            print("test data size: ", "p:", self.test_p_DP.size(), "n:",
                  self.test_n_DP.size())

    def get_train_data(self):
        """Return (features, labels) for training: positives plus the
        surviving negatives, labels 1/0 respectively.

        NOTE(review): assumes features are indexed [feature][sample] —
        the label counts use len(...[0]); confirm against
        FeaturesDataProvider.get_features.
        """
        train_p_features = self.train_p_DP.get_features()
        train_n_features = self.train_n_DP.get_features(
            data_idx=self.remain_train_n_idx)
        tmp_features = zip(train_p_features, train_n_features)
        # Concatenate each (positive, negative) feature pair in parallel.
        with poolContext(processes=8) as pool:
            train_features = pool.map(np.concatenate, tmp_features)
        labels = np.array([1] * len(train_p_features[0]) +
                          [0] * len(train_n_features[0]))
        return train_features, labels

    def get_valid_data(self):
        """Return (features, labels) for validation, restricted to the
        samples that survived previous cascade stages."""
        valid_p_features = self.valid_p_DP.get_features(
            data_idx=self.remain_valid_p_idx)
        valid_n_features = self.valid_n_DP.get_features(
            data_idx=self.remain_valid_n_idx)
        tmp_features = zip(valid_p_features, valid_n_features)
        # Concatenate each (positive, negative) feature pair in parallel.
        with poolContext(processes=8) as pool:
            valid_features = pool.map(np.concatenate, tmp_features)
        labels = np.array([1] * len(valid_p_features[0]) +
                          [0] * len(valid_n_features[0]))
        return valid_features, labels

    #def get_valid_data(self, haar_features_extractors, additional_features_idx):
    #p_features = self.valid_p_DP.get_features(haar_features_extractors, additional_features_idx, self.remain_valid_p_idx)
    #n_features = self.valid_n_DP.get_features(haar_features_extractors, additional_features_idx, self.remain_valid_n_idx)
    #tmp_features = zip(p_features, n_features)
    #with poolContext(processes=8) as pool:
    #    valid_features = pool.map(np.concatenate, tmp_features)
    #labels = np.array([1] * len(self.remain_valid_p_idx) + [0] * len(self.remain_valid_n_idx))
    #return valid_features, labels

    def get_test_data(self):
        """Return ((images, additional_features), labels) for the test
        set, positives first."""
        (p_images, p_additional_features
         ) = self.test_p_DP.get_data_and_additional_features()
        (n_images, n_additional_features
         ) = self.test_n_DP.get_data_and_additional_features()

        labels = np.array([1] * len(p_images) + [0] * len(n_images))
        return (p_images + n_images,
                p_additional_features + n_additional_features), labels

    def update_data(self):
        # Update the validation sets from the stage predictions: samples
        # predicted 0 are rejected and do not reach the next cascade stage.
        remain_valid_p_num = len(self.remain_valid_p_idx)
        remain_valid_n_num = len(self.remain_valid_n_idx)
        assert remain_valid_p_num + remain_valid_n_num == len(
            self.valid_predict)

        # Iterate in reverse so deletions do not shift pending indices.
        for idx in range(remain_valid_p_num)[::-1]:
            if self.valid_predict[idx] < EPS:
                del self.remain_valid_p_idx[idx]
        for idx in range(remain_valid_n_num)[::-1]:
            if self.valid_predict[remain_valid_p_num + idx] < EPS:
                del self.remain_valid_n_idx[idx]

        # Update the training set: keep only false-positive negatives.
        remain_train_p_num = self.train_p_DP.size()
        remain_train_n_num = len(self.remain_train_n_idx)
        assert remain_train_p_num + remain_train_n_num == len(
            self.train_predict)

        for idx in range(remain_train_n_num)[::-1]:
            if self.train_predict[remain_train_p_num + idx] < EPS:
                del self.remain_train_n_idx[idx]

    def k_cross(self):  # unfinished
        # Rotate validation/training folds for K-fold cross-validation:
        # the old validation features are merged back into the training
        # providers and a fresh fold becomes the validation set.
        self.F, self.D = 1.0, 1.0
        self.valid_predict = []
        self.train_predict = []
        self.remain_train_n_idx = list(range(
            self.train_n_DP.size()))  # surviving training negatives
        self.remain_valid_p_idx = list(range(
            self.valid_p_DP.size()))  # surviving validation positives
        self.remain_valid_n_idx = list(range(
            self.valid_n_DP.size()))  # surviving validation negatives

        k_p_size = self.valid_p_DP.size()
        k_n_size = self.valid_n_DP.size()

        # Carve a validation-sized fold out of the training providers.
        p_data1, p_data2 = self.train_p_DP.split_data(k_p_size)
        n_data1, n_data2 = self.train_n_DP.split_data(k_n_size)

        tmp_p_data = zip(p_data2, self.valid_p_DP.features)
        with poolContext(processes=8) as pool:
            self.train_p_DP.features = pool.map(np.concatenate, tmp_p_data)
        self.train_p_DP._size = len(self.train_p_DP.features[0])
        self.valid_p_DP.features = p_data1
        self.valid_p_DP._size = len(self.valid_p_DP.features[0])

        tmp_n_data = zip(n_data2, self.valid_n_DP.features)
        with poolContext(processes=8) as pool:
            self.train_n_DP.features = pool.map(np.concatenate, tmp_n_data)
        self.train_n_DP._size = len(self.train_n_DP.features[0])
        self.valid_n_DP.features = n_data1
        self.valid_n_DP._size = len(self.valid_n_DP.features[0])
Code example #6
0
from PersistenFetcher import PersistentFetcher
from dataProvider import DataProvider
from networkGenerator import generate
from math import isnan as nan
import math
print("lets go")
fetcher = PersistentFetcher(
    "/home/ps/PycharmProjects/evo-feature-engineer/data")
dataProvider = DataProvider(fetcher)
data = dataProvider.getData(None, None)
net = generate(data, 9)
# Learning rate, oscillation counter, lwp flag (meaning defined by the
# network's interation() API — opaque here), and an initial mse sentinel
# (> 1) so the loop body runs at least once.
lRate, oscillations, lwp, mse = 0.001, 0, True, 101
iters = 0
while (mse > 1 and iters < 10):
    print("ITERATION: ", iters)
    sum_e = 0
    nans = 0
    for d in data:
        # NOTE(review): "interation" is presumably a typo for "iteration"
        # in the network API; kept as-is — it is the project method name.
        lRate, oscillations, lwp, e = net.interation(d.x, d.y, lRate,
                                                     oscillations, lwp)
        if (nan(e)):
            nans += 1
        else:
            sum_e += e
    iters += 1
    valids = len(data) - nans
    # mse becomes -1 when every sample produced NaN.
    mse = sum_e / (valids) if valids > 0 else -1
    print("NaNs: ", nans)
    # NOTE(review): the label says "mean absolute error" but this prints
    # sqrt(mse) (an RMSE); math.sqrt also raises ValueError when mse == -1.
    print("mean absolute error: ", math.sqrt(mse))

mean = sum([d.y for d in data]) / len(data)
Code example #7
0
File: manager.py  Project: Semyazz/nova-stats
class HealthMonitorManager(manager.Manager):
    """Service manager that consumes health alerts and runs an ant-colony
    migration algorithm to rebalance VMs across hosts (nova-stats)."""

    BASE_RPC_API_VERSION = "1.0"
    RPC_API_VERSION = "1.0"

    # Root directory of the ganglia RRD files used as the metric source.
    RRD_ROOT_DIR = "/home/stack/ganglia"

    #    def __init__(self, topic=None):
    #        print "HelloMgr"
    ##        self.topic = topic

    # Class-level shared state: timestamp of the last executed migration
    # plan and locks guarding algorithm start/stop.
    timestamp = None
    stabilizationTimeDelta = datetime.timedelta(minutes=20)
    lock = threading.RLock()
    lock2 = threading.RLock()
    # RPC API Implementation -------------------------------------------------------------------------------------------
    def raise_alert(self, ctx=None, alert=None):
        """RPC entry point: consume a health alert and, if the system is
        idle and has stabilized since the last plan, run the migration
        algorithm for it."""
        LOG.info(alert)

        with self.lock:
            if self.STARTED:
                # Drop alert, algorithm is running.
                # TODO: Maybe alerts should be added to cyclic buffer?
                return
            else:

                # Do not check alerts because it's too early
                if (
                    self.timestamp is not None
                    and (self.timestamp + MigrationParams.STABILIZATION_TIME_DELTA) > datetime.datetime.now()
                ):
                    LOG.info("It's too early to run algorithm. Waiting for stabilization.")
                    return

                self.STARTED = self.dataProvider.preProcessAlert(alert)
        try:

            # NOTE(review): preProcessAlert is called a second time here
            # (it was already called above to set STARTED) — if it has side
            # effects or is expensive this is a duplicate call; confirm.
            if self.dataProvider.preProcessAlert(alert):
                if not self._is_migrating():
                    self.prepare_resource_allocation_algorithm_input(alert)
            pass
        except Exception as err:
            print "exception %s" % err
            LOG.error(err)

        with self.lock:
            self.STARTED = False

    # -------------------------------------------------------------------------------------------------------------------

    def _get_scheduler_rpc_api(self):
        """Lazily initialise and return the scheduler RPC API client."""
        api = self.scheduler_rpc_api
        if not api:
            self._init_scheduler()
            api = self.scheduler_rpc_api
        return api

    def _is_migrating(self):
        """Return True when any instance in the database is currently
        migrating; the algorithm must not run while a migration is live."""
        admin_ctx = context.get_admin_context()

        for candidate in self.db.instance_get_all(admin_ctx):
            if candidate.vm_state == "migrating":
                LOG.error("Migration in process. Abort algorithm execution")
                return True

        return False
        # scheduler = self._get_scheduler_rpc_api()

    # Manager inherited ------------------------------------------------------------------------------------------------
    def init_host(self):
        # Service start-up hook: build the admin context, the RRD-backed
        # data provider and the migration algorithm, then subscribe to
        # monitor messages.

        self.topic = HealthMonitorAPI.HEALTH_MONITOR_TOPIC
        self.ctx = context.get_admin_context()
        self.ctx.read_deleted = "no"  # exclude deleted DB rows from queries
        self.dataProvider = DataProvider(self.RRD_ROOT_DIR, self.db, self.ctx)
        self.instances = self.db.instance_get_all_by_host(self.ctx, self.host)
        self.migration_algorithm = AntColonyAlgorithm()

        self._init_monitors_connections()
        # STARTED guards against starting the algorithm twice (see
        # raise_alert, which drops alerts while it is True).
        self.STARTED = False

        # Created lazily by _init_scheduler() on first use.
        self.scheduler_rpc_api = None

    #        self._test_rpc_call()

    def periodic_tasks(self, context, raise_on_error=False):
        # Intentionally a no-op: this manager performs no periodic work.
        pass

    # -------------------------------------------------------------------------------------------------------------------

    class MigrationSettings(object):
        # Default live-migration options passed to the scheduler RPC API.
        # NOTE(review): the class-level block_migration is a 1-tuple
        # (False,) — stray trailing comma — but __init__ always overwrites
        # the instance attribute with plain False, so instances are
        # unaffected.
        block_migration = (False,)
        disk_over_commit = False

        def __init__(self, **kwargs):
            # Start from defaults, then apply any keyword overrides.
            self.block_migration = False
            self.disk_over_commit = False
            for key in kwargs:
                setattr(self, key, kwargs[key])

    migration_settings = MigrationSettings()

    #    migration_s = namedtuple("", "block_migration disk_over_commit")
    #   http://stackoverflow.com/questions/11708799/any-way-to-initialize-attributes-properties-during-class-creation-in-python

    def _init_scheduler(self):
        """Create the scheduler RPC API client used for live migrations."""

        self.scheduler_rpc_api = SchedulerAPI()

        # NOTE(review): SchedulerAPI() cannot normally evaluate to None, so
        # this guard is effectively dead; kept as a defensive check.
        if self.scheduler_rpc_api is None:
            LOG.error("Scheduler == None")
            raise Exception("Error during execution scheduler")

    def _init_monitors_connections(self):
        """Subscribe this manager to the monitors' fanout RPC topic."""

        self.conn = rpc.create_connection(new=True)

        LOG.debug(_("Creating Consumer connection for Service %s") % self.topic)

        # NOTE(review): rpc_dispatcher is created but never used — the
        # consumer below is registered with `self` directly; confirm intent.
        rpc_dispatcher = self.create_rpc_dispatcher()

        # According to documentation fanout=True => broadcast to all services.
        self.conn.create_consumer(self.topic, self, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

    def prepare_resource_allocation_algorithm_input(self, alert):
        """
            Hostname is virtual machine's hostname (name)

            Refresh metrics, run the ant-colony algorithm on the current
            host/VM topology, pick the cheapest migration plan and execute
            it when it is profitable.
        :return:
        """

        # Refresh metrics; bail out when the data provider reports the
        # data set is not usable this round.
        isValid = self.dataProvider.getData()

        if not isValid:
            LOG.error("skipping this round")
            return

        hosts = self.dataProvider.hosts.values()
        virtualMachines = []
        now = datetime.datetime.now()

        # Dump per-host and per-VM metrics ("stat [...]" lines; logged at
        # error level, presumably so they always appear in the log).
        for host in hosts:
            LOG.error("stat [%s] host %s\t %s", int(time.mktime(now.timetuple())), host.Hostname, host.getMetrics())
            virtualMachines.extend(host._vms)
            for vm in host._vms:
                LOG.error(
                    "stat [%s]vm %s\t %s", int(time.mktime(now.timetuple())), vm.InstanceName, vm.getMetrics(host)
                )

        InputData = namedtuple("InputData", "Hosts VirtualMachines Alert")
        input_data_set = InputData(Hosts=hosts, VirtualMachines=virtualMachines, Alert=alert)

        # Count used hosts and how many boundaries are violated
        usedHostsBeforeMigration = sum([host.getIsOn() for host in hosts])
        # Dictionary <host, tuple(upperBoundsViolations, lowerBoundsViolations)>
        violationsDictionaryBeforeMigration = HealthMonitorManager.count_boundaries_violations(hosts)

        # todo if alert mem
        self.dataProvider.updateWeights()

        LOG.error("Start Algorithm")
        try:
            migrationPlans = self.migration_algorithm.execute_algorithm(input_data_set)
        except Exception as exc:
            # NOTE(review): if execute_algorithm raises, migrationPlans is
            # never bound and the assert below fails with a NameError
            # rather than the intended AssertionError.
            LOG.error("OOOOOPS %s" % exc)
        LOG.error("Stop Algorithm")

        assert migrationPlans is not None, "Migration plans is none"
        plan, migrations_counter = self.choose_migration_plan(migrationPlans, virtualMachines)

        # Count used hosts and how many boundaries are violated
        usedHostsAfterMigration = sum([host.getIsOn() for host in hosts])
        # Dictionary <host, tuple(upperBoundsViolations, lowerBoundsViolations)>
        violationsDictionaryAfterMigration = HealthMonitorManager.count_boundaries_violations(hosts)

        # Profit gained on the violated SLA boundaries.
        # NOTE(review): profitUpper/profitLower are computed but never used
        # below; confirm whether they should influence the skip decision.
        profitUpper, profitLower = HealthMonitorManager.boundaries_profit_gained(
            violationsDictionaryBeforeMigration, violationsDictionaryAfterMigration
        )

        LOG.error("stat [%s] Migration count %s", int(time.mktime(now.timetuple())), migrations_counter)
        LOG.error(
            "stat [%s] Hosts used before %s, after %s",
            int(time.mktime(now.timetuple())),
            usedHostsBeforeMigration,
            usedHostsAfterMigration,
        )

        if alert["severity"] == 2 and usedHostsAfterMigration >= usedHostsBeforeMigration:
            # todo make alert['severity'] more human readable

            LOG.error("There is no profit from migration - skip")
            return

        self.dataProvider.saveWeights()

        for mi in plan:
            LOG.error("stat [%s] migration %s@%s", int(time.mktime(now.timetuple())), mi.instance_id, mi.hostname)

        if migrations_counter != 0:
            self.execute_plan(plan)

            # Timestamp
            self.timestamp = datetime.datetime.now()

        pass

    @staticmethod
    def count_boundaries_violations(hosts):
        """Count, per host, how many upper/lower metric bounds are violated.

        :param hosts: iterable of Host objects
        :return: dict mapping host -> (upperBoundsViolations, lowerBoundsViolations)
        """

        def count_true(dictionary):
            # Count how many of the "C"/"N"/"M" flags are truthy
            # (presumably CPU/network/memory bounds — confirm with Host).
            assert isinstance(dictionary, dict)

            def raise_exception_missing_key(key):
                # BUGFIX: this previously tested the literal key "C"
                # regardless of the argument, so a missing "N" or "M" key
                # went undetected (and the message was always "Missing C
                # key"). Check and report the requested key instead.
                if key not in dictionary:
                    LOG.error("Missing %s key" % key)
                    raise Exception("Missing %s key" % key)

            raise_exception_missing_key("C")
            raise_exception_missing_key("N")
            raise_exception_missing_key("M")

            true_counter = 0

            if dictionary["C"]:
                true_counter += 1

            if dictionary["N"]:
                true_counter += 1

            if dictionary["M"]:
                true_counter += 1

            return true_counter

        violations = {}

        for host in hosts:
            assert isinstance(host, Host)
            # Count twice — once with key validation, once by summing the
            # raw values — and assert both agree, as a consistency check.
            upperBoundsWithRaise = count_true(host.getUpperBounds())
            upperBoundsViolations = sum(int(violation) for violation in host.getUpperBounds().values())

            assert upperBoundsWithRaise == upperBoundsViolations, "Upperbounds violations count error"

            lowerBoundsWithRaise = count_true(host.getLowerBounds())
            lowerBoundsViolations = sum(int(violation) for violation in host.getLowerBounds().values())

            assert lowerBoundsWithRaise == lowerBoundsViolations, "Lowerbounds violations count error"

            violations[host] = (upperBoundsViolations, lowerBoundsViolations)

        return violations

    @staticmethod
    def boundaries_profit_gained(violationsBefore, violationsAfter):
        """Compare SLA-boundary violation counts before and after migration.

        Both arguments map host -> (upperViolations, lowerViolations).
        Returns (profitUpper, profitLower): profit > 0 means fewer
        violations after the migration (improvement), 0 means no change,
        and profit < 0 means a regression.
        """

        assert isinstance(violationsBefore, dict)
        assert isinstance(violationsAfter, dict)
        assert len(violationsBefore.keys()) == len(violationsAfter.keys())

        def element_wise_totals(pairs):
            # Sum a collection of (upper, lower) tuples component-wise.
            upper_total, lower_total = 0, 0
            for upper, lower in pairs:
                upper_total += upper
                lower_total += lower
            return upper_total, lower_total

        upper_before, lower_before = element_wise_totals(violationsBefore.values())
        upper_after, lower_after = element_wise_totals(violationsAfter.values())

        profitUpper = upper_before - upper_after
        profitLower = lower_before - lower_after

        return profitUpper, profitLower

    def choose_migration_plan(self, migrationPlans, virtualMachines):
        """Pick the plan requiring the fewest actual migrations and apply
        it to the in-memory topology.

        Self-migrations (VM already on its target host) are stripped from
        the chosen plan. Returns (plan, migrationCount), or (None, None)
        when there are no plans.
        """

        minValue = len(virtualMachines)
        plan = None

        if migrationPlans:
            for current in migrationPlans:

                migrationCount = 0

                for vm in virtualMachines:
                    # NOTE(review): find() may return None when the plan has
                    # no item for this VM; the attribute access below would
                    # then raise AttributeError — confirm plans are complete.
                    migrationItem = find(lambda migration_item: migration_item.instance_id == vm.InstanceName, current)

                    if vm.Hostname != migrationItem.hostname:
                        migrationCount += 1

                    LOG.error("mg count %s", migrationCount)

                # NOTE(review): `current is not None` is always true here —
                # a None plan would already have crashed in find() above.
                # Also note a plan needing exactly len(virtualMachines)
                # migrations is never selected (strict <), leaving plan None.
                if current is not None and migrationCount < minValue:
                    plan = current
                    minValue = migrationCount

        else:
            LOG.info("There are no migration plans")
            return (None, None)

        selfMigrations = []
        migrationCount = 0

        #        print "vms"
        #        for vm in virtualMachines:
        #            print vm.InstanceName
        #
        #        print "Migration Items"
        #        for item in plan:
        #            print "%s@%s" % (item.instance_id, item.hostname)
        # Apply the chosen plan to the in-memory host/VM mapping and
        # collect the no-op (self) migrations for removal.
        for vm in virtualMachines:

            assert plan is not None, "Plan is none"
            assert vm is not None, "VM is None"
            migrationItem = find(lambda migration_item: migration_item.instance_id == vm.InstanceName, plan)
            assert migrationItem is not None, "Migration item is None"

            if vm.Hostname != migrationItem.hostname:
                migrationCount += 1
                self.updateHostVmConn(vm, migrationItem)
            else:
                selfMigrations.append(migrationItem)

        for mi in selfMigrations:
            plan.remove(mi)

        return plan, migrationCount

    def updateHostVmConn(self, vm, migrationItem):
        """Move *vm* from its current host to the migration target in the
        in-memory host/VM topology kept by the data provider."""
        hosts = self.dataProvider.hosts

        assert migrationItem.hostname in hosts, "data provider has no host specified in migration item"
        assert vm.Hostname in hosts, "data provider has no host specified in vm"

        source_host = hosts[vm.Hostname]
        target_host = hosts[migrationItem.hostname]

        source_host._vms.remove(vm)
        target_host._vms.append(vm)

    def execute_plan(self, plan):
        """
        Executes migration plan. Migrate VMs to given nodes.
        :param plan: list of MigrationItem objects
        :return:
        """

        try:
            if not self.scheduler_rpc_api:
                self._init_scheduler()

            #            assert isinstance(migrationPlans, list)
            #            if migrationPlans:
            #                plan = migrationPlans[0]
            #            else:
            #                LOG.info("There is no migration plans")
            #                return

            ctx = context.get_admin_context()
            # NOTE(review): instances are fetched with self.ctx while the
            # migration RPC below uses the fresh ctx — confirm which
            # context is intended.
            instances = self.db.instance_get_all(self.ctx)

            for migrationItem in plan:
                assert isinstance(migrationItem, MigrationItem)
                # if 0:self.db=db_api # Stupid hack for code completion in ide

                instance = self._get_instance(migrationItem.instance_id, instances)
                assert instance is not None

                # Skip VMs that are already on their target host.
                if instance["host"] == migrationItem.hostname:
                    continue

                # NOTE(review): migration_status is never inspected; the
                # RPC result is effectively discarded.
                migration_status = self.scheduler_rpc_api.live_migration(
                    ctxt=ctx,
                    block_migration=self.migration_settings.block_migration,
                    disk_over_commit=self.migration_settings.disk_over_commit,
                    instance=instance,
                    dest=migrationItem.hostname,
                )

        except:
            # NOTE(review): bare catch-and-re-raise adds nothing; the
            # try/except could be removed without changing behavior.
            raise

    def _get_instance(self, name, instances):
        """Return the instance whose name matches, or None when absent."""
        matching = (candidate for candidate in instances if candidate.name == name)
        return next(matching, None)

    def collect_data(self, hostname, vm_name, resource):
        """
            Collect historical data about resource utilization for given node (hostname/virtual machine).

            Currently it's implemented to retrieve data from RRD's files.
        :return:
        """

        # node_topic = '%s.%s' % (HealthMonitorNodeAPI.HEALTH_MONITOR_NODE_TOPIC, hostname)

        # NOTE(review): self.local_storage is not initialised in init_host;
        # the first call raises AttributeError unless it is set elsewhere.
        if self.local_storage is None:
            self.local_storage = RrdWrapper(self.RRD_ROOT_DIR)

        node = "%s.%s" % (hostname, vm_name)

        endTime = datetime.datetime.now()
        startTime = endTime - datetime.timedelta(hours=1)  # TODO: Move to configuration file customizable timedelta

        # NOTE(review): the query result is discarded and None is returned
        # unconditionally — confirm whether the result should be returned.
        self.local_storage.query(startTime, endTime, resource, node)

        return None

    def collect_data_remote(self, hostname, vm_name, resource):
        """
            Collect data from network (AMQP). Not Implemented
        :param hostname:
        :param vm_name:
        :param resource:
        :return:
        """
        # BUGFIX: `raise NotImplemented` raises a TypeError because
        # NotImplemented is a singleton, not an exception class;
        # NotImplementedError is the correct exception for an
        # unimplemented method.
        raise NotImplementedError

        # Unreachable sketch of the intended implementation, kept for
        # reference.
        health_rpc_api = HealthMonitorNodeAPI(hostname)

        if health_rpc_api is None:
            raise Exception("Unable to get health_monitor_node RPC API object")

        message = {"resource": resource, "vm_name": vm_name}

        return health_rpc_api.collect_recent_stats(self.ctx, message)

    def _test_rpc_call(self):
        """Debug helper: round-trip a hard-coded stats request over RPC
        and log the reply (referenced, commented out, in init_host)."""

        health_monitor_node_rpc_api = HealthMonitorNodeAPI(self.host)
        message = {"resource": "RAM", "vm_name": "SEMY"}

        result = health_monitor_node_rpc_api.collect_recent_stats(self.ctx, message)
        LOG.info("Received: %s" % result)

    def test_migration(self):
        """
        Executes migration plan. Migrate VMs to given nodes.
        :return:
        """
        # NOTE(review): this debug helper is broken as written — `instance`
        # (asserted below) and `migration_status` (logged at the end) are
        # never assigned, so both lines raise NameError. The live_migration
        # call that would have bound them is commented out, and `selected`
        # is assigned but unused.

        instance_uuid = "3974a5b5-39d4-4bcf-a12d-a1a17bdf2341"
        hostname = "lab-os-1"

        if not self.scheduler_rpc_api:
            self._init_scheduler()

        ctx = context.get_admin_context()

        if 0:
            self.db = db_api  # Stupid hack for code completion in ide

        #        self.db.instance_get_by_uuid(self.ctx, instance_uuid)
        instances = self.db.instance_get_all(ctx)

        selected = None

        assert isinstance(instance, nova.db.sqlalchemy.models.Instance)

        #        migration_status = self.scheduler_rpc_api.live_migration(ctxt=ctx,
        #                                                                 block_migration=self.migration_settings.block_migration,
        #                                                                 disk_over_commit=self.migration_settings.disk_over_commit,
        #                                                                 instance=instance,
        #                                                                 dest=hostname)

        LOG.error("Migration status %s" % migration_status)