Example 1
def worker_setup_node():
    while True:
        node = node_q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        Helper.safe_print("Start to deploy %(hostname)s\n" %
                          {'hostname': node.hostname})
        if node.role == const.ROLE_NEUTRON_SERVER:
            Helper.run_command_on_remote(node, (
                r'''/bin/bash %(dst_dir)s/%(hostname)s_ospurge.sh >> %(log)s 2>&1'''
                % {
                    'dst_dir': node.dst_dir,
                    'hostname': node.hostname,
                    'log': node.log
                }))
        Helper.run_command_on_remote(
            node,
            (r'''/bin/bash %(dst_dir)s/%(hostname)s.sh >> %(log)s 2>&1''' % {
                'dst_dir': node.dst_dir,
                'hostname': node.hostname,
                'log': node.log
            }))
        Helper.safe_print("Finish deploying %(hostname)s\n" %
                          {'hostname': node.hostname})
        node_q.task_done()
Example 2
    def _create_reachability_graph(self):
        queue = [(0, 0, self.transitions)]
        states_list = [(0, 0, self._get_network_state(self.transitions), {}, None)]
        stop_condition = 20
        state_id = 0
        while queue and state_id < stop_condition:
            state = queue.pop(0)
            parent_state_id = state[0]
            transitions = state[2]

            transitions_ids_to_do = []
            for transition in transitions:
                if transition.is_doable():
                    priority_queue = Helper.get_competitive_transitions_priority_queue(transitions, transition)
                    if priority_queue.get() is transition:
                        transitions_ids_to_do.append(transition.id)

            for transition_id in transitions_ids_to_do:
                new_transitions_state = deepcopy(transitions)
                transition = Helper.find_transition_by_id(new_transitions_state, transition_id)
                transition.run_transition()
                new_network_state = self._get_network_state(new_transitions_state)
                found_state = self._find_state_based_on_network_state(states_list, new_network_state)

                if found_state is not None:
                    id = found_state[0]
                    parent_network_state = self._find_state_by_id(states_list, parent_state_id)
                    parent_network_state[3][id] = transition.id
                else:
                    state_id += 1
                    states_list.append((state_id, parent_state_id, new_network_state, {}, transition.id))
                    queue.append((state_id, parent_state_id, new_transitions_state))

        return states_list
    def split_only_sequential(self, URM, URM_df):

        helper = Helper()

        sequential_playlists = helper.get_target_playlists_list()[:5000]
        selected_playlists = np.array([])

        self.target_playlists = sequential_playlists

        grouped = URM_df.groupby(
            'playlist_id', as_index=True).apply(lambda x: list(x['track_id']))

        relevant_items = defaultdict(list)
        for playlist_id in sequential_playlists:

            # tracks = list of the playlist's tracks taken from the URM
            tracks = np.array(grouped[playlist_id])
            to_be_removed = int(len(tracks) * 0.2)

            # Returns the last #to_be_removed tracks in sequential order and removes them from the track list
            to_be_removed_tracks = helper.get_sorted_tracks_in_playlist(
                playlist_id)[-to_be_removed:]
            for track in to_be_removed_tracks:
                relevant_items[playlist_id].append(track)
                tracks = np.delete(tracks, np.where(tracks == track))
            grouped[playlist_id] = tracks

        all_tracks = self.tracks_df["track_id"].unique()
        matrix = MultiLabelBinarizer(classes=all_tracks,
                                     sparse_output=True).fit_transform(grouped)
        self.URM_train = matrix.tocsr()
        self.URM_train = self.URM_train.astype(np.float64)
        self.dict_test = relevant_items
    def split_randomic_exactly_last(self, URM, URM_df):
        # split the URM into a test set and a train set
        selected_playlists = np.array([])

        helper = Helper()

        self.target_playlists = helper.get_target_playlists_list()[5000:]
        selected_playlists = self.target_playlists
        grouped = URM_df.groupby(
            'playlist_id', as_index=True).apply(lambda x: list(x['track_id']))

        relevant_items = defaultdict(list)
        count = 0
        for playlist_id in selected_playlists:
            tracks = np.array(grouped[playlist_id])
            to_be_removed = int(len(tracks) * 0.2)
            for i in range(to_be_removed):
                index = randint(0, len(tracks) - 1)
                removed_track = tracks[index]
                relevant_items[playlist_id].append(removed_track)
                tracks = np.delete(tracks, index)
            grouped[playlist_id] = tracks
            count += 1

        all_tracks = self.tracks_df["track_id"].unique()

        matrix = MultiLabelBinarizer(classes=all_tracks,
                                     sparse_output=True).fit_transform(grouped)
        self.URM_train = matrix.tocsr()
        self.URM_train = self.URM_train.astype(np.float64)
        self.dict_test = relevant_items
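Both split methods above rebuild the train URM the same way: the per-playlist track lists are binarized into a sparse matrix with scikit-learn's MultiLabelBinarizer. A minimal, self-contained sketch of just that step, with made-up playlist data (the column order follows the classes argument):

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer

# hypothetical data: three playlists and the tracks left in them after the split
grouped = [[0, 2, 3], [1, 3], [0, 4]]
all_tracks = np.arange(5)  # full track catalogue; fixes the column order

# one row per playlist, one column per track, 1 where the track is kept
matrix = MultiLabelBinarizer(classes=all_tracks,
                             sparse_output=True).fit_transform(grouped)
URM_train = matrix.tocsr().astype(np.float64)
print(URM_train.toarray())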
Example 5
    def recommend(self, playlist_id, at=10):
        playlist_id = int(playlist_id)
        helper = Helper()

        ### DUE TO TIME CONSTRAINTS THE CODE STRUCTURE HERE IS REDUNDANT
        ### TODO exploit inheritance to reduce code duplication and simply extract ratings and combine them by iterating over a list of recommenders

        ### COMMON CODE ###
        self.hybrid_ratings = None  #BE CAREFUL, MAGIC INSIDE :)

        ### COMBINE RATINGS IN DIFFERENT WAYS (seq, random short, random long)
        if (helper.is_sequential(playlist_id)):
            self.userCF_ratings = self.userCF_sequential.get_expected_ratings(
                playlist_id)
            self.itemCF_ratings = self.itemCF_sequential.get_expected_ratings(
                playlist_id)
            self.cbf_ratings = self.cbf_sequential.get_expected_ratings(
                playlist_id)
            self.slim_elastic_ratings = self.slim_elastic_sequential.get_expected_ratings(
                playlist_id)
            # self.svd_icm_ratings = self.svd_icm_sequential.get_expected_ratings(playlist_id)
            self.ALS_ratings = self.ALS_sequential.get_expected_ratings(
                playlist_id)
            self.slim_ratings = self.slim_sequential.get_expected_ratings(
                playlist_id)
            w_right = self.w_seq
        else:
            self.userCF_ratings = self.userCF.get_expected_ratings(playlist_id)
            self.itemCF_ratings = self.itemCF.get_expected_ratings(playlist_id)
            self.cbf_ratings = self.cbf.get_expected_ratings(playlist_id)
            self.slim_elastic_ratings = self.slim_elastic.get_expected_ratings(
                playlist_id)
            # self.svd_icm_ratings = self.svd_icm.get_expected_ratings(playlist_id)
            self.ALS_ratings = self.ALS.get_expected_ratings(playlist_id)
            self.slim_ratings = self.slim_random.get_expected_ratings(
                playlist_id)
            if len(self.URM[playlist_id].indices) > 10:
                w_right = self.w_long
            else:
                w_right = self.w_short

        self.hybrid_ratings = self.userCF_ratings * w_right["user_cf"]
        self.hybrid_ratings += self.itemCF_ratings * w_right["item_cf"]
        self.hybrid_ratings += self.cbf_ratings * w_right["cbf"]
        self.hybrid_ratings += self.slim_ratings * w_right["slim"]
        # self.hybrid_ratings += self.svd_icm_ratings * w_right["svd_icm"]
        self.hybrid_ratings += self.ALS_ratings * w_right["als"]
        self.hybrid_ratings += self.slim_elastic_ratings * w_right["elastic"]

        recommended_items = np.flip(np.argsort(self.hybrid_ratings), 0)

        # REMOVING SEEN
        unseen_items_mask = np.in1d(recommended_items,
                                    self.URM[playlist_id].indices,
                                    assume_unique=True,
                                    invert=True)
        recommended_items = recommended_items[unseen_items_mask]

        return recommended_items[0:at]
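The recommend method above is essentially a weighted sum of the per-model rating vectors, an argsort by score, and a mask that drops items the playlist already contains. The core of that pattern, reduced to plain numpy with hypothetical ratings and weights:

import numpy as np

# hypothetical expected-rating vectors from two recommenders (one score per item)
userCF_ratings = np.array([0.1, 0.7, 0.3, 0.0, 0.9])
itemCF_ratings = np.array([0.4, 0.2, 0.8, 0.1, 0.5])
w_right = {"user_cf": 0.6, "item_cf": 0.4}

hybrid_ratings = userCF_ratings * w_right["user_cf"]
hybrid_ratings += itemCF_ratings * w_right["item_cf"]

# rank items by descending hybrid score
recommended_items = np.flip(np.argsort(hybrid_ratings), 0)

# remove seen items (their column indices in the playlist's URM row)
seen = np.array([1, 4])
unseen_items_mask = np.in1d(recommended_items, seen,
                            assume_unique=True, invert=True)
recommended_items = recommended_items[unseen_items_mask]
print(recommended_items[:10])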
Example 6
File: bosi.py Project: xinwu/bosi-1
def certify_node_setup(q):
    while True:
        node = q.get()
        if node.certificate_dir:
            if not os.path.isfile("%s/ca.cert" % node.certificate_dir):
                safe_print("Missing ca.cert in %s\n" % node.certificate_dir)
                break
        Helper.certify_node(node)
        q.task_done()
Example 7
def certify_node_setup(q):
    while True:
        node = q.get()
        if node.certificate_dir:
            if not os.path.isfile("%s/ca.cert" % node.certificate_dir):
                safe_print("Missing ca.cert in %s\n" % node.certificate_dir)
                break
        Helper.certify_node(node)
        q.task_done()
Example 8
def setup_sriov(node_dic):
    for hostname, node in node_dic.iteritems():
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                'fqdn': node.fqdn,
                'error': node.error
            })
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue
        if node.role != const.ROLE_SRIOV:
            safe_print("Skipping node %(hostname)s because deployment mode is "
                       "SRIOV and role set for node is not SRIOV. It is "
                       "%(role)s\n" % {
                           'hostname': hostname,
                           'role': node.role
                       })
            continue
        if node.os != const.REDHAT:
            safe_print("Skipping node %(hostname)s because deployment mode is "
                       "SRIOV and non REDHAT OS is not supported. OS set for "
                       "node is %(os)s\n" % {
                           'hostname': hostname,
                           'os': node.os
                       })
            continue

        # all okay, generate scripts for node
        Helper.generate_sriov_scripts_for_redhat(node)
        node_q.put(node)

    with open(const.LOG_FILE, "a") as log_file:
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_upgrade_or_sriov_node,
                             args=(node_q, ))
        t.daemon = True
        t.start()
    node_q.join()

    sorted_time_dict = OrderedDict(
        sorted(time_dict.items(), key=lambda x: x[1]))
    for fqdn, h_time in sorted_time_dict.items():
        safe_print("node: %(fqdn)s, time: %(time).2f\n" % {
            'fqdn': fqdn,
            'time': h_time
        })

    safe_print("Big Cloud Fabric deployment finished! "
               "Check %(log)s on each node for details.\n" %
               {'log': const.LOG_FILE})
    def _create_transitions(self, transitions):
        for transition in transitions:
            links_in = []
            links_out = []
            for conn_id in transition["links_in_ids"]:
                links_in.append(Helper.find_link_by_id(self.links, conn_id))
            for conn_id in transition["links_out_ids"]:
                links_out.append(Helper.find_link_by_id(self.links, conn_id))

            transition_obj = Transition(links_in, links_out, transition["priority"], transition["id"], transition["name"])
            self.transitions.append(transition_obj)
Example 10
    def split_cluster_randomic_only_last(self, URM, URM_df):
        # split the URM into a test set and a train set
        segment = 1
        selected_playlists = np.array([])
        available_playlists = np.arange(URM.shape[0])
        target_analyzer = TargetAnalyzer()

        # Gets distribution of only last 5000 playlists
        dist = target_analyzer.get_distribution_array_only_last(segment)

        helper = Helper()
        target_playlists = helper.get_target_playlists_list(
        )[:5000]  # WILL REMOVE THEM

        print("Clustering with segment = " + str(segment))
        for key in tqdm(range(len(dist))):
            while dist[key] != 0:
                random_index = randint(0, len(available_playlists) - 1)
                playlist_id = available_playlists[random_index]
                target_segment = int(0.8 * len(URM[playlist_id].data))
                if target_segment == key and playlist_id not in target_playlists:
                    available_playlists = np.delete(
                        available_playlists,
                        np.where(available_playlists == playlist_id))
                    selected_playlists = np.append(selected_playlists,
                                                   playlist_id)
                    dist[key] -= 1

        self.target_playlists = selected_playlists.astype(int)
        grouped = URM_df.groupby(
            'playlist_id', as_index=True).apply(lambda x: list(x['track_id']))

        relevant_items = defaultdict(list)

        for playlist_id in selected_playlists:
            # tracks = list of the playlist's tracks taken from the URM
            tracks = np.array(grouped[playlist_id])

            to_be_removed = int(len(tracks) * 0.2)
            for i in range(to_be_removed):
                index = randint(0, len(tracks) - 1)
                removed_track = tracks[index]
                relevant_items[playlist_id].append(removed_track)
                tracks = np.delete(tracks, index)
            grouped[playlist_id] = tracks

        all_tracks = self.tracks_df["track_id"].unique()
        matrix = MultiLabelBinarizer(classes=all_tracks,
                                     sparse_output=True).fit_transform(grouped)
        self.URM_train = matrix.tocsr()
        self.URM_train = self.URM_train.astype(np.float64)
        self.dict_test = relevant_items
Example 11
    def _create_transitions(self, transitions):
        for transition in transitions:
            links_in = []
            links_out = []
            for conn_id in transition["links_in_ids"]:
                links_in.append(Helper.find_link_by_id(self.links, conn_id))
            for conn_id in transition["links_out_ids"]:
                links_out.append(Helper.find_link_by_id(self.links, conn_id))

            transition_obj = Transition(links_in, links_out,
                                        transition["priority"],
                                        transition["id"], transition["name"])
            self.transitions.append(transition_obj)
Example 12
    def _create_coverability_graph(self):
        queue = [(0, 0, self.transitions)]
        states_list = [(0, 0, self._get_network_state(self.transitions), {},
                        None)]
        stop_condition = 20
        state_id = 0
        while queue and state_id < stop_condition:
            state = queue.pop(0)
            parent_state_id = state[0]
            transitions = state[2]

            transitions_ids_to_do = []
            for transition in transitions:
                if transition.is_doable():
                    priority_queue = Helper.get_competitive_transitions_priority_queue(
                        transitions, transition)
                    if priority_queue.get() is transition:
                        transitions_ids_to_do.append(transition.id)

            for transition_id in transitions_ids_to_do:
                new_transitions_state = deepcopy(transitions)
                transition = Helper.find_transition_by_id(
                    new_transitions_state, transition_id)
                transition.run_transition()
                new_network_state = self._get_network_state(
                    new_transitions_state)

                state_id += 1
                states_list_element = (state_id, parent_state_id,
                                       new_network_state, {}, transition.id)

                predecessors = self._get_predecessors(states_list,
                                                      states_list_element)
                self._check_infinite_state(predecessors, new_network_state)
                found_state = self._find_state_based_on_network_state(
                    states_list, new_network_state)

                if found_state is not None:
                    state_id -= 1
                    id = found_state[0]
                    parent_network_state = self._find_state_by_id(
                        states_list, parent_state_id)
                    parent_network_state[3][id] = transition.id
                else:
                    states_list.append(states_list_element)
                    queue.append(
                        (state_id, parent_state_id, new_transitions_state))

        return states_list
Example 13
    def is_network_reversible(self):
        for state in self.states_list:
            states_queue = [state[0]]
            if self.__is_root(state):
                continue

            found_root_state = False
            reached_states = []
            while states_queue:
                state_id = states_queue.pop(0)
                state_obj = Helper.find_state_by_id(self.states_list, state_id)
                if self.__is_root(state_obj):
                    found_root_state = True
                    break

                reached_states.append(state_id)
                print 'Reached state: ', reached_states
                print 'Children: ', self.__find_children(state_obj)
                filtered_state_children = list(set(self.__find_children(state_obj))-set(reached_states)-set(states_queue))
                print 'FilteredStateChildren: ', list(set(self.__find_children(state_obj))-set(reached_states)-set(states_queue))
                states_queue.extend(filtered_state_children)

            print '-------------------------------'
            if not found_root_state:
                return False

        return True
Example 14
 def _create_links(self, links):
     for link in links:
         link_obj = Connector(
             link["id"],
             Helper.find_place_by_id(self.places, link["place_id"]),
             link["direction"], link["weight"])
         self.links.append(link_obj)
Example 15
    def is_network_reversible(self):
        for state in self.states_list:
            states_queue = [state[0]]
            if self.__is_root(state):
                continue

            found_root_state = False
            reached_states = []
            while states_queue:
                state_id = states_queue.pop(0)
                state_obj = Helper.find_state_by_id(self.states_list, state_id)
                if self.__is_root(state_obj):
                    found_root_state = True
                    break

                reached_states.append(state_id)
                print 'Reached state: ', reached_states
                print 'Children: ', self.__find_children(state_obj)
                filtered_state_children = list(
                    set(self.__find_children(state_obj)) -
                    set(reached_states) - set(states_queue))
                print 'FilteredStateChildren: ', list(
                    set(self.__find_children(state_obj)) -
                    set(reached_states) - set(states_queue))
                states_queue.extend(filtered_state_children)

            print '-------------------------------'
            if not found_root_state:
                return False

        return True
	def __init__(self, **kwargs):
		super(WindowApp, self).__init__(**kwargs)
		#create recognizer class
		self.recognizer = Recognizer()
		#load templates to recognizer
		self.templates = Helper.returnTemplates()
		self.loadTemplates()
		#store current gesture
		self.gesture = []
 def on_touch_up(self, touch):
     self.gesture.append(touch.userdata["trace"])
     if len(getCurrentTouches()) == 0:
         templatePath = Helper.returnTemplatePath()
         output = open(templatePath, "wb")
         pickle.dump(self.gesture, output)
         output.close()
         print templatePath + " saved..."
         self.gesture = []
Example 18
def worker_setup_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" % {'fqdn': node.fqdn})
        if node.cleanup and node.role == const.ROLE_NEUTRON_SERVER:
            Helper.run_command_on_remote(
                node, (r'''sudo bash %(dst_dir)s/%(hostname)s_ospurge.sh''' % {
                    'dst_dir': node.dst_dir,
                    'hostname': node.hostname,
                    'log': node.log
                }))

        # a random delay to smooth apt-get/yum
        delay = random.random() * 10.0
        time.sleep(delay)

        start_time = datetime.datetime.now()
        Helper.run_command_on_remote(
            node, (r'''sudo bash %(dst_dir)s/%(hostname)s.sh''' % {
                'dst_dir': node.dst_dir,
                'hostname': node.hostname,
                'log': node.log
            }))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        # when deploying T5 on UBUNTU, reboot compute nodes
        Helper.reboot_if_necessary(node)

        safe_print("Finish deploying %(fqdn)s, cost time: %(diff).2f\n" % {
            'fqdn': node.fqdn,
            'diff': node.time_diff
        })
        q.task_done()
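The worker functions in these bosi.py deployment examples all rely on the same standard-library pattern: daemon threads pull nodes from a Queue, call task_done() once per item, and the caller blocks on join() until every node has been processed. A stripped-down, runnable sketch of that pattern (Python 3 queue module; the originals target Python 2), with the actual deployment replaced by a placeholder:

import queue
import threading

MAX_WORKERS = 4
node_q = queue.Queue()

def worker_setup_node(q):
    while True:
        node = q.get()
        # placeholder for the real work (copy scripts, run the remote script, ...)
        print("deploying %s" % node)
        q.task_done()

for name in ("controller-1", "compute-1", "compute-2"):
    node_q.put(name)

for _ in range(MAX_WORKERS):
    t = threading.Thread(target=worker_setup_node, args=(node_q,))
    t.daemon = True  # workers loop forever; daemon threads let the process exit
    t.start()

node_q.join()  # returns once task_done() has been called for every queued node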
Example 19
    def split_randomic_all_playlists_longer_10000(self,
                                                  URM,
                                                  URM_df,
                                                  threshold_length=10):
        # split the URM into a test set and a train set
        selected_playlists = np.array([])
        available_playlists = np.arange(URM.shape[0])

        helper = Helper()
        target_playlists_kaggle = helper.get_target_playlists_list()
        for playlist_id in available_playlists:
            if len(selected_playlists) == 10000:
                break
            if playlist_id not in target_playlists_kaggle and len(
                    URM[playlist_id].indices) > threshold_length:
                selected_playlists = np.append(selected_playlists, playlist_id)

        self.target_playlists = selected_playlists.astype(int)

        grouped = URM_df.groupby(
            'playlist_id', as_index=True).apply(lambda x: list(x['track_id']))

        relevant_items = defaultdict(list)

        for playlist_id in selected_playlists:
            tracks = np.array(grouped[playlist_id])

            to_be_removed = int(len(tracks) * 0.2)
            for i in range(to_be_removed):
                index = randint(0, len(tracks) - 1)
                removed_track = tracks[index]
                relevant_items[playlist_id].append(removed_track)
                tracks = np.delete(tracks, index)
            grouped[playlist_id] = tracks

        all_tracks = self.tracks_df["track_id"].unique()

        matrix = MultiLabelBinarizer(classes=all_tracks,
                                     sparse_output=True).fit_transform(grouped)
        self.URM_train = matrix.tocsr()
        self.URM_train = self.URM_train.astype(np.float64)
        self.dict_test = relevant_items
Example 20
File: bosi.py Project: zoirboy/bosi
def worker_upgrade_or_sriov_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" %
                   {'fqdn': node.fqdn})

        start_time = datetime.datetime.now()
        if node.role == const.ROLE_SRIOV:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_sriov.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))
        elif node.role in const.DPDK_ROLES:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_dpdk.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))
        else:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_upgrade.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        safe_print("Finish executing script for node %(fqdn)s, "
                   "cost time: %(diff).2f\n" %
                   {'fqdn': node.fqdn, 'diff': node.time_diff})
        q.task_done()
Example 21
    def seek(cls, query):
        url = cls.get_url(query)
        raw_content = cls.get_raw_content(url)
        soup = cls.get_soup(raw_content)

        songs = cls.parse(soup)

        if not Helper.array_of(songs, Song):
            raise ValueError('parse() response must be an array of Song objects')

        return songs
Example 22
File: bosi.py Project: xinwu/bosi-1
def worker_setup_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" %
                   {'fqdn': node.fqdn})
        if node.cleanup and node.role == const.ROLE_NEUTRON_SERVER:
            Helper.run_command_on_remote(node,
                (r'''sudo bash %(dst_dir)s/%(hostname)s_ospurge.sh''' %
                {'dst_dir': node.dst_dir,
                 'hostname': node.hostname,
                 'log': node.log}))

        # a random delay to smooth apt-get/yum
        delay = random.random() * 10.0
        time.sleep(delay)

        start_time = datetime.datetime.now()
        Helper.run_command_on_remote(node,
            (r'''sudo bash %(dst_dir)s/%(hostname)s.sh''' %
            {'dst_dir': node.dst_dir,
             'hostname': node.hostname,
             'log': node.log}))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        # when deploying T5 on UBUNTU, reboot compute nodes
        Helper.reboot_if_necessary(node)

        safe_print("Finish deploying %(fqdn)s, cost time: %(diff).2f\n" %
                   {'fqdn': node.fqdn, 'diff': node.time_diff})
        q.task_done()
Example 23
def main():
    # Check if network is working properly
    code = subprocess.call("ping www.bigswitch.com -c1", shell=True)
    if code != 0:
        Helper.safe_print("Network is not working properly, quit deployment\n")
        exit(1)

    # Parse configuration
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config-file", required=True,
                        help="BCF YAML configuration file")
    parser.add_argument('-f', "--fuel-cluster-id", required=False,
                        help="Fuel cluster ID. Fuel settings may override YAML configuration. Please refer to config.yaml")
    parser.add_argument('-t', "--tag", required=False,
                        help="Deploy to tagged nodes only.")
    parser.add_argument('--cleanup', action='store_true', default=False,
                        help="Clean up existing routers, networks and projects.")
    args = parser.parse_args()
    with open(args.config_file, 'r') as config_file:
        config = yaml.load(config_file)
    deploy_bcf(config, args.fuel_cluster_id, args.tag, args.cleanup)
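One note on the configuration parsing above: in current PyYAML, yaml.load without an explicit Loader is deprecated and emits a warning. A hedged equivalent for a plain key/value configuration file (the filename here is just an example):

import yaml

with open("config.yaml", "r") as config_file:
    config = yaml.safe_load(config_file)  # safe_load needs no Loader argument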
Example 24
File: bosi.py Project: xinwu/bosi-1
def worker_upgrade_node(q):
    while True:
        node = q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        safe_print("Start to deploy %(fqdn)s\n" %
                   {'fqdn': node.fqdn})

        start_time = datetime.datetime.now()
        Helper.run_command_on_remote(node,
            (r'''sudo bash %(dst_dir)s/%(hostname)s_upgrade.sh''' %
            {'dst_dir': node.dst_dir,
             'hostname': node.hostname,
             'log': node.log}))
        end_time = datetime.datetime.now()

        # parse setup log
        diff = Helper.timedelta_total_seconds(end_time - start_time)
        node.set_time_diff(diff)
        node = Helper.update_last_log(node)
        node_dict[node.fqdn] = node
        time_dict[node.fqdn] = diff

        safe_print("Finish upgrading %(fqdn)s, cost time: %(diff).2f\n" %
                   {'fqdn': node.fqdn, 'diff': node.time_diff})
        q.task_done()
Example 25
    def simulate(self):
        live_transitions = self.__get_live_transitions()

        if not live_transitions:
            return self.__json_type_wrapper(RequestType.END)

        transition = random.sample(live_transitions, 1)[0]

        priority_queue = Helper.get_competitive_transitions_priority_queue(self.transitions, transition)
        transition = priority_queue.get()
        transition.run_transition()

        return self.__json_type_wrapper(RequestType.SIMULATE, transition.to_json(), True)
Example 26
 def sign(data: object, private_key: str) -> str:
   """
   signs a message
   @param data: (object) message to sign
   @param private_key: (str) private key to sign with
   """
   private_key_bytes = serialization.load_pem_private_key(
     bytes.fromhex(private_key),
     password=None,
     backend=default_backend()
   )
   data_bytes = Helper.object_to_bytes(data)
   signature = private_key_bytes.sign(data_bytes, ec.ECDSA(hashes.SHA256()))
   return signature.hex()
Example 27
def worker_setup_node():
    while True:
        node = node_q.get()
        # copy ivs pkg to node
        Helper.copy_pkg_scripts_to_remote(node)

        # deploy node
        Helper.safe_print("Start to deploy %(hostname)s\n" %
                         {'hostname' : node.hostname})
        if node.role == const.ROLE_NEUTRON_SERVER:
            Helper.run_command_on_remote(node,
                (r'''/bin/bash %(dst_dir)s/%(hostname)s_ospurge.sh >> %(log)s 2>&1''' %
                {'dst_dir'  : node.dst_dir,
                 'hostname' : node.hostname,
                 'log'      : node.log}))
        Helper.run_command_on_remote(node,
            (r'''/bin/bash %(dst_dir)s/%(hostname)s.sh >> %(log)s 2>&1''' %
            {'dst_dir'  : node.dst_dir,
             'hostname' : node.hostname,
             'log'      : node.log}))
        Helper.safe_print("Finish deploying %(hostname)s\n" %
                         {'hostname' : node.hostname})
        node_q.task_done()
Example 28
    def print_search_content(self,
                             content,
                             show_authors=False,
                             show_type=False,
                             show_publisher=False,
                             show_url=False):
        base_template = "{score:.2f} - {year:4d} - {cfg_doi}{doi:40}{cfg_end} \
- {cfg_title}{title}{cfg_end}"

        template = base_template

        if show_authors:
            template += "\n  {cfg_more}AUTHORS{cfg_end}   : {authors}"
        if show_type:
            template += "\n  {cfg_more}TYPE{cfg_end}      : {type}"
        if show_publisher:
            template += "\n  {cfg_more}PUBLISHER{cfg_end} : {publisher}"
        if show_url:
            template += "\n  {cfg_more}URL{cfg_end}       : {url}"

        for result in content.get('items', ()):
            sr = SearchResult(result)
            payload = {
                "score": sr.get_score(),
                "year": sr.get_year(),
                "doi": sr.get_doi().get_identifier(),
                "title": sr.get_title(),
                "authors": sr.get_authors(),
                "type": sr.get_type(),
                "publisher": sr.get_publisher(),
                "url": sr.get_url(),
                "cfg_more": '',
                "cfg_end": '',
                "cfg_doi": '',
                "cfg_title": '',
            }

            if self.colored_output:
                color_options = [
                    ("cfg_doi", self.color_doi),
                    ("cfg_title", self.color_title),
                    ("cfg_more", self.color_more),
                    ("cfg_end", 'reset'),
                ]
                for key, value in color_options:
                    payload[key] = Helper.get_fg_colorcode_by_identifier(value)

            print(template.format(**payload))
Example 29
File: bosi.py Project: xinwu/bosi-1
def verify_node_setup(q):
    while True:
        node = q.get()
        all_service_status = 'Service status for node: ' + node.fqdn
        # check services are running and IVS version is correct
        if node.deploy_dhcp_agent:
            dhcp_status = Helper.check_os_service_status(
                node, "neutron-dhcp-agent")
            all_service_status = (all_service_status +
                                  ' | DHCP Agent ' + dhcp_status)
            metadata_status = Helper.check_os_service_status(
                node, "neutron-metadata-agent")
            all_service_status = (all_service_status +
                                  ' | Metadata Agent ' + metadata_status)
        if node.deploy_l3_agent and node.deploy_mode == const.T5:
            l3_status = Helper.check_os_service_status(
                node, "neutron-l3-agent")
            all_service_status = (all_service_status +
                                  ' | L3 Agent ' + l3_status)
        # for T5 deployment, check LLDP service status on compute nodes
        if node.deploy_mode == const.T5 and node.role != const.ROLE_NEUTRON_SERVER:
            lldp_status = Helper.check_os_service_status(node, "send_lldp")
            all_service_status = (all_service_status +
                                  ' | LLDP Service ' + lldp_status)
        # for T6 deployment, check IVS status and version too
        if node.deploy_mode == const.T6:
            # check ivs status and version
            ivs_status = Helper.check_os_service_status(node, "ivs")
            if ivs_status == ':-)':
                # ivs is OK. check version
                ivs_version = Helper.check_ivs_version(node)
                all_service_status = (all_service_status +
                                      ' | IVS version ' + ivs_version)
            else:
                # ivs not OK
                all_service_status = (all_service_status +
                                      ' | IVS ' + ivs_status)
            # check neutron-bsn-agent status
            bsn_agent_status = Helper.check_os_service_status(
                node, "neutron-bsn-agent")
            all_service_status = (all_service_status +
                                  ' | BSN Agent ' + bsn_agent_status)
        # after forming the complete string, put in respective list
        if ":-(" not in all_service_status:
            node_pass[node.fqdn] = all_service_status
        else:
            node_fail[node.fqdn] = all_service_status
        q.task_done()
Example 30
def verify_node_setup(q):
    while True:
        node = q.get()
        all_service_status = 'Service status for node: ' + node.fqdn
        # check services are running and IVS version is correct
        if node.deploy_dhcp_agent:
            dhcp_status = Helper.check_os_service_status(
                node, "neutron-dhcp-agent")
            all_service_status = (all_service_status + ' | DHCP Agent ' +
                                  dhcp_status)
            metadata_status = Helper.check_os_service_status(
                node, "neutron-metadata-agent")
            all_service_status = (all_service_status + ' | Metadata Agent ' +
                                  metadata_status)
        if node.deploy_l3_agent and node.deploy_mode == const.T5:
            l3_status = Helper.check_os_service_status(node,
                                                       "neutron-l3-agent")
            all_service_status = (all_service_status + ' | L3 Agent ' +
                                  l3_status)
        # for T5 deployment, check LLDP service status on compute nodes
        if node.deploy_mode == const.T5 and node.role != const.ROLE_NEUTRON_SERVER:
            lldp_status = Helper.check_os_service_status(node, "send_lldp")
            all_service_status = (all_service_status + ' | LLDP Service ' +
                                  lldp_status)
        # for T6 deployment, check IVS status and version too
        if node.deploy_mode == const.T6:
            # check ivs status and version
            ivs_status = Helper.check_os_service_status(node, "ivs")
            if ivs_status == ':-)':
                # ivs is OK. check version
                ivs_version = Helper.check_ivs_version(node)
                all_service_status = (all_service_status + ' | IVS version ' +
                                      ivs_version)
            else:
                # ivs not OK
                all_service_status = (all_service_status + ' | IVS ' +
                                      ivs_status)
            # check neutron-bsn-agent status
            bsn_agent_status = Helper.check_os_service_status(
                node, "neutron-bsn-agent")
            all_service_status = (all_service_status + ' | BSN Agent ' +
                                  bsn_agent_status)
        # after forming the complete string, put in respective list
        if ":-(" not in all_service_status:
            node_pass[node.fqdn] = all_service_status
        else:
            node_fail[node.fqdn] = all_service_status
        q.task_done()
Example 31
 def verify(data: object, signature: str, public_key: str) -> bool:
   """
   verifies a message with a digital signature
   @param data: (object) message to verify
   @param signature: (str) signature to verify message with
   @param public_key: (str) public key to verify message with
   """
   try:
     public_key_bytes = serialization.load_pem_public_key(
       bytes.fromhex(public_key),
       backend=default_backend()
     )
     signature_bytes = bytes.fromhex(signature)
     data_bytes = Helper.object_to_bytes(data)
     public_key_bytes.verify(signature_bytes, data_bytes, ec.ECDSA(hashes.SHA256()))
     return True
   except InvalidSignature:
     return False
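Examples 26 and 31 assume hex-encoded PEM key material and a Helper.object_to_bytes that serializes deterministically. A self-contained round-trip sketch under those assumptions, using the cryptography package the snippets already draw from (a sorted JSON dump stands in for Helper.object_to_bytes):

import json
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec

def object_to_bytes(data):
    # stand-in for Helper.object_to_bytes: any deterministic serialization works
    return json.dumps(data, sort_keys=True).encode()

# generate an EC key pair and hex-encode the PEM blobs, as sign()/verify() expect
key = ec.generate_private_key(ec.SECP256R1(), default_backend())
private_key = key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.PKCS8,
    serialization.NoEncryption()).hex()
public_key = key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo).hex()

data = {"amount": 10, "to": "alice"}

# sign, mirroring Example 26
loaded_private = serialization.load_pem_private_key(
    bytes.fromhex(private_key), password=None, backend=default_backend())
signature = loaded_private.sign(object_to_bytes(data),
                                ec.ECDSA(hashes.SHA256())).hex()

# verify, mirroring Example 31
loaded_public = serialization.load_pem_public_key(
    bytes.fromhex(public_key), backend=default_backend())
try:
    loaded_public.verify(bytes.fromhex(signature), object_to_bytes(data),
                         ec.ECDSA(hashes.SHA256()))
    print("valid")
except InvalidSignature:
    print("invalid")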
Example 32
    def __init__(self, start_time, end_time, is_dup_data):
        self.config_options = Helper.get_configurations(os.path.join(os.path.dirname(__file__), os.path.pardir, 'dnl_softswitch', 'conf', 'dnl_softswitch.conf'))
        self.connect_database()
        self.start_time = start_time if start_time is not None else self.get_start_time(end_time)
        self.end_time = end_time
        self.is_dup_data = is_dup_data
        self.total_start_time = start_time
        options = self.get_options()
        try:
            self.billing_path = options.get('switch_cdr', 'cdr_directory')
        except Exception:
            self.billing_path = None

        if not self.billing_path:
            self.billing_path = os.path.join(os.path.dirname(__file__), os.path.pardir, 'dnl_softswitch', 'cdr')

        if not os.path.exists(self.billing_path):
            os.makedirs(self.billing_path)
Example 33
    def print_search_content(self, content, show_authors=False,
            show_type=False, show_publisher=False, show_url=False):
        base_template = "{score:.2f} - {year:4d} - {cfg_doi}{doi:40}{cfg_end} \
- {cfg_title}{title}{cfg_end}"
        template = base_template

        if show_authors:
            template += "\n  {cfg_more}AUTHORS{cfg_end}   : {authors}"
        if show_type:
            template += "\n  {cfg_more}TYPE{cfg_end}      : {type}"
        if show_publisher:
            template += "\n  {cfg_more}PUBLISHER{cfg_end} : {publisher}"
        if show_url:
            template += "\n  {cfg_more}URL{cfg_end}       : {url}"

        for result in content.get('items', ()):
            sr = SearchResult(result)
            payload = {
                "score"     : sr.get_score(),
                "year"      : sr.get_year(),
                "doi"       : sr.get_doi().get_identifier(),
                "title"     : sr.get_title(),
                "authors"   : sr.get_authors(),
                "type"      : sr.get_type(),
                "publisher" : sr.get_publisher(),
                "url"       : sr.get_url(),
                "cfg_more"  : '',
                "cfg_end"   : '',
                "cfg_doi"   : '',
                "cfg_title" : '',
            }

            if self.colored_output:
                color_options = [
                    ("cfg_doi", self.color_doi),
                    ("cfg_title", self.color_title),
                    ("cfg_more", self.color_more),
                    ("cfg_end", 'reset'),
                ]
                for key, value in color_options:
                    payload[key] = Helper.get_fg_colorcode_by_identifier(value)

            print(template.format(**payload))
Example 34
def upgrade_bcf(node_dic):
    for hostname, node in node_dic.iteritems():
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                'fqdn': node.fqdn,
                'error': node.error
            })
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_upgrade_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_upgrade_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_upgrade_scripts_for_redhat(node)

        node_q.put(node)

    with open(const.LOG_FILE, "a") as log_file:
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Use multiple threads to setup compute nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_upgrade_or_sriov_node,
                             args=(node_q, ))
        t.daemon = True
        t.start()
    node_q.join()

    sorted_time_dict = OrderedDict(
        sorted(time_dict.items(), key=lambda x: x[1]))
    for fqdn, h_time in sorted_time_dict.items():
        safe_print("node: %(fqdn)s, time: %(time).2f\n" % {
            'fqdn': fqdn,
            'time': h_time
        })

    safe_print("Big Cloud Fabric deployment finished! "
               "Check %(log)s on each node for details.\n" %
               {'log': const.LOG_FILE})
Example 35
File: bosi.py Project: xinwu/bosi-1
def upgrade_bcf(node_dic):
    for hostname, node in node_dic.iteritems():
        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" %
                      {'fqdn': node.fqdn, 'error': node.error})
            continue
        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                      {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_upgrade_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_upgrade_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_upgrade_scripts_for_redhat(node)

        node_q.put(node)

    with open(const.LOG_FILE, "a") as log_file:
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    # Use multiple threads to setup compute nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_upgrade_node, args=(node_q,))
        t.daemon = True
        t.start()
    node_q.join()

    sorted_time_dict = OrderedDict(sorted(time_dict.items(),
                                          key=lambda x: x[1]))
    for fqdn, h_time in sorted_time_dict.items():
        safe_print("node: %(fqdn)s, time: %(time).2f\n" %
                   {'fqdn': fqdn, 'time': h_time})

    safe_print("Big Cloud Fabric deployment finished! "
               "Check %(log)s on each node for details.\n" %
              {'log': const.LOG_FILE})
Example 36
def deploy_bcf(config, fuel_cluster_id):
    # Deploy setup node
    Helper.safe_print("Start to prepare setup node\n")
    env = Environment(config, fuel_cluster_id)
    Helper.common_setup_node_preparation(env)

    # Generate detailed node information
    Helper.safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = None
    if 'nodes' in config:
        nodes_yaml_config = config['nodes']
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        with open(const.LOG_FILE, "a") as log_file:
            log_file.write(str(node))
        if node.skip:
            Helper.safe_print("skip node %(hostname)s due to %(error)s\n" % {
                'hostname': hostname,
                'error': node.error
            })
            continue
        node_q.put(node)

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_node)
        t.daemon = True
        t.start()
    node_q.join()
    Helper.safe_print(
        "Big Cloud Fabric deployment finished! Check %(log)s on each node for details.\n"
        % {'log': const.LOG_FILE})
Example 37
 def __get_live_transitions(self):
     live_transitions_ids = self.__get_live_transitions_ids()
     return [Helper.find_transition_by_id(self.transitions, id) for id in live_transitions_ids]
Example 38
 def file_path(file):
     return Helper.app_path() + "/" + Logger.dir + file
Example 39
 def run_selected_transitions(self, transition_id):
     transition = Helper.find_transition_by_id(self.transitions, transition_id)
     transition.run_transition()
     return self.__json_type_wrapper(RequestType.RUN_SELECTED_TRANSITION, transition.to_json(), True)
Example 40
    def split_cluster_randomic(self, URM, URM_df):
        # split the URM into a test set and a train set
        selected_playlists = np.array([])
        available_playlists = np.arange(URM.shape[0])
        target_analyzer = TargetAnalyzer()
        segment_size = 1
        min_playlist_len_after_split = 5
        dist = target_analyzer.get_distribution_array(
            segment_size=segment_size)
        # in this way n_target = 10000
        helper = Helper()
        target_playlists = helper.get_target_playlists_list()[:5000]
        n_target = np.sum(dist)  # - len(target_playlists)

        while n_target > 0:
            random_index = randint(0, len(available_playlists) - 1)
            playlist_id = available_playlists[random_index]
            target_len = len(URM[playlist_id].data) * 0.8
            if target_len > min_playlist_len_after_split:
                target_segment = int(target_len / segment_size)
                while dist[target_segment] <= 0:
                    random_index = randint(0, len(available_playlists) - 1)
                    playlist_id = available_playlists[random_index]
                    target_len = len(URM[playlist_id].data) * 0.8
                    if target_len > min_playlist_len_after_split:
                        target_segment = int(target_len / segment_size)
                n_target -= 1
                dist[target_segment] -= 1
                selected_playlists = np.append(selected_playlists, playlist_id)
                available_playlists = np.delete(
                    available_playlists,
                    np.where(available_playlists == playlist_id))

        self.target_playlists = selected_playlists.astype(int)
        grouped = URM_df.groupby(
            'playlist_id', as_index=True).apply(lambda x: list(x['track_id']))

        grouped_test = grouped.copy()

        relevant_items = defaultdict(list)
        count = 0
        for playlist_id in selected_playlists:
            tracks = np.array(grouped[playlist_id])
            # if playlist_id in target_playlists:
            # to_be_removed = int(len(tracks) * 0.2)
            # to_be_removed_tracks = helper.get_sorted_tracks_in_playlist(playlist_id)[-to_be_removed:]
            # for track in to_be_removed_tracks:
            #     relevant_items[playlist_id].append(track)
            #     tracks = np.delete(tracks, np.where(tracks == track))
            # for i in range(to_be_removed):
            #    removed_track = tracks[-1]
            #    relevant_items[playlist_id].append(removed_track)
            #    tracks = np.delete(tracks, len(tracks) - 1)
            # else:
            to_be_removed = int(len(tracks) * 0.2)
            for i in range(to_be_removed):
                index = randint(0, len(tracks) - 1)
                removed_track = tracks[index]
                relevant_items[playlist_id].append(removed_track)
                tracks = np.delete(tracks, index)
            grouped[playlist_id] = tracks
            grouped_test[playlist_id] = relevant_items[playlist_id]
            count += 1
        all_tracks = self.tracks_df["track_id"].unique()
        matrix = MultiLabelBinarizer(classes=all_tracks,
                                     sparse_output=True).fit_transform(grouped)
        self.URM_train = matrix.tocsr()
        self.dict_test = relevant_items
        # bib URM
        # self.URM_train = helper.get_urm_csr_bib(URM = self.URM_train)
        # plotter = TargetAnalyzer()
        # plotter.plot_standard_distribution()
        # plotter.plot_distribution(self.URM_train, self.target_playlists)
        self.URM_test = MultiLabelBinarizer(
            classes=all_tracks, sparse_output=True).fit_transform(grouped_test)
        self.URM_test = self.URM_test.tocsr()
        self.URM_test = self.URM_test.astype(np.float64)
        self.URM_train = self.URM_train.astype(np.float64)
Example 41
def worker_setup_dhcp_agent():
    while True:
        node = dhcp_node_q.get()
        Helper.safe_print("Copy dhcp_agent.ini to %(hostname)s\n" %
                         {'hostname' : node.hostname})
        Helper.copy_file_to_remote(node, r'''%(dir)s/dhcp_agent.ini''' % {'dir' : node.setup_node_dir},
                                   '/etc/neutron', 'dhcp_agent.ini')
        Helper.safe_print("Copy metadata_agent.ini to %(hostname)s\n" %
                         {'hostname' : node.hostname})
        Helper.copy_file_to_remote(node, r'''%(dir)s/metadata_agent.ini''' % {'dir': node.setup_node_dir},
                                   '/etc/neutron', 'metadata_agent.ini')
        Helper.safe_print("Restart neutron-metadata-agent and neutron-dhcp-agent on %(hostname)s\n" %
                         {'hostname' : node.hostname})
        Helper.run_command_on_remote(node, 'service neutron-metadata-agent restart')
        Helper.run_command_on_remote(node, 'service neutron-dhcp-agent restart')
        Helper.safe_print("Finish deploying dhcp agent and metadata agent on %(hostname)s\n" %
                         {'hostname' : node.hostname})
        dhcp_node_q.task_done()
Example 42
                                     controller_node.setup_node_dir)
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_dhcp_agent)
        t.daemon = True
        t.start()
    dhcp_node_q.join()

    Helper.safe_print("Big Cloud Fabric deployment finished! Check %(log)s on each node for details.\n" %
                     {'log' : const.LOG_FILE})


if __name__ == '__main__':

    # Check if network is working properly
    code = subprocess.call("ping www.bigswitch.com -c1", shell=True)
    if code != 0:
        Helper.safe_print("Network is not working properly, quit deployment\n")
        exit(1)

    # Parse configuration
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config-file", required=True,
                        help="BCF YAML configuration file")
    parser.add_argument('-f', "--fuel-cluster-id", required=False,
                        help="Fuel cluster ID. Fuel settings may override YAML configuration. Please refer to example.yaml")
    args = parser.parse_args()
    with open(args.config_file, 'r') as config_file:
        config = yaml.load(config_file)
    deploy_bcf(config, args.fuel_cluster_id)

Example 43
File: bosi.py Project: xinwu/bosi-1
def deploy_bcf(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
               verify, verify_only, skip_ivs_version_check,
               certificate_dir, certificate_only, generate_csr,
               support, upgrade_dir):
    # Deploy setup node
    safe_print("Start to prepare setup node\n")
    env = Environment(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
                      skip_ivs_version_check, certificate_dir, upgrade_dir)
    Helper.common_setup_node_preparation(env)
    controller_nodes = []

    # Generate detailed node information
    safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = config['nodes'] if 'nodes' in config else None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    if upgrade_dir:
        return upgrade_bcf(node_dic)

    if generate_csr:
        safe_print("Start to generate csr for virtual switches.\n")
        # create ~/csr and ~/key directory
        Helper.run_command_on_local("mkdir -p %s" % const.CSR_DIR)
        Helper.run_command_on_local("mkdir -p %s" % const.KEY_DIR)
        for hostname, node in node_dic.iteritems():
            if node.skip:
                safe_print("skip node %(fqdn)s due to %(error)s\n" %
                           {'fqdn': node.fqdn, 'error': node.error})
                continue

            if node.tag != node.env_tag:
                safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                           {'fqdn': node.fqdn})
                continue
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                Helper.generate_csr(node)
        safe_print("Finish generating csr for virtual switches.\n")
        return

    # copy neutron config from neutron server to setup node
    for hostname, node in node_dic.iteritems():
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_nodes.append(node)
    Helper.copy_neutron_config_from_controllers(controller_nodes)

    # check if vlan is the tenant network type for fuel environment
    if not Helper.check_if_vlan_is_used(controller_nodes):
        safe_print("tenant network type is not vlan. Stop deploying.\n")
        return

    # prepare keystone client from /etc/neutron/api-paste.ini
    #Helper.prepare_keystone_client(controller_nodes)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if support:
            support_node_q.put(node)

        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" %
                       {'fqdn': node.fqdn, 'error': node.error})
            continue

        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_scripts_for_redhat(node)

        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node_q.put(node)
        else:
            # python doesn't have deep copy for Queue, hence add to all
            node_q.put(node)
            verify_node_q.put(node)
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                certify_node_q.put(node)

        if node.rhosp:
            Helper.chmod_node(node)

    with open(const.LOG_FILE, "a") as log_file:
        version = Helper.run_command_on_local("pip show bosi")
        log_file.write(str(version))
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    if support:
        safe_print("Start to collect logs.\n")
        # copy installer logs to ~/support
        Helper.run_command_on_local("mkdir -p %s" % const.SUPPORT_DIR)
        Helper.run_command_on_local("cp -r %(src)s %(dst)s" %
                                   {"src": const.LOG_FILE,
                                    "dst": const.SUPPORT_DIR})
        Helper.run_command_on_local("cp -r %(setup_node_dir)s/%(generated_script_dir)s %(dst)s" %
                                   {"setup_node_dir": env.setup_node_dir,
                                    "generated_script_dir": const.GENERATED_SCRIPT_DIR,
                                    "dst": const.SUPPORT_DIR})

        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=support_node_setup,
                                 args=(support_node_q,))
            t.daemon = True
            t.start()
        support_node_q.join()
        # compress ~/support
        Helper.run_command_on_local("cd /tmp; tar -czf support.tar.gz support")
        safe_print("Finish collecting logs. logs are at /tmp/support.tar.gz.\n")
        return

    # in case of verify_only or certificate_only, do not deploy
    if (not verify_only) and (not certificate_only):
        # Use single thread to setup controller nodes
        t = threading.Thread(target=worker_setup_node,
                             args=(controller_node_q,))
        t.daemon = True
        t.start()
        controller_node_q.join()

        # Use multiple threads to setup compute nodes
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=worker_setup_node, args=(node_q,))
            t.daemon = True
            t.start()
        node_q.join()

        sorted_time_dict = OrderedDict(sorted(time_dict.items(),
                                              key=lambda x: x[1]))
        for fqdn, h_time in sorted_time_dict.items():
            safe_print("node: %(fqdn)s, time: %(time).2f\n" %
                       {'fqdn': fqdn, 'time': h_time})

        safe_print("Big Cloud Fabric deployment finished! "
                   "Check %(log)s on each node for details.\n" %
                   {'log': const.LOG_FILE})

    if certificate_dir or certificate_only:
        # certify each node
        safe_print("Start to certify virtual switches.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=certify_node_setup,
                                 args=(certify_node_q,))
            t.daemon = True
            t.start()
        certify_node_q.join()
        safe_print('Certifying virtual switches done.\n')

    if verify or verify_only:
        # verify each node and post results
        safe_print("Verifying deployment for all compute nodes.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=verify_node_setup,
                                 args=(verify_node_q,))
            t.daemon = True
            t.start()
        verify_node_q.join()
        # print status
        # success nodes
        safe_print('Deployed successfully to: \n')
        for node_element in node_pass:
            safe_print(node_element + '\n')
        # failed nodes
        safe_print('Deployment to following failed: \n')
        for node_element in node_fail:
            safe_print(str(node_element) + ' : '
                       + str(node_fail[node_element]) + '\n')
Esempio n. 44
0
def support_node_setup(q):
    while True:
        node = q.get()
        Helper.support_node(node)
        q.task_done()
Esempio n. 45
0
File: bosi.py Progetto: xinwu/bosi-1
def support_node_setup(q):
    while True:
        node = q.get()
        Helper.support_node(node)
        q.task_done()
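Both support_node_setup variants follow the same consumer pattern as worker_setup_node: a daemon thread loops forever, pulls a node from a shared queue, processes it, and calls task_done() so that the producer's join() returns once every queued item has been acknowledged. A small self-contained sketch of that pattern (Python 3 module names; the bosi scripts themselves use the Python 2 Queue module):

import queue
import threading

work_q = queue.Queue()

def worker(q):
    while True:
        item = q.get()
        # process the item here; real workers should catch exceptions so the loop survives
        print("processed", item)
        q.task_done()

for i in range(4):
    t = threading.Thread(target=worker, args=(work_q,))
    t.daemon = True          # daemon threads exit together with the main program
    t.start()

for item in ["node-1", "node-2", "node-3"]:
    work_q.put(item)

work_q.join()                # blocks until task_done() has been called for every item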
Esempio n. 46
0
 def _create_links(self, links):
     for link in links:
         link_obj = Connector(link["id"],
                              Helper.find_place_by_id(self.places, link["place_id"]),
                              link["direction"], link["weight"])
         self.links.append(link_obj)
Esempio n. 47
0
def deploy_bcf(config, mode, fuel_cluster_id, rhosp, tag, cleanup, verify,
               verify_only, skip_ivs_version_check, certificate_dir,
               certificate_only, generate_csr, support, upgrade_dir,
               offline_dir, sriov):
    # Deploy setup node
    safe_print("Start to prepare setup node\n")
    env = Environment(config, mode, fuel_cluster_id, rhosp, tag, cleanup,
                      skip_ivs_version_check, certificate_dir, upgrade_dir,
                      offline_dir, sriov)
    Helper.common_setup_node_preparation(env)
    controller_nodes = []

    # Generate detailed node information
    safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = config['nodes'] if 'nodes' in config else None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    if upgrade_dir:
        return upgrade_bcf(node_dic)

    if sriov:
        return setup_sriov(node_dic)

    if generate_csr:
        safe_print("Start to generate csr for virtual switches.\n")
        # create ~/csr and ~/key directory
        Helper.run_command_on_local("mkdir -p %s" % const.CSR_DIR)
        Helper.run_command_on_local("mkdir -p %s" % const.KEY_DIR)
        for hostname, node in node_dic.iteritems():
            if node.skip:
                safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                    'fqdn': node.fqdn,
                    'error': node.error
                })
                continue

            if node.tag != node.env_tag:
                safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                           {'fqdn': node.fqdn})
                continue
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                Helper.generate_csr(node)
        safe_print("Finish generating csr for virtual switches.\n")
        return

    # copy neutron config from neutron server to setup node
    for hostname, node in node_dic.iteritems():
        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_nodes.append(node)
    Helper.copy_neutron_config_from_controllers(controller_nodes)

    # check if vlan is the tenant network type for fuel environment
    if not Helper.check_if_vlan_is_used(controller_nodes):
        safe_print("tenant network type is not vlan. Stop deploying.\n")
        return

    # prepare keystone client from /etc/neutron/api-paste.ini
    #Helper.prepare_keystone_client(controller_nodes)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if support:
            support_node_q.put(node)

        if node.skip:
            safe_print("skip node %(fqdn)s due to %(error)s\n" % {
                'fqdn': node.fqdn,
                'error': node.error
            })
            continue

        if node.tag != node.env_tag:
            safe_print("skip node %(fqdn)s due to mismatched tag\n" %
                       {'fqdn': node.fqdn})
            continue

        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        elif node.os == const.REDHAT:
            Helper.generate_scripts_for_redhat(node)

        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node_q.put(node)
        else:
            # python doesn't have deep copy for Queue, hence add to all
            node_q.put(node)
            verify_node_q.put(node)
            if node.deploy_mode == const.T6 and node.role == const.ROLE_COMPUTE:
                certify_node_q.put(node)

        if node.rhosp:
            Helper.chmod_node(node)

    with open(const.LOG_FILE, "a") as log_file:
        version = Helper.run_command_on_local("pip show bosi")
        log_file.write(str(version))
        for hostname, node in node_dic.iteritems():
            log_file.write(str(node))

    if support:
        safe_print("Start to collect logs.\n")
        # copy installer logs to ~/support
        Helper.run_command_on_local("mkdir -p %s" % const.SUPPORT_DIR)
        Helper.run_command_on_local("cp -r %(src)s %(dst)s" % {
            "src": const.LOG_FILE,
            "dst": const.SUPPORT_DIR
        })
        Helper.run_command_on_local(
            "cp -r %(setup_node_dir)s/%(generated_script_dir)s %(dst)s" % {
                "setup_node_dir": env.setup_node_dir,
                "generated_script_dir": const.GENERATED_SCRIPT_DIR,
                "dst": const.SUPPORT_DIR
            })

        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=support_node_setup,
                                 args=(support_node_q, ))
            t.daemon = True
            t.start()
        support_node_q.join()
        # compress ~/support
        Helper.run_command_on_local("cd /tmp; tar -czf support.tar.gz support")
        safe_print(
            "Finish collecting logs. logs are at /tmp/support.tar.gz.\n")
        return

    # in case of verify_only or certificate_only, do not deploy
    if (not verify_only) and (not certificate_only):
        # Use single thread to setup controller nodes
        t = threading.Thread(target=worker_setup_node,
                             args=(controller_node_q, ))
        t.daemon = True
        t.start()
        controller_node_q.join()

        # Use multiple threads to setup compute nodes
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=worker_setup_node, args=(node_q, ))
            t.daemon = True
            t.start()
        node_q.join()

        sorted_time_dict = OrderedDict(
            sorted(time_dict.items(), key=lambda x: x[1]))
        for fqdn, h_time in sorted_time_dict.items():
            safe_print("node: %(fqdn)s, time: %(time).2f\n" % {
                'fqdn': fqdn,
                'time': h_time
            })

        safe_print("Big Cloud Fabric deployment finished! "
                   "Check %(log)s on each node for details.\n" %
                   {'log': const.LOG_FILE})

    if certificate_dir or certificate_only:
        # certify each node
        safe_print("Start to certify virtual switches.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=certify_node_setup,
                                 args=(certify_node_q, ))
            t.daemon = True
            t.start()
        certify_node_q.join()
        safe_print('Certifying virtual switches done.\n')

    if verify or verify_only:
        # verify each node and post results
        safe_print("Verifying deployment for all compute nodes.\n")
        for i in range(const.MAX_WORKERS):
            t = threading.Thread(target=verify_node_setup,
                                 args=(verify_node_q, ))
            t.daemon = True
            t.start()
        verify_node_q.join()
        # print status
        # success nodes
        safe_print('Deployed successfully to: \n')
        for node_element in node_pass:
            safe_print(node_element + '\n')
        # failed nodes
        safe_print('Deployment to following failed: \n')
        for node_element in node_fail:
            safe_print(
                str(node_element) + ' : ' + str(node_fail[node_element]) +
                '\n')
Esempio n. 48
0
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_node)
        t.daemon = True
        t.start()
    node_q.join()
    Helper.safe_print(
        "Big Cloud Fabric deployment finished! Check %(log)s on each node for details.\n"
        % {'log': const.LOG_FILE})


if __name__ == '__main__':

    # Check if network is working properly
    code = subprocess.call("ping www.bigswitch.com -c1", shell=True)
    if code != 0:
        Helper.safe_print("Network is not working properly, quit deployment\n")
        exit(1)

    # Parse configuration
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--config-file",
                        required=True,
                        help="BCF YAML configuration file")
    parser.add_argument(
        '-f',
        "--fuel-cluster-id",
        required=False,
        help=
        "Fuel cluster ID. Fuel settings may override YAML configuration. Please refer to example.yaml"
    )
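The __main__ block above shells out to ping with shell=True before parsing arguments. A small sketch of an equivalent reachability check without the shell, assuming a Unix-style ping that accepts -c (the host name is the one used in the original script):

import subprocess

def network_reachable(host="www.bigswitch.com"):
    # Send a single ICMP echo request; True when ping exits with status 0.
    return subprocess.call(["ping", "-c", "1", host]) == 0

if not network_reachable():
    raise SystemExit("Network is not working properly, quit deployment")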
Esempio n. 49
0
currentDir = os.path.basename(os.getcwd())
currentFileName = os.path.basename(__file__)

libDir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.append(libDir)
print libDir

from lib.router import Router
router = Router()
ROUTES = router.getRoutes()

from lib.loader import Loader
loader = Loader(currentFullRoute, ROUTES)

from lib.helper import Helper
helper = Helper()

from lib.filewriter import FileWriter
filewriter = FileWriter()

from lib.reporter import Reporter
reporter = Reporter(ROUTES)

# return to current path

sys.path.append(currentFullRoute)

# ---------------------------------------------------------------------- CUSTOM LIBS

from math import *
from graphviz import Digraph
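This snippet imports graphviz's Digraph under its "CUSTOM LIBS" section, presumably to render the graphs the surrounding libraries build. A minimal sketch of Digraph usage with invented node and edge names, purely for illustration:

from graphviz import Digraph

dot = Digraph(comment="reachability graph")
dot.node("s0", "state 0")
dot.node("s1", "state 1")
dot.edge("s0", "s1", label="t1")
print(dot.source)   # DOT text; dot.render("graph") would also write and render a file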
Esempio n. 50
0
from lib.helper import Helper
from lib.encoder import Encoder
import sys

if __name__ == "__main__":
    helper = Helper(sys.argv)
    helper.ParseConfig().ShowConfigDescription()

    encoder = Encoder(helper)
    encoder.append_vars(helper.GetConfig("vars"))

    template = helper.LoadFile(helper.GetConfig("template"))
    data = template

    for evasion in helper.GetConfig("evasion"):
        data += helper.LoadFile("templates/evasions/" + evasion + ".vba")

    data = encoder.replace_var(data, "offset", encoder.get_encoding_offset())
    data = encoder.encode_user_vars(data)
    data = encoder.append_def_use_tag(data)
    data = encoder.rand_vars(data)
    data = encoder.rand_int(data)
    data = encoder.rand_smallint(data)

    encodedvars = helper.GetConfig("encodedvars")
    for var in encodedvars:
        data = encoder.replace_var(data, var, encodedvars[var], True)

    if "-s" in sys.argv or "--split_strings" in sys.argv:
        data = encoder.split_strings(data)
    if "-x" in sys.argv or "--strings_to_hex" in sys.argv:
Esempio n. 51
0
 def get_icon(self):
     if len(self.icon):
         return Helper.app_path() + "/icon/" + self.icon
     return ''
Esempio n. 52
0
def deploy_bcf(config, fuel_cluster_id):
    # Deploy setup node
    Helper.safe_print("Start to prepare setup node\n")
    env = Environment(config, fuel_cluster_id)
    Helper.common_setup_node_preparation(env)

    # Generate detailed node information
    Helper.safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = config['nodes'] if 'nodes' in config else None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        with open(const.LOG_FILE, "a") as log_file:
            log_file.write(str(node))
        if node.skip:
            Helper.safe_print("skip node %(hostname)s due to %(error)s\n" %
                             {'hostname' : hostname,
                              'error'    : node.error})
            continue
        node_q.put(node)

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_node)
        t.daemon = True
        t.start()
    node_q.join()
    Helper.safe_print("Big Cloud Fabric deployment finished! Check %(log)s on each node for details.\n" %
                     {'log' : const.LOG_FILE})
Esempio n. 53
0
File: bosi.py Progetto: kjiang/bosi
def certify_node_setup(q):
    while True:
        node = q.get()
        Helper.certify_node(node)
        q.task_done()
Esempio n. 54
0
def deploy_bcf(config, fuel_cluster_id):
    # Deploy setup node
    Helper.safe_print("Start to prepare setup node\n")
    env = Environment(config, fuel_cluster_id)
    Helper.common_setup_node_preparation(env)
    controller_node = None

    # Generate detailed node information
    Helper.safe_print("Start to setup Big Cloud Fabric\n")
    nodes_yaml_config = config['nodes'] if 'nodes' in config else None
    node_dic = Helper.load_nodes(nodes_yaml_config, env)

    # Generate scripts for each node
    for hostname, node in node_dic.iteritems():
        if node.os == const.CENTOS:
            Helper.generate_scripts_for_centos(node)
        elif node.os == const.UBUNTU:
            Helper.generate_scripts_for_ubuntu(node)
        with open(const.LOG_FILE, "a") as log_file:
            log_file.write(str(node))
        if node.skip:
            Helper.safe_print("skip node %(hostname)s due to %(error)s\n" %
                             {'hostname' : hostname,
                              'error'    : node.error})
            continue
        node_q.put(node)

        if node.role == const.ROLE_NEUTRON_SERVER:
            controller_node = node
        elif node.deploy_dhcp_agent:
            dhcp_node_q.put(node)

    # Use multiple threads to setup nodes
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_node)
        t.daemon = True
        t.start()
    node_q.join()

    # Use multiple threads to setup up dhcp agent and metadata agent
    if controller_node:
        Helper.safe_print("Copy dhcp_agent.ini from openstack controller %(controller_node)s\n" %
                         {'controller_node' : controller_node.hostname})
        Helper.copy_file_from_remote(controller_node, '/etc/neutron', 'dhcp_agent.ini',
                                     controller_node.setup_node_dir)
        Helper.safe_print("Copy metadata_agent.ini from openstack controller %(controller_node)s\n" %
                         {'controller_node' : controller_node.hostname})
        Helper.copy_file_from_remote(controller_node, '/etc/neutron', 'metadata_agent.ini',
                                     controller_node.setup_node_dir)
    for i in range(const.MAX_WORKERS):
        t = threading.Thread(target=worker_setup_dhcp_agent)
        t.daemon = True
        t.start()
    dhcp_node_q.join()

    Helper.safe_print("Big Cloud Fabric deployment finished! Check %(log)s on each node for details.\n" %
                     {'log' : const.LOG_FILE})
Esempio n. 55
0
    def split_sequential(self, URM, URM_df):
        segment = 1
        # split the URM into a test set and a train set
        selected_playlists = np.array([])
        available_playlists = np.arange(URM.shape[0])
        target_analyzer = TargetAnalyzer()

        #Gets distribution of only last 5000 playlists
        dist = target_analyzer.get_distribution_array_only_last(segment)

        helper = Helper()
        target_playlists = helper.get_target_playlists_list()[:5000]
        #n_target = np.sum(dist) - len(target_playlists)

    # Remove the sequential target playlists from the pool of available playlists
        for playlist_id in target_playlists:
            playlist_id = int(playlist_id)
            available_playlists = np.delete(
                available_playlists,
                np.where(available_playlists == playlist_id))
            selected_playlists = np.append(selected_playlists, playlist_id)
            #target_len = len(URM[playlist_id].data)
            #dist[target_len] -= 1

        print("Clustering with segment = " + str(segment))
        for key in tqdm(range(len(dist))):
            while dist[key] != 0:
                random_index = randint(0, len(available_playlists) - 1)
                playlist_id = available_playlists[random_index]
                target_segment = int(0.8 * len(URM[playlist_id].data))
                if target_segment == key:
                    available_playlists = np.delete(
                        available_playlists,
                        np.where(available_playlists == playlist_id))
                    selected_playlists = np.append(selected_playlists,
                                                   playlist_id)
                    dist[key] -= 1

        self.target_playlists = selected_playlists.astype(int)
        grouped = URM_df.groupby(
            'playlist_id', as_index=True).apply(lambda x: list(x['track_id']))

        relevant_items = defaultdict(list)

        for playlist_id in selected_playlists:
            # tracks = list of the tracks taken from the URM
            tracks = np.array(grouped[playlist_id])
            if playlist_id in target_playlists:
                to_be_removed = int(len(tracks) * 0.2)

                # Return the last #to_be_removed tracks in sequential order and remove them from the track list
                to_be_removed_tracks = helper.get_sorted_tracks_in_playlist(
                    playlist_id)[-to_be_removed:]
                for track in to_be_removed_tracks:
                    relevant_items[playlist_id].append(track)
                    tracks = np.delete(tracks, np.where(tracks == track))
            else:
                to_be_removed = int(len(tracks) * 0.2)
                for i in range(to_be_removed):
                    index = randint(0, len(tracks) - 1)
                    removed_track = tracks[index]
                    relevant_items[playlist_id].append(removed_track)
                    tracks = np.delete(tracks, index)
            grouped[playlist_id] = tracks

        all_tracks = self.tracks_df["track_id"].unique()
        matrix = MultiLabelBinarizer(classes=all_tracks,
                                     sparse_output=True).fit_transform(grouped)
        self.URM_train = matrix.tocsr()
        self.URM_train = self.URM_train.astype(np.float64)
        self.dict_test = relevant_items
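The last step above turns the per-playlist track lists into a sparse playlist-by-track matrix with scikit-learn's MultiLabelBinarizer. A small self-contained sketch of that step, with made-up playlist and track ids purely for illustration:

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer

# toy per-playlist track lists (what `grouped` holds after the split)
grouped = [
    [0, 2, 5],     # playlist 0
    [1, 5],        # playlist 1
    [0, 3, 4, 5],  # playlist 2
]
all_tracks = np.arange(6)   # universe of track ids

URM_train = MultiLabelBinarizer(
    classes=all_tracks, sparse_output=True).fit_transform(grouped).tocsr()

print(URM_train.shape)      # (3, 6): one row per playlist, one column per track
print(URM_train.toarray())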