Example #1
 def _close(self):
     if self.ovsdb:
         try:
             self.ovsdb.close()
         except Exception as e:
             LOG.error(str(e))
         self.ovsdb = None
Example #2
    def __add_router_interface(self):

        # Pick the first subnet in each internal network - the list should be
        # non-empty and contain only 1 subnet since it is supposed to be a
        # private network

        # But first check that the router does not already have this subnet
        # so retrieve the list of all ports, then check if there is one port
        # - matches the subnet
        # - and is attached to the router
        # Assumed that both management networks are created together so checking for one of them
        ports = self.neutron_client.list_ports()['ports']
        for port in ports:
            # Skip the check on stale ports
            if port['fixed_ips']:
                port_ip = port['fixed_ips'][0]
                if (port['device_id'] == self.ext_router['id']) and \
                   (port_ip['subnet_id'] == self.vm_int_net[0]['subnets'][0]):
                    LOG.info(
                        'Ext router already associated to the internal network.'
                    )
                    return

        for int_net in self.vm_int_net:
            body = {'subnet_id': int_net['subnets'][0]}
            self.neutron_client.add_interface_router(self.ext_router['id'],
                                                     body)
            LOG.debug('Ext router associated to ' + int_net['name'])
            # If ipv6 is enabled then add the second subnet
            if self.ipv6_enabled:
                body = {'subnet_id': int_net['subnets'][1]}
                self.neutron_client.add_interface_router(
                    self.ext_router['id'], body)
Example #3
def start_rabbitmq_client():
    """Start Rabbitmq client to listen instance notifications from Nova"""
    cfg = get_rabbit_config()
    rabbit_url = "rabbit://%s:%s@%s:%s/%s" % (cfg['user_id'], cfg['password'],
                                              cfg['host'], cfg['port'],
                                              cfg['virt_host'])
    LOG.info(rabbit_url)

    target = oslo_messaging.Target(exchange="nova",
                                   topic="notifications",
                                   server="info",
                                   version="2.1",
                                   fanout=True)
    transport = oslo_messaging.get_notification_transport(CONF, url=rabbit_url)
    endpoints = [
        InstCreateNotificationEp(),
        InstResizeNotificationEp(),
        InstDelNotificationEp()
    ]

    server = oslo_messaging.get_notification_listener(transport, [target],
                                                      endpoints, "threading")
    thread = threading.Thread(target=rpc_work, args=(server, ))
    thread.start()
    LOG.info("Rabbitmq Client Started!")

    return server
Example #4
def get_application(application_name, is_summary=False):
    if not is_summary:
        session = requests.session()
        url = 'http://{}:8080/api/v1/namespaces/{}/pods'.format(
            app.config['K8S_IP'], application_name)
        reply = session.get(url)
        pod_list_json = reply.json()['items']

        url = 'http://{}:8080/api/v1/namespaces/{}/replicationcontrollers'.format(
            app.config['K8S_IP'], application_name)
        reply = session.get(url)
        rc_list_json = reply.json()['items']

    try:
        stack = heat_client.get_stack(application_name)
        stack_json = stack.to_dict()
        if not is_summary:
            paas_app = PaasApplication(
                application_name, stack_json,
                heat_client.get_resource_list(application_name), rc_list_json,
                pod_list_json)
        else:
            paas_app = PaasApplication(application_name, stack_json, None,
                                       None, None)
        return paas_app
    except Exception as e:
        LOG.warning(type(e))
        LOG.warning(e)
        return None
Example #5
def main():
    Flags.PARSER.add_argument(
        '--input_file',
        type=str,
        required=True,
        help='CSV containing values to validate',
    )
    Flags.PARSER.add_argument(
        '--datasource',
        type=str,
        default=DATASOURCE.name,
        help='Datasource to validate against',
    )
    Flags.PARSER.add_argument(
        '--output_file',
        type=str,
        required=True,
        help='Output CSV to write validation results',
    )
    Flags.InitArgs()
    input_file = Flags.ARGS.input_file
    dimensions = _extract_dimensions(input_file)

    LOG.info('Starting validation over dimensions: %s', dimensions)
    validator = PivotedCSVValidator(Flags.ARGS.datasource, dimensions)
    validator.parse_and_run(input_file, Flags.ARGS.output_file)
    return validator.passed_validation
Example #6
    def create_server(self,
                      vmname,
                      image,
                      flavor,
                      key_name,
                      nic,
                      sec_group,
                      avail_zone=None,
                      user_data=None,
                      config_drive=None,
                      files=None):
        """Create a new server."""
        if sec_group:
            security_groups = [sec_group['id']]
        else:
            security_groups = None

        # Also attach the created security group for the test
        LOG.info('Creating instance %s with AZ: "%s"', vmname, avail_zone)
        instance = self.novaclient.servers.create(
            name=vmname,
            image=image,
            flavor=flavor,
            key_name=key_name,
            nics=nic,
            availability_zone=avail_zone,
            userdata=user_data,
            config_drive=config_drive,
            files=files,
            security_groups=security_groups)
        return instance
Example #7
	def commandReceived(self, cmd):
		if cmd.commandid in const.command_names:
			LOG.debug("got command: %d(%s) from company %d: '%s'" % (cmd.commandid, str(const.command_names[cmd.commandid]), cmd.company + 1, cmd.text))

		ctime = time.time()
		companystr = self.client.getCompanyString(cmd.company)

		if cmd.company in self.companyIdling and cmd.company != const.PLAYER_SPECTATOR:
			# remove from that list
			idx = self.companyIdling.index(cmd.company)
			del self.companyIdling[idx]
			timediff = ctime - self.companyLastAction[cmd.company]
			if timediff > self.idletime:
				# we were here already, check if we got back from idling
				Broadcast("%s is back from idling after %s" % (companystr, self.timeFormat(timediff)), parentclient=self.client)
			
		self.companyLastAction[cmd.company] = ctime
		
		if cmd.commandid == const.commands['CMD_PLACE_SIGN'] and cmd.text != '':
			Broadcast("%s placed a sign: '%s'" % (companystr, cmd.text), parentclient=self.client)
		elif cmd.commandid == const.commands['CMD_RENAME_SIGN'] and cmd.text != '':
			Broadcast("%s renames a sign: '%s'" % (companystr, cmd.text), parentclient=self.client)
		elif cmd.commandid == const.commands['CMD_SET_COMPANY_COLOUR']:
			Broadcast("%s changed their color" % companystr, parentclient=self.client)
		elif cmd.commandid == const.commands['CMD_RENAME_COMPANY']:
			Broadcast("%s changed their company name to '%s'"%(companystr, cmd.text), parentclient=self.client)
		elif cmd.commandid == const.commands['CMD_SET_COMPANY_MANAGER_FACE']:
			Broadcast("%s changed their company face"%(companystr), parentclient=self.client)
		elif cmd.commandid == const.commands['CMD_RENAME_PRESIDENT']:
			Broadcast("%s changed their presidents name to '%s'"%(companystr, cmd.text), parentclient=self.client)
		elif cmd.commandid == const.commands['CMD_BUILD_INDUSTRY']:
			Broadcast("%s built a new industry"%(companystr), parentclient=self.client)
		elif cmd.commandid == const.commands['CMD_BUILD_COMPANY_HQ']:
			Broadcast("%s built or relocated their HQ"%(companystr), parentclient=self.client)
Example #8
def savestatstofile(filename="serverstats.bin", servers=[]):
    if not config.getboolean("serverstats", "savehistory"):
        return
    t = time.time()
    try:
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
    except ImportError:
        LOG.error("error while loading the pickle module...")
        return
    try:
        f = open(filename, 'rb')
        oldstats = pickle.load(f)
        f.close()
    except IOError:
        oldstats = {}
    oldstats[t] = servers
    try:
        f = open(filename, 'wb')
        pickle.dump(oldstats, f)
        f.close()
    except IOError:
        LOG.error("error while saving history file!")
Example #9
    def run_server(self, generator_config, filename='/etc/trex_cfg.yaml'):
        """Run TRex server for specified traffic profile.

        :param generator_config: generator configuration object based on config file
        :param filename: path where to save TRex config file
        """
        cfg = self.__save_config(generator_config, filename)
        cores = generator_config.cores
        vtep_vlan = generator_config.gen_config.get('vtep_vlan')
        sw_mode = "--software" if generator_config.software_mode else ""
        vlan_opt = "--vlan" if (generator_config.vlan_tagging or vtep_vlan) else ""
        if generator_config.mbuf_factor:
            mbuf_opt = "--mbuf-factor " + str(generator_config.mbuf_factor)
        else:
            mbuf_opt = ""
        hdrh_opt = "--hdrh" if generator_config.hdrh else ""
        # --unbind-unused-ports: for NIC that have more than 2 ports such as Intel X710
        # this will instruct trex to unbind all ports that are unused instead of
        # erroring out with an exception (i40e only)
        cmd = ['nohup', '/bin/bash', '-c',
               './t-rex-64 -i -c {} --iom 0 --no-scapy-server '
               '--unbind-unused-ports --close-at-end {} {} '
               '{} {} --cfg {} &> /tmp/trex.log & disown'.format(cores, sw_mode,
                                                                 vlan_opt,
                                                                 hdrh_opt,
                                                                 mbuf_opt, cfg)]
        LOG.info(' '.join(cmd))
        subprocess.Popen(cmd, cwd=self.trex_dir)
        LOG.info('TRex server is running...')
Example #10
def write_cache(app_name, app_json):
    '''
    Keep just one cache line per application.

    :param app_name: application name
    :param app_json: application info with pod status & update_time
    :return:
    '''

    cfile = '/tmp/' + app_name
    delete_old_cache(cfile, app_name)
    try:
        obj = {"app_name": app_name}
        datalist = []
        for i in app_json:
            rcdata = get_rc_name_in_heat_resource2(i)
            if rcdata is not None:
                datalist.append(rcdata)
        obj['data'] = datalist
        # append the new cache line; file() is Python 2 only, so use open()
        with open(cfile, "a") as f:
            f.write(json.dumps(obj))
            f.write('\n')
        #LOG.info(datalist)
    except Exception as e:
        LOG.info(str(e))
Example #11
 def zoomChange(self):
     if self.image.isNull():
         LOG.warning('image is null.')
         return
     self.draw_area.scale = 0.01 * self.zoom_value.value()
     self.draw_area.adjustSize()
     self.draw_area.update()
Example #12
    def place_block(self, block, pos, ignore_top=True):
        """
        Updates the board by placing `block` at the position `pos`.
        """
        LOG.debug("Placing %s at %s" % (block, pos))

        solid_squares = block.get_solid_squares()
        heighest_columns = [0] * self.width

        for (x,y) in solid_squares:
            final_x, final_y = pos[0]+x, pos[1]+y

            if ignore_top and final_y >= self.height:
                continue

            assert self.valid_position(final_x, final_y, ignore_top), \
                "Trying to place %s outside the board limits! (%s)" % (block, pos)

            if self.board[final_y][final_x] is not None:
                LOG.critical("Writing on (%d,%d), a position of the " % (final_x, final_y) +
                        "board already filled, something wrong happened!")

            self.board[final_y][final_x] = block

            if final_y >= heighest_columns[final_x]:
                heighest_columns[final_x] = final_y + 1

        for (x, _) in solid_squares:
            final_x = pos[0]+x

            if heighest_columns[final_x] > self._column_heights[final_x]:
                self._column_heights[final_x] = heighest_columns[final_x]
Example #13
def save_json_result(result, json_file, std_json_path, service_chain,
                     service_chain_count, flow_count, frame_sizes):
    """Save results in json format file."""
    filepaths = []
    if json_file:
        filepaths.append(json_file)
    if std_json_path:
        name_parts = [
            service_chain,
            str(service_chain_count),
            str(flow_count)
        ] + list(frame_sizes)
        filename = '-'.join(name_parts) + '.json'
        filepaths.append(os.path.join(std_json_path, filename))

    if filepaths:
        for file_path in filepaths:
            LOG.info('Saving results in json file: %s...', file_path)
            with open(file_path, 'w') as jfp:
                json.dump(result,
                          jfp,
                          indent=4,
                          sort_keys=True,
                          separators=(',', ': '),
                          default=lambda obj: obj.to_json())
Example #14
 def sanitize_az_host(self, host_list, az_host):
     '''
     host_list: list of hosts as retrieved from openstack (can be empty)
     az_host: either a host name or an az:host string
     If a host name is passed, check that the host is in the list, find the
     corresponding az and return az:host.
     If az:host is passed, check that the host is in the list and that the az
     matches.
     If host_list is empty, return the configured az if no az was passed.
     '''
     if ':' in az_host:
         # no host_list, return as is (no check)
         if not host_list:
             return az_host
         # if there is a host_list, extract and verify the az and host
         az_host_list = az_host.split(':')
         zone = az_host_list[0]
         host = az_host_list[1]
         for hyp in host_list:
             if hyp.host == host:
                 if hyp.zone == zone:
                     # matches
                     return az_host
                     # else continue - another zone with same host name?
         # no match
         LOG.error('No match for availability zone and host ' + az_host)
         return None
     else:
         return self.auto_fill_az(host_list, az_host)
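
An illustration of the two accepted forms (zone and host names are hypothetical):

# sanitize_az_host(hyp_list, 'az1:host3') -> 'az1:host3' if host3 is in az1, else None
# sanitize_az_host(hyp_list, 'host3')     -> delegated to auto_fill_az(hyp_list, 'host3')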
Example #15
class Pod:
    def __init__(self, json_data):
        self.json_data = deepcopy(json_data)
        self.mem_usage = -1  # unit is MBytes
        self.mem_cache = -1  # unit is MBytes
        self.cpu_percentage = -1.0
        self.max_mem_limit = 0.0
        try:
            mem_limit_str = self.json_data['spec']['containers'][0][
                'resources']['limits']['memory']
            self.max_mem_limit = self.get_mem_from_str(mem_limit_str)
        except Exception as e:
            LOG.info(e)

        try:
            key = '/paas/applications/{}/pods/{}'.format(
                self.namespace, self.name)
            result = etcd_client.read(key).value
            tmp_json = json.loads(result)
            if int(time.time()) - tmp_json['timestamp'] <= 60:
                self.mem_usage = tmp_json['stats']['memory'][
                    'usage'] / 1024 / 1024
                self.mem_cache = tmp_json['stats']['memory'][
                    'cache'] / 1024 / 1024
                self.cpu_percentage = tmp_json['stats']['cpu'][
                    'cpu_percentage']
                if self.cpu_percentage is None:
                    self.cpu_percentage = -1.0
            else:
                LOG.warning('The record <{}>\'s timestamp <{}> is old'.format(
                    key, tmp_json['timestamp']))
        except Exception as e:
            LOG.error(e)
Example #16
    def check_pod(self, pod, app_controller):
        if not pod['is_ready'] or not pod['is_running']:
            return

        if app_controller.memory_controller is None:
            return

        memory_percent = round(
            float(pod['mem_usage'] - pod['mem_cache']) /
            float(pod['max_mem_limit']) * 100, 1)
        if memory_percent >= app_controller.memory_controller.warn_percent:
            msg = '{}:{}:cache={},usage={},percent={}%'.format(
                app_controller.name, pod['name'], pod['mem_cache'],
                pod['mem_usage'], memory_percent)
            alert_record = AlertRecord(key=pod['name'],
                                       alert_type=AlertType.MEM,
                                       alert_level=AlertLevel.WARN,
                                       timestamp=int(time.time()),
                                       msg=msg,
                                       hostname=pod['host_IP'],
                                       mail_list=app_controller.mail_list,
                                       phone_list=app_controller.phone_list)
            if Alert.alert(alert_record):
                LOG.info('sent mail: {}'.format(msg))
            LOG.warning(msg)
        else:
            Alert.remove_alert_history(pod['name'])
Example #17
 def stopWebserver(self, event, command):
     if self.webserverthread is None or (config.getboolean("main", "productive") and not event.isByOp()):
         return
     LOG.debug("stopping webserver ...")
     self.webserverthread.stop()
     self.webserverthread = None
     Broadcast("webserver stopped", parentclient=self.client, parent=event)
Example #18
def delete_user_preferences(transaction):
    user_preferences = transaction.find_all_by_fields(UserPreferences, {})

    for preference in user_preferences:
        transaction.delete(preference)

    LOG.debug('Deleted User Preference entries')
Example #19
    def _add_child_graph(self, parent, graphs):
        '''
        Add a graph which is a child of the given parent.

        @type  parent: Graph
        @param parent:
            The graph to spawn the child from.
        @type  graphs: list(Graph)
        @param graphs:
            The list to put the child into.
        '''
        LOG.info("Cloning from parent: %s", parent)
        while True:
            # Create a mutated child
            child = parent.clone(self._next_graph_name())
            self._mutate(child, self._mutation_factor)

            # Remove any inner nodes which have no inputs. We keep doing this
            # since removing one inner node might drop the depth of another.
            changed = True
            max_depth = child.num_layers - 1
            while changed:
                changed = False
                for node in tuple(child.mid):
                    if node.depth == 0 or node.depth >= max_depth:
                        child.remove_node(node)
                        changed = True

            if child.is_connected():
                graphs.append(child)
                LOG.info("Adding child graph:  %s", child)
                return
Example #20
def process_fecth(process_name, process_codes):
    total = len(process_codes)
    threads = []
    THREAD_NUM = 30
    if total % THREAD_NUM:
        count = (total // THREAD_NUM) + 1
    else:
        count = total // THREAD_NUM
    #print "%s threads (%s) start ... each num: %s" %(process_name,count,THREAD_NUM)
    for i in range(count):
        thread_name = "%s_thread_%d" % (process_name, i)
        thread_codes_start = i * THREAD_NUM
        thread_codes_end = i * THREAD_NUM + THREAD_NUM
        start = 0
        end = 0
        if thread_codes_start <= total:
            start = thread_codes_start
            if thread_codes_end <= total:
                end = thread_codes_end
            else:
                end = total
        thread_codes = process_codes[start:end]
        logger = LOG(thread_name)
        mylogger = logger.get_logger()
        th = threading.Thread(name=thread_name,
                              target=thread_fetch,
                              args=(thread_codes, thread_name, mylogger))
        threads.append(th)
        #print thread_name,len(thread_codes)
    # start all threads first, then join them; starting and joining in the
    # same loop iteration would run the threads one at a time
    for t in threads:
        t.start()
    for t in threads:
        t.join()
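
A worked example of the chunking arithmetic above (numbers are hypothetical):

total, THREAD_NUM = 95, 30   # 95 codes in batches of 30
count = (total // THREAD_NUM) + 1 if total % THREAD_NUM else total // THREAD_NUM
assert count == 4            # slices [0:30], [30:60], [60:90], then [90:120] trimmed to [90:95]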
Example #21
File: wires.py Project: gre7g/ktane
    def should_cut(self, post_number: int, color=None) -> None:
        LOG.debug("should_cut", post_number, color)

        # Did they specify a color (e.g. "last red" or "first white")?
        if color is None:
            # No color given: consider all colors, just skip unused (None) posts
            posts_in_use = [
                index for index, mapping in enumerate(self.mapping)
                if mapping is not None
            ]
        else:
            # Specific color
            posts_in_use = [
                index for index, mapping in enumerate(self.mapping)
                if COLOR_POSITIONS[mapping] == color
            ]
        if post_number < 0:
            # Count from end
            self.right_post = posts_in_use[post_number]
        else:
            # Post number (Warning: post #1 means index 0!)
            self.right_post = posts_in_use[post_number - 1]

        LOG.info("right_post=", self.right_post)

        for post in self.post_pins:
            post.irq(self.on_wrong_post, trigger=Pin.IRQ_RISING)
        self.post_pins[self.right_post].irq(self.on_right_post,
                                            trigger=Pin.IRQ_RISING)
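
A small illustration of the post-numbering convention above (mapping values are hypothetical):

posts_in_use = [0, 2, 5]         # indices of posts that still hold a wire
assert posts_in_use[1 - 1] == 0  # post_number = 1: post #1 means index 0
assert posts_in_use[-1] == 5     # post_number = -1: negative counts from the end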
Example #22
 def wrap(*args):
     time1 = time.time()
     ret = fn(*args)
     time2 = time.time()
     LOG.info('*** %s function took %0.3f ms', fn.__name__,
              (time2 - time1) * 1000.0)
     return ret
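
This wrap function is the inner closure of a timing decorator; fn comes from the enclosing scope. A minimal sketch of the full decorator (the name timed and the functools.wraps call are assumptions, not from the original source):

import functools
import logging
import time

LOG = logging.getLogger(__name__)

def timed(fn):
    @functools.wraps(fn)  # keep fn.__name__ intact for the log line
    def wrap(*args):
        time1 = time.time()
        ret = fn(*args)
        time2 = time.time()
        LOG.info('*** %s function took %0.3f ms', fn.__name__,
                 (time2 - time1) * 1000.0)
        return ret
    return wrap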
Example #23
def get_instance_path(os_node, instance_name):
    instance_folder = os.path.join(_get_instances_path(), os_node,
                                   instance_name)
    if not os.path.exists(instance_folder):
        LOG.debug("Creating the instance path %s", instance_folder)
        os.makedirs(instance_folder)
    return instance_folder
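
One caveat: the exists-check followed by makedirs is racy when two workers create the same path at once. On Python 3 a hedged alternative is to let makedirs tolerate an existing directory:

os.makedirs(instance_folder, exist_ok=True)  # no error if the path already exists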
Example #24
    def create(self, values):
        matches = self.find({'name': values[0], 'location': values[1], 'search_and': True})
        if not matches:
            self.verify_entry_type(values)
            records = DataConn().records()
            meta_dada = DataConn().meta_dada

            formatted_records = {}
            for line, row in enumerate(records):
                if row[-1] == 1:
                    for x in range(len(values)):
                        formatted_records[meta_dada[x]['field_name']] = self.format_for_necessary_size(values[x], meta_dada[x]['field_name'])

                    self.update_any_record(line, formatted_records)
                    field_number_created = line
                    break

            else:
                formatted_records = []
                for x in range(len(values)):
                    formatted_records.append(self.format_for_necessary_size(values[x], meta_dada[x]['field_name']))
                formatted_records.append(self.format_for_necessary_size('', 'owner'))
                field_number_created = DataConn().pack_in_file(formatted_records)
            LOG('info', self.only_log).message('Criado registro. %i. valores: %s' % (field_number_created, str(values)))
            return field_number_created
        else:
            LOG('debug', self.only_log).message('DuplicateKeyException no create. valores: ' + str(values))
            raise DuplicateKeyException
Example #25
 def remove_public_key(self, name):
     keypair_list = self.novaclient.keypairs.list()
     for key in keypair_list:
         if key.name == name:
             self.novaclient.keypairs.delete(name)
             LOG.info('Removed public key %s', name)
             break
Example #26
def delete_role_permissions(transaction):
    for (role_name, permission_names) in list(ROLE_PERMISSIONS.items()):
        role = transaction.find_one_by_fields(Role, True, {'name': role_name})
        if not role:
            continue

        permissions = []

        for permission_name in permission_names:
            permission = transaction.find_one_by_fields(
                Permission,
                True,
                {
                    'permission': permission_name,
                    'resource_type_id': role.resource_type_id,
                },
            )
            if not permission:
                continue
            permissions.append(permission)

        for permission in permissions:
            role_permission = transaction.find_one_by_fields(
                RolePermissions,
                True,
                {
                    'role_id': role.id,
                    'permission_id': permission.id
                },
            )

            if role_permission:
                transaction.delete(role_permission)
    LOG.debug('Deleted Group, User and Query Policy Role Permissions')
Example #27
    def __run_search_iteration(self, rate):
        """Run one iteration at the given rate level.

        rate: the rate to send on each port in percent (0 to 100)
        """
        self._modify_load(rate)

        # poll interval stats and collect them
        for stats in self.run_traffic():
            self.interval_collector.add(stats)
            time_elapsed_ratio = self.runner.time_elapsed() / self.run_config['duration_sec']
            if time_elapsed_ratio >= 1:
                self.cancel_traffic()
                if not self.skip_sleep():
                    time.sleep(self.config.pause_sec)
        self.interval_collector.reset()

        # get stats from the run
        stats = self.runner.client.get_stats()
        current_traffic_config = self._get_traffic_config()
        warning = self.compare_tx_rates(current_traffic_config['direction-total']['rate_pps'],
                                        stats['total_tx_rate'])
        if warning is not None:
            stats['warning'] = warning

        # save reliable stats from whole iteration
        self.iteration_collector.add(stats, current_traffic_config['direction-total']['rate_pps'])
        LOG.info('Average drop rate: %f', stats['overall']['drop_rate_percent'])
        return stats, current_traffic_config['direction-total']
Example #28
def set_irq_affinity(set_bitmap, irqs, cpulist):
    """Set irq affinity to the specified cpulist for list of irqs.

    :param set_bitmap: True: set bitmap file, False: set list file
    :param irqs: irq list
    :param cpulist: cpu list
    """
    _irqs = set()

    if set_bitmap:
        filename = 'smp_affinity'
    else:
        filename = 'smp_affinity_list'

    for irq in irqs:
        irq_aff_path = "/proc/irq/%s/%s" % (irq, filename)
        try:
            with open(irq_aff_path, 'w') as f:
                f.write(cpulist)
            _irqs.update([irq])
        except Exception as e:
            LOG.warning("Failed to write pci affine file:%(F)s, irq:%(I)s, "
                        "error=%(E)s" % {
                            "F": filename,
                            "I": irq,
                            "E": e
                        })
    return _irqs
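
A hypothetical usage sketch (IRQ numbers and CPU list are made up; writing under /proc/irq normally requires root):

# pin IRQs 24 and 25 to CPUs 0-3 using the human-readable list format
pinned = set_irq_affinity(set_bitmap=False, irqs=['24', '25'], cpulist='0-3')
LOG.info("IRQs successfully pinned: %s", pinned)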
Example #29
def reset_table_sequence_id(entity_class, transaction):

    mapper = class_mapper(entity_class)
    table_name = mapper.local_table

    if len(mapper.primary_key) > 1:
        LOG.info(
            'Cannot update primary key sequence for table \'%s\' as '
            'it has a composite primary key. ',
            table_name,
        )
    else:
        id_column = class_mapper(entity_class).primary_key[0].name
        reset_sequence_id_command = _RESET_SEQUENECE_COMMAND_FORMAT.format(
            table_name=table_name, id_column=id_column)

        # Disabling this warning as we don't want to expose the `execute`
        # method in our transaction class.
        # pylint:disable=W0212
        transaction._session.execute(reset_sequence_id_command)
        LOG.info(
            'Successfully staged updated primary key sequence for '
            'table \'%s\'',
            table_name,
        )
Example #30
    def get_ndr_and_pdr(self):
        """Start the NDR/PDR iteration and return the results."""
        dst = 'Bidirectional' if self.run_config['bidirectional'] else 'Unidirectional'
        targets = {}
        if self.config.ndr_run:
            LOG.info('*** Searching NDR for %s (%s)...', self.run_config['l2frame_size'], dst)
            targets['ndr'] = self.config.measurement.NDR
        if self.config.pdr_run:
            LOG.info('*** Searching PDR for %s (%s)...', self.run_config['l2frame_size'], dst)
            targets['pdr'] = self.config.measurement.PDR

        self.run_config['start_time'] = time.time()
        self.interval_collector = IntervalCollector(self.run_config['start_time'])
        self.interval_collector.attach_notifier(self.notifier)
        self.iteration_collector = IterationCollector(self.run_config['start_time'])
        results = {}
        self.__range_search(0.0, 200.0, targets, results)

        results['iteration_stats'] = {
            'ndr_pdr': self.iteration_collector.get()
        }

        if self.config.ndr_run:
            LOG.info('NDR load: %s', results['ndr']['rate_percent'])
            results['ndr']['time_taken_sec'] = \
                results['ndr']['timestamp_sec'] - self.run_config['start_time']
            if self.config.pdr_run:
                LOG.info('PDR load: %s', results['pdr']['rate_percent'])
                results['pdr']['time_taken_sec'] = \
                    results['pdr']['timestamp_sec'] - results['ndr']['timestamp_sec']
        else:
            LOG.info('PDR load: %s', results['pdr']['rate_percent'])
            results['pdr']['time_taken_sec'] = \
                results['pdr']['timestamp_sec'] - self.run_config['start_time']
        return results
Example #31
 def throwRandomData(self):
     rsize = 128
     rand = str(random.getrandbits(rsize))
     res = struct.pack("%ds" % rsize, rand.encode())  # pack() needs bytes on Python 3
     LOG.debug(" fuzzing with %d bytes: '%s'" % (rsize, rand))
     for i in range(0,127):
         self.sendMsg_UDP(i, res)
Example #32
def add_role_permissions(transaction):
    reset_table_sequence_id(RolePermissions, transaction)
    for (role_name, permission_names) in list(ROLE_PERMISSIONS.items()):
        role = transaction.find_one_by_fields(Role, True, {'name': role_name})
        if not role:
            raise ValueError(
                'Could not find role \'{name}\''.format(name=role_name))

        permissions = []

        for permission_name in permission_names:
            permission = transaction.find_one_by_fields(
                Permission,
                True,
                {
                    'permission': permission_name,
                    'resource_type_id': role.resource_type_id,
                },
            )
            if not permission:
                raise ValueError('Could not find permission \'{name}\''.format(
                    name=permission_name))
            permissions.append(permission)

        for permission in permissions:
            role_permission = RolePermissions(role_id=role.id,
                                              permission_id=permission.id)
            transaction.add_or_update(role_permission, flush=True)
    LOG.debug('Added Group, User and Query Policy Role Permissions')
Example #33
def get_controller_info(ssh_access, net, res_col, retry_count):
    if not ssh_access:
        return
    LOG.info('Fetching OpenStack deployment details...')
    sshcon = sshutils.SSH(ssh_access, connect_retry_count=retry_count)
    if sshcon is None:
        LOG.error('Cannot connect to the controller node')
        return
    res = {}
    res['distro'] = sshcon.get_host_os_version()
    res['openstack_version'] = sshcon.check_openstack_version()
    res['cpu_info'] = sshcon.get_cpu_info()
    if net:
        l2type = res_col.get_result('l2agent_type')
        encap = res_col.get_result('encapsulation')
        if l2type:
            if encap:
                res['nic_name'] = sshcon.get_nic_name(l2type, encap,
                                                      net.internal_iface_dict)
            res['l2agent_version'] = sshcon.get_l2agent_version(l2type)
    # print results
    CONLOG.info(res_col.ppr.pformat(res))
    FILELOG.info(json.dumps(res, sort_keys=True))

    res_col.add_properties(res)
Example #34
    def measure_flow(self, label, target_ip):
        label = self.add_location(label)
        FlowPrinter.print_desc(label)

        # results for this flow as a dict
        perf_output = self.client.run_client(label, target_ip,
                                             self.server,
                                             bandwidth=self.config.vm_bandwidth,
                                             az_to=self.server.az)
        if self.config.keep_first_flow_and_exit:
            CONLOG.info(self.rescol.ppr.pformat(perf_output))
            FILELOG.info(json.dumps(perf_output, sort_keys=True))
            LOG.info('Stopping execution after first flow, cleanup all VMs/networks manually')
            sys.exit(0)

        if self.config.stop_on_error:
            # check if there is any error in the results
            results_list = perf_output['results']
            for res_dict in results_list:
                if 'error' in res_dict:
                    LOG.error('Stopping execution on error, cleanup all VMs/networks manually')
                    CONLOG.info(self.rescol.ppr.pformat(perf_output))
                    FILELOG.info(json.dumps(perf_output, sort_keys=True))
                    sys.exit(2)

        self.rescol.add_flow_result(perf_output)
        CONLOG.info(self.rescol.ppr.pformat(perf_output))
        FILELOG.info(json.dumps(perf_output, sort_keys=True))
Example #35
def jsonloads(jsonstr):
    try:
        return json.loads(jsonstr)
    except ValueError:
        errmsg = "xCAT response data is not in JSON format"
        LOG.error(errmsg)
        raise ZVMException(msg=errmsg)
Example #36
    def __add_router_interface(self):

        # Pick the first subnet in each internal network - the list should be
        # non-empty and contain only 1 subnet since it is supposed to be a
        # private network

        # But first check that the router does not already have this subnet
        # so retrieve the list of all ports, then check if there is one port
        # - matches the subnet
        # - and is attached to the router
        # Assumed that both management networks are created together so checking for one of them
        ports = self.neutron_client.list_ports()['ports']
        for port in ports:
            # Skip the check on stale ports
            if port['fixed_ips']:
                port_ip = port['fixed_ips'][0]
                if (port['device_id'] == self.ext_router['id']) and \
                   (port_ip['subnet_id'] == self.vm_int_net[0]['subnets'][0]):
                    LOG.info('Ext router already associated to the internal network.')
                    return

        for int_net in self.vm_int_net:
            body = {
                'subnet_id': int_net['subnets'][0]
            }
            self.neutron_client.add_interface_router(self.ext_router['id'], body)
            LOG.debug('Ext router associated to ' + int_net['name'])
            # If ipv6 is enabled then add the second subnet
            if self.ipv6_enabled:
                body = {
                    'subnet_id': int_net['subnets'][1]
                }
                self.neutron_client.add_interface_router(self.ext_router['id'], body)
Example #37
def get_bdw_kbps(bdw, bdw_unit):
    if not bdw_unit:
        # bits/sec
        return bdw / 1000
    if bdw_unit in MULTIPLIERS:
        return int(bdw * MULTIPLIERS[bdw_unit])
    LOG.error('Error: unknown multiplier: ' + bdw_unit)
    return bdw
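
MULTIPLIERS is defined elsewhere in the module; given the bits/sec fallthrough above, it presumably maps iperf-style unit letters to a kbps factor, along these lines (an assumption, not the original table):

MULTIPLIERS = {'K': 1,         # Kbits/sec -> kbps
               'M': 1000,      # Mbits/sec -> kbps
               'G': 1000000}   # Gbits/sec -> kbps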
Example #38
 def sendToLog(self):
     """
     Dispatcher to LOG
     @rtype: boolean
     @returns: True
     """
     LOG.info("EVENT: %s" % (self.msg))
     return True
Example #39
 def delete(self, obj_typ, id=None, obj=None):
     if not (id or obj):
         LOG.error('Give either net_id or net_obj')
         return None
     if obj:
         id = obj.get('id')
     return self.sendjson('delete', '%(obj_typ)s/%(id)s' %
                          {'obj_typ': obj_typ,
                           'id': id})
Example #40
def parse_config(filename):
    """
    Fnord
    """
    # {{{
    filename = expanduser(expandvars(filename))
    LOG.debug("Parsing configuration in file '%s'",filename)
    CONFIG.read(filename)
Example #41
 def clearStats(self):
     if not config.getboolean('stats', 'enable'): return
     fn = config.get("stats", "cachefilename")
     try:
         os.remove(fn)
         LOG.debug("stats cleared")
     except OSError:
         # the stats cache file may not exist yet
         pass
Example #42
	def init(self):
		LOG.debug("PlayerInfoPlugin started")
		self.updateConfig()
		self.enabled  = config.getboolean('playerinfos', 'enable')
		self.idletime = config.getint    ('playerinfos', 'idletime')
		self.registerCallback("on_receive_command", self.commandReceived)
		self.registerCallback("on_mainloop", self.onMainLoop)
		self.registerChatCommand("stopinfo", self.stopInfo)
		self.registerChatCommand("idleinfo", self.idleInfo)
Example #43
 def func(engine):
     if not engine.running():
         LOG.error("Engine not running! Not executing action")
         return 0
     # hold the lock while mutating the game; "with" releases it even on error
     with engine._update_game_lock:
         done_lines = f(engine)
         engine.print_game()
     return done_lines
Example #44
    def __exec_light(self, cmd, timeout=TIMEOUT):
        proc = BantorraLightProcess(cmd, self.queue)
        proc.start()
        proc.join(timeout)

        if proc.is_alive():
            proc.kill()
            time.sleep(1)
            L.debug("proc.terminate. %s" % proc.is_alive())
Example #45
 def startWebserver(self, event, command):
     if config.getboolean("main", "productive") and not event.isByOp():
         return
     if not config.getboolean("webserver", "enable") or not self.webserverthread is None:
         return
     LOG.debug("starting webserver ...")
     self.webserverthread = WebserverThread(self.client)
     self.webserverthread.start()
     Broadcast("webserver started on port %d"% self.webserverthread.port, parentclient=self.client, parent=event)
Example #46
def decode_size_list(argname, size_list):
    try:
        pkt_sizes = [int(size) for size in size_list.split(',')]
    except ValueError:
        LOG.error('Invalid %s parameter. A valid input must be '
                  'integers separated by comma.' % argname)
        sys.exit(1)
    return pkt_sizes
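
A hypothetical call (argname is only used in the error message):

pkt_sizes = decode_size_list('--packet-sizes', '64,512,1518')
# -> [64, 512, 1518]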
Example #47
    def move_down(self):
        LOG.debug("ENGINE - move down")
        self._restart_timeout = True
        if self.game_state.drop_block_is_stuck():
            done_lines = self.game_state.move_block_down()
            self.game_state.start_new_drop()
            return done_lines

        self.game_state.move_block_down()
        return 0
Example #48
 def delete_net(self, network):
     if network:
         name = network['name']
         # it may take some time for ports to be cleared so we need to retry
         for _ in range(1, 5):
             try:
                 self.neutron_client.delete_network(network['id'])
                 LOG.info('Network %s deleted.', name)
                 break
             except NetworkInUseClient:
                 time.sleep(1)
Example #49
 def save_to_db(self, cfg):
     '''Save results to MongoDB database.'''
     LOG.info("Saving results to MongoDB database...")
     post_id = pns_mongo.\
         pns_add_test_result_to_mongod(cfg.vmtp_mongod_ip,
                                       cfg.vmtp_mongod_port,
                                       cfg.vmtp_db,
                                       cfg.vmtp_collection,
                                       self.results)
     if post_id is None:
         LOG.error("Failed to add result to DB")
Example #50
 def disconnect(self, mode=M_BOTH):
     if self.socket_tcp is not None and mode & M_TCP:
         LOG.debug('closing TCP socket')
         self.socket_tcp.close()
         self.socket_tcp = None
     if self.socket_udp is not None and mode & M_UDP:
         LOG.debug('closing UDP socket')
         self.socket_udp.close()
         self.socket_udp = None
     self.connectionmode &= ~mode
     if self.connectionmode == M_NONE:
         self.running = False
Example #51
def test_native_tp(nhosts, ifname, config):
    FlowPrinter.print_desc('Native Host to Host throughput')
    result_list = []
    server_host = nhosts[0]
    server = PerfInstance('Host-' + server_host.host + '-Server', config, server=True)

    if not server.setup_ssh(server_host):
        server.display('SSH failed, check IP or make sure public key is configured')
    else:
        server.display('SSH connected')
        server.create()
        # if inter-node-only requested we avoid running the client on the
        # same node as the server - but only if there is at least another
        # IP provided
        if config.inter_node_only and len(nhosts) > 1:
            # remove the first element of the list
            nhosts.pop(0)
        # IP address clients should connect to, check if the user
        # has passed a server listen interface name
        if ifname:
            # use the IP address configured on given interface
            server_ip = server.get_interface_ip(ifname)
            if not server_ip:
                LOG.error('Cannot get IP address for interface ' + ifname)
            else:
                server.display('Clients will use server IP address %s (%s)' %
                               (server_ip, ifname))
        else:
            # use same as ssh IP
            server_ip = server_host.host

        if server_ip:
            # start client side, 1 per host provided
            for client_host in nhosts:
                client = PerfInstance('Host-' + client_host.host + '-Client', config)
                if not client.setup_ssh(client_host):
                    client.display('SSH failed, check IP or make sure public key is configured')
                else:
                    client.buginf('SSH connected')
                    client.create()
                    if client_host == server_host:
                        desc = 'Native intra-host'
                    else:
                        desc = 'Native inter-host'
                    res = client.run_client(desc,
                                            server_ip,
                                            server,
                                            bandwidth=config.vm_bandwidth)
                    result_list.append(res)
                client.dispose()
    server.dispose()

    return result_list
Example #52
    def create_net(self, network_name, subnet_name, cidr, dns_nameservers,
                   subnet_name_ipv6=None, cidr_ipv6=None, ipv6_mode=None,
                   enable_dhcp=True):

        for network in self.networks:
            if network['name'] == network_name:
                LOG.info('Found existing internal network: %s', network_name)
                return network

        body = {
            'network': {
                'name': network_name,
                'admin_state_up': True
            }
        }
        network = self.neutron_client.create_network(body)['network']
        body = {
            'subnet': {
                'name': subnet_name,
                'cidr': cidr,
                'network_id': network['id'],
                'enable_dhcp': True,
                'ip_version': 4,
                'dns_nameservers': dns_nameservers
            }
        }
        if not enable_dhcp:
            body['subnet']['enable_dhcp'] = False

        subnet = self.neutron_client.create_subnet(body)['subnet']
        # add subnet id to the network dict since it has just been added
        network['subnets'] = [subnet['id']]
        # If ipv6 is enabled then create and add the ipv6 subnet
        if ipv6_mode:
            body = {
                'subnet': {
                    'name': subnet_name_ipv6,
                    'cidr': cidr_ipv6,
                    'network_id': network['id'],
                    'enable_dhcp': True,
                    'ip_version': 6,
                    'ipv6_ra_mode': ipv6_mode,
                    'ipv6_address_mode': ipv6_mode
                }
            }
            if not enable_dhcp:
                body['subnet']['enable_dhcp'] = False
            subnet = self.neutron_client.create_subnet(body)['subnet']
            # add the subnet id to the network dict
            network['subnets'].append(subnet['id'])
        LOG.info('Created internal network: %s.', network_name)
        return network
Example #53
 def get_file_from_host(self, from_path, to_path):
     '''
     A wrapper API on top of the paramiko scp module, to scp
     a remote file to the local host.
     '''
     sshcon = self._get_client()
     scpcon = scp.SCPClient(sshcon.get_transport())
     try:
         scpcon.get(from_path, to_path)
     except scp.SCPException as exp:
         LOG.error("Receive failed: [%s]", exp)
         return 0
     return 1
Example #54
 def put_file_to_host(self, from_path, to_path):
     '''
     A wrapper API on top of the paramiko scp module, to scp
     a local file to the remote host.
     '''
     sshcon = self._get_client()
     scpcon = scp.SCPClient(sshcon.get_transport())
     try:
         scpcon.put(from_path, remote_path=to_path)
     except scp.SCPException as exp:
         LOG.error("Send failed: [%s]", exp)
         return 0
     return 1
Example #55
    def __exec(self, cmd, timeout=TIMEOUT):
        proc = BantorraProcess(cmd, self.queue)
        proc.start()
        proc.join(timeout)

        if proc.is_alive():
            proc.kill()
            time.sleep(3)
            L.debug("proc.terminate. %s" % proc.is_alive())

        if self.queue.empty():
            return None
        return self.queue.get()
Example #56
 def create_port(self, net_id, sec_group_list, vnic_type):
     body = {
         "port": {
             "network_id": net_id,
             "security_groups": sec_group_list
         }
     }
     if vnic_type:
         body['port']['binding:vnic_type'] = vnic_type
     port = self.neutron_client.create_port(body)
     if self.config.debug:
         LOG.debug('Created port ' + port['port']['id'])
     return port['port']
Example #57
    def _get_path(self, end_node):
        #LOG.debug("End node: %s" % end_node)
        path = []

        node = end_node
        while node.previous_node:
            for a in node.previous_actions:
                path.insert(0, a)

            node = node.previous_node

        LOG.debug("PATH: %s" % path)
        LOG.debug("Start node: %s" % node)
        return path
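
An illustration of the backward walk above (node names are hypothetical):

# chain: start <- n1 <- n2 (end_node), where
#   n2.previous_actions = ['B'] and n1.previous_actions = ['A']
# walking back from n2 inserts each action at the front, so path == ['A', 'B']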
Example #58
 def savetofile(self, filename):
     """
     Save the grf database to a file
     @type  filename: string
     @param filename: the filename to save to
     """
     if not self.canSaveLoad or not self.listchanged or not config.getboolean("serverstats", "savenewgrfs"):
         return
     import pickle
     try:
         f = open(filename, 'wb')
         pickle.dump(self.__database, f, 1)
         f.close()
     except IOError:
         LOG.error("error while saving newgrf cache file!")