Exemplo n.º 1
0
 def _custom_click_handler(self, wait_ajax):
     """Handler called from pytest_selenium.

     Skips the click (logging an error) when the element is dimmed and no
     force flag is set; otherwise waits for the element and clicks it.
     """
     skip_click = self.is_dimmed and not self._force
     if skip_click:
         logger.error("Could not click %s because it was dimmed", repr(self))
         return None
     sel.wait_for_element(self, timeout=5)
     return sel.click(self, no_custom_handler=True, wait_ajax=wait_ajax)
Exemplo n.º 2
0
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured.

    Args:
        level: Desired rails log level string (compared case-insensitively).
        validate_against_worker: Worker class whose PID tags the evm.log lines
            that confirm the change took effect.
    """
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.get_yaml_config()
    if not str(yaml['log']['level_rails']).lower() == level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        yaml['log']['level_rails'] = level
        store.current_appliance.set_yaml_config(yaml)

        max_attempts = 60  # single retry budget (was a magic 60 repeated twice)
        attempts = 0
        detected = False
        while (not detected and attempts < max_attempts):
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if not detected:
            # Bug fix: the old `if not (attempts < 60)` also fired when the
            # change was detected on the final attempt.  Note the error in the
            # logger but continue as the appliance could be slow at logging
            # that the log level changed.
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
Exemplo n.º 3
0
def get_vm_config_modified_time(name, vm_name, datastore_url, provider_key):
    """Return the .vmx file's modification time for a VM, or False on any error.

    Resolves a host for the provider (by name substring, falling back to an
    IPv4 address embedded in *name*), SSHes to it and runs `date -r` on the
    VM's .vmx file.  The result is converted to the local timezone and
    returned as a naive datetime.
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        host_creds = providers_data[provider_key].get('host_credentials', 'host_default')
        hostname = [host['name'] for host in hosts if name in host['name']]
        if not hostname:
            # Fall back to an IPv4 address embedded in the supplied name.
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', name)
        connect_kwargs = {
            'username': credentials[host_creds]['username'],
            'password': credentials[host_creds]['password'],
            'hostname': hostname[0]
        }
        datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
        ssh_client = SSHClient(**connect_kwargs)
        try:
            command = 'find ~/{}/{} -name {} | xargs  date -r'.format(
                datastore_path[0], str(vm_name), str(vm_name) + '.vmx')
            exit_status, output = ssh_client.run_command(command)
        finally:
            # Always release the SSH connection, even if run_command raises;
            # previously the client leaked on any command failure.
            ssh_client.close()
        modified_time = parser.parse(output.rstrip())
        modified_time = modified_time.astimezone(pytz.timezone(str(get_localzone())))
        return modified_time.replace(tzinfo=None)
    except Exception as e:
        logger.error(e)
        return False
Exemplo n.º 4
0
def map_vms_to_ids(provider_names_to_vm_names):
    """Takes a dictionary of providers with a list of vms and generates a list of vm_ids for each
    vm in the data structure.  We need this because more than one provider can lead to a
    VM name appearing more than once, so names must be resolved per provider.

    Args:
        provider_names_to_vm_names: mapping of provider name -> list of VM names.

    Returns:
        List of vm ids for every VM that could be matched in the inventory.
        Unmatched providers/VMs are logged as errors but do not raise.

    NOTE: Python 2 code (`itervalues`).
    """
    starttime = time.time()
    expected_num_ids = sum(len(x) for x in provider_names_to_vm_names.itervalues())
    expected_num_providers = len(provider_names_to_vm_names.keys())
    # Intended ouput here (List of vm ids):
    vm_ids = []
    # Intermediate data structure holding provider_id to list of vm names
    provider_ids_to_vm_names = {}

    # First get all providers details
    all_providers_details = []
    for pro_id in get_all_provider_ids():
        details = get_provider_details(pro_id)
        all_providers_details.append(details)

    # Copy used to detect providers that never matched by name.
    providers_to_vms_copy = dict(provider_names_to_vm_names)
    # Next map provider_name to the provider_id
    for provider_name in provider_names_to_vm_names:
        for provider_detail in all_providers_details:
            if provider_name == provider_detail['name']:
                # Copy VMs from that provider to the Intermediate data structure
                provider_ids_to_vm_names[provider_detail['id']] = list(
                    provider_names_to_vm_names[provider_name])
                del providers_to_vms_copy[provider_name]
                break

    if len(providers_to_vms_copy) > 0:
        # Error, we did not find all providers, likely there is an issue with the scenario data
        # inside of cfme_performance.yml or cfme_performance.local.yml
        logger.error('Provider(s) + vm(s) not found in CFME Inventory: {}'.format(
            providers_to_vms_copy))

    # Deep copy: VM names are removed from it as they are matched, leaving
    # only the unmatched leftovers for the final error report.
    provider_ids_to_vm_names_copy = copy.deepcopy(provider_ids_to_vm_names)
    # Now map each vm_name+ems_id to the actual vm_id and append to our list
    for vm_id in get_all_vm_ids():
        vm_details = get_vm_details(vm_id)
        for provider_id in provider_ids_to_vm_names:
            if ('ems_id' in vm_details and provider_id == vm_details['ems_id']):
                # Match provider_id, now check vm_name
                for vm_name in provider_ids_to_vm_names[provider_id]:
                    if vm_name == vm_details['name']:
                        logger.debug('Matching {} to vm id: {}'.format(vm_name, vm_id))
                        vm_ids.append(vm_id)
                        del (provider_ids_to_vm_names_copy[provider_id]
                            [provider_ids_to_vm_names_copy[provider_id].index(vm_name)])
                        break
        # Early exit once every requested VM has been matched.
        if (sum(len(x) for x in provider_ids_to_vm_names_copy.itervalues()) == 0):
            break

    # Now check for left over vms that we did not match:
    leftover_num_ids = sum(len(x) for x in provider_ids_to_vm_names_copy.itervalues())
    if leftover_num_ids > 0:
        logger.error('(Provider_id(s)) + VM(s) not found in CFME inventory: {}'.format(
            provider_ids_to_vm_names_copy))
    logger.debug('Mapped {}/{} vm ids/names over {}/{} provider ids/names in {}s'.format(
        len(vm_ids), expected_num_ids, len(provider_ids_to_vm_names.keys()), expected_num_providers,
        round(time.time() - starttime, 2)))
    return vm_ids
Exemplo n.º 5
0
def pytest_runtest_teardown(item, nextitem):
    """Artifactor teardown hook: finish the test, sanitize logs, report to ostriz."""
    name, location = get_test_idents(item)
    appliance = get_or_create_current_appliance()
    fire_art_test_hook(
        item, 'finish_test',
        slaveid=store.slaveid, ip=appliance.address, wait_for_task=True)
    fire_art_test_hook(item, 'sanitize', words=words)

    # Jenkins build context, pulled straight from the environment.
    jenkins_data = {
        'build_url': os.environ.get('BUILD_URL'),
        'build_number': os.environ.get('BUILD_NUMBER'),
        'git_commit': os.environ.get('GIT_COMMIT'),
        'job_name': os.environ.get('JOB_NAME')
    }

    # Browser capabilities are best-effort: any failure just means no
    # environment parameters in the report.
    try:
        caps = appliance.browser.widgetastic.selenium.capabilities
        param_dict = {
            'browserName': caps['browserName'],
            'browserPlatform': caps['platform'],
            'browserVersion': caps['version']
        }
    except Exception as e:
        logger.error(e)
        param_dict = None

    fire_art_test_hook(
        item, 'ostriz_send', env_params=param_dict,
        slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data)
Exemplo n.º 6
0
 def migrate_cluster(self, clustername, username, src_host, new_host_list, user_info):
     """Migrate every container of a cluster off src_host to hosts in new_host_list.

     The cluster is stopped first; if it was previously running it is started
     again afterwards (also on failure, before returning the error).
     Returns [True, ""] on success or [False, message] on error.
     """
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     prestatus = info['status']
     # Containers can't be migrated live: stop the whole cluster first.
     self.stop_cluster(clustername, username)
     for container in info['containers']:
         if not container['host'] == src_host:
             continue
         # Try target hosts in random order for simple load spreading.
         random.shuffle(new_host_list)
         for new_host in new_host_list:
             status,msg = self.migrate_container(clustername,username,container['containername'],new_host,user_info)
             if status:
                 break
             else:
                 logger.error(msg)
         else:
             # for/else: no target host accepted this container.  Restore the
             # cluster's previous run state and report the last error message.
             if prestatus == 'running':
                 self.start_cluster(clustername, username, user_info)
             return [False, msg]
     logger.info("[Migrate] prestatus:%s for cluster(%s) user(%s)"%(prestatus, clustername, username))
     if prestatus == 'running':
         status, msg = self.start_cluster(clustername, username, user_info)
         if not status:
             return [False, msg]
     return [True, ""]
Exemplo n.º 7
0
    def login(self, user_name, password):
        """Log in via the site's login form.

        Opens the login page, fills the username/password fields and submits.
        If a captcha appears it must be completed manually; the method blocks
        until the page URL changes away from the login URL.
        """
        login_url = ini_config.login_url
        # Open the login page.
        self.browser.get(login_url)
        time.sleep(2)

        # Fill the form automatically and submit; retry until the username
        # field is present (a captcha, if shown, must be filled by hand).
        while True:
            try:
                user_name_obj = self.browser.find_element_by_id(
                    'TANGRAM__PSP_3__userName'
                )
                break
            except Exception:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit — narrowed to Exception.
                logger.error(traceback.format_exc())
                time.sleep(1)
        user_name_obj.send_keys(user_name)
        ps_obj = self.browser.find_element_by_id('TANGRAM__PSP_3__password')
        ps_obj.send_keys(password)
        sub_obj = self.browser.find_element_by_id('TANGRAM__PSP_3__submit')
        sub_obj.click()

        # While the URL hasn't changed, the login hasn't completed: keep waiting.
        while self.browser.current_url == login_url:
            time.sleep(1)
Exemplo n.º 8
0
def process_provider_vms(provider_key, matchers, delta, vms_to_delete):
    """Scan one provider's VMs and record (name, age) pairs of VMs older than
    *delta* into the shared *vms_to_delete* dict.

    `lock` guards both stdout printing and the shared dict (this runs from
    multiple threads).  NOTE: Python 2 code (print statement syntax).
    """
    with lock:
        print '%s processing' % provider_key
    try:
        now = datetime.datetime.now()
        provider = provider_factory(provider_key)
        for vm_name in provider.list_vm():
            # Only consider VMs whose name matches one of the regexes.
            if not match(matchers, vm_name):
                continue

            try:
                vm_creation_time = provider.vm_creation_time(vm_name)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt.
                logger.error('Failed to get creation/boot time for %s on %s' % (
                    vm_name, provider_key))
                continue

            # Old enough? Queue it for deletion with its age.
            if vm_creation_time + delta < now:
                vm_delta = now - vm_creation_time
                with lock:
                    vms_to_delete[provider_key].add((vm_name, vm_delta))
        with lock:
            print '%s finished' % provider_key
    except Exception as ex:
        with lock:
            print '%s failed' % provider_key
        logger.error('failed to process vms from provider %s', provider_key)
        logger.exception(ex)
Exemplo n.º 9
0
def save_cluster(user, beans, form):
    """Save a cluster container as an image and return a JSON status string.

    Validates the form, checks for an existing image (unless `isforce` is
    "true"), then asks the cluster manager to create the image.  The per-user
    lock is held for the whole create/check section and always released.
    """
    global G_vclustermgr
    clustername = form.get('clustername', None)
    if clustername is None:  # `is None` instead of `== None` (identity check, PEP 8)
        return json.dumps({'success':'false', 'message':'clustername is null'})

    imagename = form.get("image", None)
    description = form.get("description", None)
    containername = form.get("containername", None)
    isforce = form.get("isforce", None)
    G_ulockmgr.acquire(user)
    try:
        if not isforce == "true":
            # Refuse to overwrite an existing image unless explicitly forced.
            [status,message] = G_vclustermgr.image_check(user,imagename)
            if not status:
                return json.dumps({'success':'false','reason':'exists', 'message':message})

        user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")})
        [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"])
        if status:
            logger.info("image has been saved")
            return json.dumps({'success':'true', 'action':'save'})
        else:
            # Creation refused, e.g. image quota exceeded.
            logger.debug(message)
            return json.dumps({'success':'false', 'reason':'exceed', 'message':message})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        # Always release the per-user lock, whatever path we return by.
        G_ulockmgr.release(user)
Exemplo n.º 10
0
def process_provider_vms(provider_key, matchers, delta, vms_to_delete):
    """Scan one provider's VMs and record (name, age) pairs of VMs older than
    *delta* into the shared *vms_to_delete* dict.

    `lock` guards stdout printing, the shared dict and the (known-racy)
    management-system lookup.  NOTE: Python 2 code (print statement syntax).
    """
    with lock:
        print '{} processing'.format(provider_key)
    try:
        now = datetime.datetime.now()
        with lock:
            # Known conf issue :)
            provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            # Only consider VMs whose name matches one of the regexes.
            if not match(matchers, vm_name):
                continue

            try:
                vm_creation_time = provider.vm_creation_time(vm_name)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt.
                logger.error('Failed to get creation/boot time for %s on %s' % (
                    vm_name, provider_key))
                continue

            # Old enough? Queue it for deletion with its age.
            if vm_creation_time + delta < now:
                vm_delta = now - vm_creation_time
                with lock:
                    vms_to_delete[provider_key].add((vm_name, vm_delta))
        with lock:
            print '{} finished'.format(provider_key)
    except Exception as ex:
        with lock:
            # Print out the error message too because logs in the job get deleted
            print '{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex))
        logger.error('failed to process vms from provider %s', provider_key)
        logger.exception(ex)
Exemplo n.º 11
0
 def add_msg(self,taskid,username,instanceid,status,token,errmsg):
     """Append a TaskMsg for this task/instance to the message queue.

     Thread-safe: taskmsgs is only touched while msgslock is held.  Failures
     building/appending the message are logged and swallowed.
     """
     self.msgslock.acquire()
     try:
         self.taskmsgs.append(rpc_pb2.TaskMsg(taskid=str(taskid),username=username,instanceid=int(instanceid),instanceStatus=status,token=token,errmsg=errmsg))
     except Exception:
         logger.error(traceback.format_exc())
     finally:
         # Release in `finally` so the lock cannot leak even if the logging
         # call itself raises (previously release was outside any finally).
         self.msgslock.release()
Exemplo n.º 12
0
    def pytest_sessionfinish(self, exitstatus):
        """Collect, merge and report UI coverage at the end of the session."""
        # Master/standalone needs to move all the reports to an appliance for
        # the source report; slaves only collect.
        if store.parallelizer_role != 'master':
            manager().collect()

        # For slaves, everything is done at this point.
        if store.parallelizer_role == 'slave':
            return

        # On master/standalone, merge all the collected reports.
        manager().merge()

        try:
            global ui_coverage_percent
            merged_run = log_path.join('coverage', 'merged', '.last_run.json')
            last_run = json.load(merged_run.open())
            ui_coverage_percent = last_run['result']['covered_percent']
            # Green above the 40% threshold, red otherwise; always bold.
            color = 'green' if ui_coverage_percent > 40 else 'red'
            style = {'bold': True, color: True}
            store.write_line('UI Coverage Result: {}%'.format(ui_coverage_percent),
                **style)
        except Exception as ex:
            logger.error('Error printing coverage report to terminal')
            logger.exception(ex)
Exemplo n.º 13
0
def _setup_provider(provider_key, request=None):
    """Set up a provider, skipping the current test if setup fails.

    This function is dynamically "fixturized" to set up a specific provider,
    optionally skipping the provider setup if that provider has previously
    failed (tracked in the module-level `_failed_providers` set).
    """
    def skip(provider_key, previous_fail=False):
        # Report the skip to artifactor when a test node is available.
        if request:
            node = request.node
            name, location = get_test_idents(node)
            skip_data = {'type': 'provider', 'reason': provider_key}
            art_client.fire_hook('skip_test', test_location=location, test_name=name,
                skip_data=skip_data)
        # pytest.skip() raises Skipped itself; the previous `raise
        # pytest.skip(...)` wrapper never executed past the call and was
        # misleading dead syntax.
        if previous_fail:
            pytest.skip('Provider {} failed to set up previously in another test, '
                        'skipping test'.format(provider_key))
        else:
            pytest.skip('Provider {} failed to set up this time, '
                        'skipping test'.format(provider_key))

    if provider_key in _failed_providers:
        skip(provider_key, previous_fail=True)

    try:
        providers.setup_provider(provider_key)
    except Exception as ex:
        logger.error('Error setting up provider %s', provider_key)
        logger.exception(ex)
        # Remember the failure so later tests skip immediately.
        _failed_providers.add(provider_key)
        skip(provider_key)
Exemplo n.º 14
0
def wait_for_miq_server_workers_started(evm_tail=None, poll_interval=5):
    """Waits for the CFME's workers to be started by tailing evm.log for:
    'INFO -- : MIQ(MiqServer#wait_for_started_workers) All workers have been started'
    Verified works with 5.5 and 5.6 appliances.
    """
    if evm_tail is None:
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

    max_attempts = 60
    attempt = 0
    detected = False
    while attempt < max_attempts and not detected:
        logger.debug('Attempting to detect MIQ Server workers started: {}'.format(attempt))
        for line in evm_tail:
            # The readiness marker is the wait_for_started_workers line
            # reporting that all workers came up.
            if ('MiqServer#wait_for_started_workers' in line
                    and 'All workers have been started' in line):
                logger.info('Detected MIQ Server is ready.')
                detected = True
                break
        time.sleep(poll_interval)  # Allow more log lines to accumulate
        attempt += 1
    if attempt >= max_attempts:
        logger.error('Could not detect MIQ Server workers started in {}s.'.format(
            poll_interval * max_attempts))
    evm_tail.close()
Exemplo n.º 15
0
def main(args):
    """Entry point: load the config file named by --config-file.

    Exits with status 1 (after logging) if the file cannot be read.
    NOTE: Python 2 code (`except Exception, e` syntax).
    """
    try:
        readconfig(args['--config-file'])
    except Exception, e:
        logger.error("Error reading config file from location: {0}".format(args['--config-file']))
        logger.error(str(e))
        sys.exit(1)
Exemplo n.º 16
0
    def createImage(self,user,image,lxc,description="Not thing", imagenum=10):
        """Create a private image tarball from a container's volume directory.

        Tars the container volume into <imgpath>/private/<user>/<image>.tz,
        enforcing a per-user image-count quota for new images.  Returns
        [True, msg] on success or [False, msg] when the quota is exceeded.
        """
        fspath = self.NFS_PREFIX + "/local/volume/" + lxc
        imgpath = self.imgpath + "private/" + user + "/"
        #tmppath = self.NFS_PREFIX + "/local/tmpimg/"
        #tmpimage = str(random.randint(0,10000000)) + ".tz"

        # Enforce the quota only for a new image name; overwriting an
        # existing image (or a first-time user dir) skips the count check.
        if not os.path.exists(imgpath+image) and os.path.exists(imgpath):
            cur_imagenum = 0
            for filename in os.listdir(imgpath):
                if os.path.isdir(imgpath+filename):
                    cur_imagenum += 1
            if cur_imagenum >= int(imagenum):
                return [False,"image number limit exceeded"]
        #sys_run("mkdir -p %s" % tmppath, True)
        sys_run("mkdir -p %s" % imgpath,True)
        try:
            # Pack the whole volume into the image tarball; failure is logged
            # but creation continues (best-effort behavior kept as-is).
            sys_run("tar -cvf %s -C %s ." % (imgpath+image+".tz",self.dealpath(fspath)), True)
        except Exception as e:
            logger.error(e)
        #try:
            #sys_run("cp %s %s" % (tmppath+tmpimage, imgpath+image+".tz"), True)
            #sys_run("rsync -a --delete --exclude=lost+found/ --exclude=root/nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (self.dealpath(fspath),imgpath+image),True)
        #except Exception as e:
        #    logger.error(e)
        #sys_run("rm -f %s" % tmppath+tmpimage, True)
        #sys_run("rm -f %s" % (imgpath+"."+image+"_docklet_share"),True)
        self.updateinfo(user,image,description)
        logger.info("image:%s from LXC:%s create success" % (image,lxc))
        return [True, "create image success"]
Exemplo n.º 17
0
def db_commit():
    """Commit the DB session; on failure roll back, log, and re-raise."""
    try:
        db.session.commit()
    except Exception:
        # Keep the session usable for the caller, then propagate the error.
        db.session.rollback()
        logger.error(traceback.format_exc())
        raise
Exemplo n.º 18
0
 def detachFS(self, lxc, vgname="docklet-group"):
     """Unmount the container's rootfs; return True on success, else False."""
     rootfs = "/var/lib/lxc/%s/rootfs" % lxc
     result = sys_run("umount %s" % rootfs)
     if result.returncode == 0:
         return True
     logger.error("cannot umount rootfs:%s" % rootfs)
     return False
Exemplo n.º 19
0
 def copyImage(self,user,image,token,target):
     """Copy a user's private image tarball to another master host (*target*).

     Pushes the .tz file via scp, then registers the image with the target
     master through its /image/copytarget/ endpoint.  Returns a dict with
     'success' set to 'true' or 'false'.
     """
     path = "/opt/docklet/global/images/private/"+user+"/"
     '''image_info_file = open(path+"."+image+".info", 'r')
     [createtime, isshare] = image_info_file.readlines()
     recordshare = isshare
     isshare = "unshared"
     image_info_file.close()
     image_info_file = open(path+"."+image+".info", 'w')
     image_info_file.writelines([createtime, isshare])
     image_info_file.close()'''
     try:
         # Ensure the target directory exists, then copy the tarball over.
         sys_run('ssh root@%s "mkdir -p %s"' % (target,path))
         sys_run('scp %s%s.tz root@%s:%s' % (path,image,target,path))
         #sys_run('scp %s.%s.description root@%s:%s' % (path,image,target,path))
         #sys_run('scp %s.%s.info root@%s:%s' % (path,image,target,path))
         # Register the copied image with the target master's API.
         resimage = Image.query.filter_by(ownername=user,imagename=image).first()
         auth_key = env.getenv('AUTH_KEY')
         url = "http://" + target + ":" + master_port + "/image/copytarget/"
         data = {"token":token,"auth_key":auth_key,"user":user,"imagename":image,"description":resimage.description}
         result = requests.post(url, data=data).json()
         logger.info("Response from target master: " + str(result))
     except Exception as e:
         logger.error(e)
         '''image_info_file = open(path+"."+image+".info", 'w')
         image_info_file.writelines([createtime, recordshare])
         image_info_file.close()'''
         return {'success':'false', 'message':str(e)}
     '''image_info_file = open(path+"."+image+".info", 'w')
     image_info_file.writelines([createtime, recordshare])
     image_info_file.close()'''
     logger.info("copy image %s of %s to %s success" % (image,user,target))
     return {'success':'true', 'action':'copy image'}
Exemplo n.º 20
0
def delete_old_instances(texts, ec2provider, provider_key, date,
                         maxhours, excluded_instances, output):
    """Report every EC2 instance of a provider and delete those older than maxhours.

    Each instance is printed and appended to the *output* report file;
    matching, non-excluded instances older than the cutoff are deleted.
    Any error aborts the scan and is logged.
    """
    deletetime = maxhours * 3600
    try:
        matchers = [re.compile(text) for text in texts]
        with open(output, 'a+') as report:
            header = "\n{}:\n-----------------------\n".format(provider_key)
            print(header)
            report.write(header)
            for vm in ec2provider.list_vm(include_terminated=True):
                creation = ec2provider.vm_creation_time(vm)
                message = "EC2:{provider}  {instance}  \t {time} \t {instance_type} " \
                          "\t {instance_status}\n".format(provider=provider_key, instance=vm,
                                                          time=(date - creation),
                                                          instance_type=ec2provider.vm_type(vm),
                                                          instance_status=ec2provider.vm_status(vm))
                print(message)
                report.write(message)
                # Skip explicitly excluded instances and non-matching names.
                if excluded_instances and vm in excluded_instances:
                    continue
                if not match(matchers, vm):
                    continue
                age_seconds = (date - creation).total_seconds()
                if age_seconds >= deletetime:
                    ec2provider.delete_vm(instance_id=vm)
                    print("EC2:{}  {} is successfully deleted".format(provider_key, vm))
    except Exception as e:
        logger.error(e)
Exemplo n.º 21
0
    def prepareImage(self,user,image,fspath):
        """Extract an image tarball into *fspath*.

        The base image is a no-op; private images come from the calling
        user's directory, public ones from the image owner's directory.
        Extraction errors are logged and swallowed.
        """
        imagename = image['name']
        imagetype = image['type']
        imageowner = image['owner']
        if imagename == "base" and imagetype == "base":
            return
        # Resolve the directory holding the tarball by visibility.
        owner_dir = ("private/" + user) if imagetype == "private" else ("public/" + imageowner)
        imgpath = self.imgpath + owner_dir + "/"
        try:
            sys_run("tar -C %s -xvf %s" % (self.dealpath(fspath),imgpath+imagename+".tz"), True)
        except Exception as e:
            logger.error(e)
        return
Exemplo n.º 22
0
    def stop_vnode(self, request, context):
        """Stop a batch-task vnode: halt its lxc container, unmount OSS,
        delete its filesystem, remove its bridge and release its GPUs.

        Always returns an ACCEPTED reply; individual step failures are logged.
        """
        logger.info('stop vnode with config: ' + str(request))
        username = request.username
        lxcname = '%s-batch-%s-%s' % (username, request.taskid, str(request.vnodeid))
        brname = request.vnode.network.brname
        mount_list = request.vnode.mount

        logger.info("Stop the task with lxc:"+lxcname)
        container = lxc.Container(lxcname)
        if container.stop():
            logger.info("stop container %s success" % lxcname)
        else:
            logger.error("stop container %s failed" % lxcname)

        # Unmount any OSS buckets that were mounted for this task.
        self.umount_oss("/var/lib/lxc/%s/oss" % (lxcname), mount_list)

        logger.info("deleting container:%s" % lxcname)
        if self.imgmgr.deleteFS(lxcname):
            logger.info("delete container %s success" % lxcname)
        else:
            logger.error("delete container %s failed" % lxcname)

        # Tear down the per-vnode OVS bridge, if one was created.
        if brname is not None:
            netcontrol.del_bridge(brname)

        # Return any GPUs reserved for this container.
        self.release_gpu_device(lxcname)

        return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED,message="")
def get_datastores_per_host(provider_key):
    """Print unregistered (orphaned) files found on each host datastore of a provider."""
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        provider = get_mgmt(provider_key)

        vm_registered_files = get_registered_vm_files(provider_key)
        # Map each host to its datastore URL up front.
        datastore_urls_by_host = {
            host: provider.list_host_datastore_url(host) for host in provider.list_host()}
        unregistered_files = []

        print("\n*********************UNREGISTERED FILES ON: {}**********************\n".format(
            provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host, datastore_url in datastore_urls_by_host.items():
            # Per-host failures are logged and skipped so one bad host
            # doesn't abort the whole scan.
            try:
                list_orphaned_files_per_host(host, datastore_url,
                                             provider_key, vm_registered_files,
                                             unregistered_files)
            except Exception as e:
                logger.error(e)
                continue

    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
Exemplo n.º 24
0
    def add_gpu_device(self, lxcname, gpu_need):
        """Reserve *gpu_need* free GPUs and expose their device nodes to *lxcname*.

        Returns [True, ""] on success (or when no GPU is needed), otherwise
        [False, reason].  GPU bookkeeping (self.gpu_status) is protected by
        self.gpu_lock, which is now released via try/finally on every path
        instead of being released by hand at each return.
        """
        if gpu_need < 1:
            return [True, ""]
        self.gpu_lock.acquire()
        try:
            # Pick the first free GPUs until the request is satisfied.
            use_gpus = []
            for gpuid in self.gpu_status.keys():
                if self.gpu_status[gpuid] == "" and gpu_need > 0:
                    use_gpus.append(gpuid)
                    gpu_need -= 1
            if gpu_need > 0:
                return [False, "No free GPUs"]
            # Reserve them before touching the container.
            for gpuid in use_gpus:
                self.gpu_status[gpuid] = lxcname
            try:
                gputools.add_device(lxcname, "/dev/nvidiactl")
                gputools.add_device(lxcname, "/dev/nvidia-uvm")
                for gpuid in use_gpus:
                    gputools.add_device(lxcname,"/dev/nvidia"+str(gpuid))
                    logger.info("Add gpu:"+str(gpuid) +" to lxc:"+str(lxcname))
            except Exception:
                logger.error(traceback.format_exc())
                # Roll back the reservation on any device failure.
                for gpuid in use_gpus:
                    self.gpu_status[gpuid] = ""
                return [False, "Error occurs when adding gpu device."]
            return [True, ""]
        finally:
            # Released on every return and on unexpected errors alike.
            self.gpu_lock.release()
Exemplo n.º 25
0
def test_tables_fields(provider, test_item, soft_assert):
    """Soft-assert that every list-view row has a name and non-empty verified fields."""
    navigate_to(test_item.obj, 'All')
    tb.select('List View')
    # NOTE: We must re-instantiate here table
    # in order to prevent StaleElementException or UsingSharedTables
    # TODO: Switch to widgetastic
    paged_tbl = PagedTable(table_locator="//div[@id='list_grid']//table")
    for row in paged_tbl.rows():
        name_cell = row[2]  # We're using indexing since it could be either 'name' or 'host'
        if not name_cell:
            logger.error('Could not find NAME header on {}s list...'
                         .format(test_item.obj.__name__))
            continue
        name = name_cell.text
        for field in test_item.fields_to_verify:
            # Missing column -> soft failure; present but empty -> soft failure.
            try:
                value = getattr(row, field)
            except AttributeError:
                soft_assert(False, '{}\'s list table: field  not exist: {}'
                            .format(test_item.obj.__name__, field))
                continue
            soft_assert(value, '{}\'s list table: {} row - has empty field: {}'
                        .format(test_item.obj.__name__, name, field))
Exemplo n.º 26
0
    def migrate_host(self, src_host, new_host_list, ulockmgr):
        """Migrate every vcluster off *src_host* onto hosts from *new_host_list*.

        Looks up each cluster's owner quota info and delegates the actual move
        to migrate_cluster, holding the per-user lock around each migration.
        Returns [True, ""] on success or [False, message] on the first error.
        """
        [status, vcluster_list] = self.get_all_clusterinfo()
        if not status:
            return [False, vcluster_list]
        auth_key = env.getenv('AUTH_KEY')
        res = post_to_user("/master/user/groupinfo/", {'auth_key':auth_key})
        groups = json.loads(res['groups'])
        quotas = {group['name']: group['quotas'] for group in groups}

        for vcluster in vcluster_list:
            if 'ownername' not in vcluster.keys():
                # Bug fix: this previously returned the misspelled name `Flase`,
                # which raised NameError instead of reporting the problem.
                return [False, 'Ownername not in vcluster(%s).keys' % str(vcluster)]
            username = vcluster['ownername']
            # Acquire outside the try so a failed acquire doesn't trigger a
            # release of a lock we never took.
            ulockmgr.acquire(username)
            try:
                clustername = vcluster['clustername']
                rc_info = post_to_user("/master/user/recoverinfo/", {'username':username,'auth_key':auth_key})
                groupname = rc_info['groupname']
                user_info = {"data":{"id":rc_info['uid'],"groupinfo":quotas[groupname]}}
                self.migrate_cluster(clustername, username, src_host, new_host_list, user_info)
            except Exception as ex:
                logger.error(traceback.format_exc())
                return [False, str(ex)]
            finally:
                # Exactly one release per acquire, on success and failure alike.
                ulockmgr.release(username)
        return [True, ""]
Exemplo n.º 27
0
def aufs_remove(basefs):
    """Best-effort removal of *basefs*.

    Directories are removed recursively, regular files directly; a path that
    is neither is ignored.  Any failure is logged and swallowed.
    """
    try:
        remover = shutil.rmtree if os.path.isdir(basefs) else (
            os.remove if os.path.isfile(basefs) else None)
        if remover is not None:
            remover(basefs)
    except Exception as err:
        logger.error(err)
Exemplo n.º 28
0
 def do_nav(self, _tries=0, *args, **kwargs):
     """Describes how the navigation should take place.

     Runs the navigation step; on failure the error is logged and re-raised.
     """
     try:
         self.step(*args, **kwargs)
     except Exception as e:
         logger.error(e)
         # NOTE(review): the original had `self.go(_tries, *args, **kwargs)`
         # *after* this raise — unreachable dead code, removed.  If a retry
         # was intended, the `raise` should be dropped instead; confirm with
         # the navigation framework's expected semantics.
         raise
Exemplo n.º 29
0
 def modifySettingFile(self, setting):
     """Overwrite the global cloud setting file with *setting*.

     Returns {'success':'true'} on success, or {'success':'false'} (after
     logging) when *setting* is None.
     """
     if setting is None:  # `is None` instead of `== None` (identity check)
         logger.error("setting is None")
         return {'success':'false'}
     # `with` guarantees the handle is closed even if write() raises;
     # previously the file was left open on a write failure.
     with open(fspath+"/global/sys/cloudsetting.json", 'w') as settingfile:
         settingfile.write(setting)
     return {'success':'true'}
Exemplo n.º 30
0
 def get_clusterid(self, clustername, username):
     """Return the numeric id of the user's cluster, or -1 when the cluster
     is missing or its info file carries no clusterid."""
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return -1
     if 'clusterid' not in info:
         logger.error ("internal error: cluster:%s info file has no clusterid " % clustername)
         return -1
     return int(info['clusterid'])
Exemplo n.º 31
0
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Set the appliance's ``level_rails`` log level and wait for it to apply.

    Pushes the new level through the appliance yaml config, then tails
    ``evm.log`` watching for the target worker's PID to acknowledge the
    change in ``production.log`` (polled for up to ~60 seconds).

    :param level: desired rails log level, e.g. ``'debug'`` (case-insensitive).
    :param validate_against_worker: worker class whose PID must log the change.
    """
    # Worker PIDs appear as '#<pid>' in evm.log lines.
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.get_yaml_config()
    # Skip the config round-trip entirely when the level is already in effect.
    if not str(yaml['log']['level_rails']).lower() == level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        yaml['log']['level_rails'] = level
        store.current_appliance.set_yaml_config(yaml)

        attempts = 0
        detected = False
        while (not detected and attempts < 60):
            logger.debug(
                'Attempting to detect log level_rails change: {}'.format(
                    attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info(
                            'Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if not (attempts < 60):
            # Note the error in the logger but continue as the appliance could be slow at logging
            # that the log level changed
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
Exemplo n.º 32
0
def aufs_merge(image, basefs):
    """Merge an aufs *image* layer into *basefs* in place.

    Interprets aufs whiteout conventions: ``.wh..wh..opq`` marks an opaque
    directory (replace *basefs* wholesale with the image dir), ``.wh.<name>``
    marks *name* as deleted in this layer, and other ``.wh..wh*`` entries are
    internal aufs bookkeeping to skip.  Directories are merged recursively;
    file/dir type conflicts are resolved in favour of the image layer.
    Errors on individual entries are logged and the entry is skipped.
    """
    allfiles = os.listdir(image)
    if ".wh..wh..opq" in allfiles:
        #this is a new dir in image, remove the dir in basefs with the same name, and copy it to basefs
        shutil.rmtree(basefs)
        shutil.copytree(image, basefs, symlinks=True)
        aufs_clean(basefs)
        return
    for onefile in allfiles:
        try:
            if onefile[:7] == ".wh..wh":
                # aufs mark, but not white-out mark, ignore it
                continue
            elif onefile[:4] == ".wh.":
                # white-out mark, remove the file in basefs
                aufs_remove(basefs + "/" + onefile[4:])
            elif os.path.isdir(image + "/" + onefile):
                if os.path.isdir(basefs + "/" + onefile):
                    # this is a dir in image and basefs, merge it
                    aufs_merge(image + "/" + onefile, basefs + "/" + onefile)
                elif os.path.isfile(basefs + "/" + onefile):
                    # this is a dir in image but file in basefs, remove the file and copy the dir to basefs
                    os.remove(basefs + "/" + onefile)
                    shutil.copytree(image + "/" + onefile,
                                    basefs + "/" + onefile,
                                    symlinks=True)
                elif not os.path.exists(basefs + "/" + onefile):
                    # this is a dir in image but not exists in basefs, copy the dir to basefs
                    shutil.copytree(image + "/" + onefile,
                                    basefs + "/" + onefile,
                                    symlinks=True)
                else:
                    # error
                    logger.error(basefs + "/" + onefile + " cause error")
            elif os.path.isfile(image + "/" + onefile):
                if os.path.isdir(basefs + "/" + onefile):
                    # this is a file in image but dir in basefs, remove the dir and copy the file to basefs
                    shutil.rmtree(basefs + "/" + onefile)
                    shutil.copy2(image + "/" + onefile,
                                 basefs + "/" + onefile,
                                 follow_symlinks=False)
                elif os.path.isfile(basefs + "/" + onefile):
                    # this is a file in image and basefs, remove the file and copy the file to basefs
                    os.remove(basefs + "/" + onefile)
                    shutil.copy2(image + "/" + onefile,
                                 basefs + "/" + onefile,
                                 follow_symlinks=False)
                elif not os.path.isdir(basefs + "/" + onefile):
                    # this is a file in image but not exists in basefs, copy the file to basefs
                    shutil.copy2(image + "/" + onefile,
                                 basefs + "/" + onefile,
                                 follow_symlinks=False)
                else:
                    # error
                    logger.error(basefs + "/" + onefile + " cause error")
        except Exception as e:
            logger.error(e)
Exemplo n.º 33
0
def scaleout_cluster(user, beans, form):
    """Add one node to *user*'s cluster described by *form*.

    Reserves quota via the user service (``usageInc``) first, performs the
    scale-out, and rolls the quota back (``usageRecover``) if the scale-out
    itself fails.  All work runs under the per-user lock.

    :param user: owner of the cluster.
    :param beans: unused here; part of the common handler signature.
    :param form: request form with clustername, image fields, token, and
        cpu/memory/disk settings.
    :return: JSON string with 'success', 'action' and 'message' keys.
    """
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    logger.info ("scaleout: %s" % form)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        logger.info("handle request : scale out %s" % clustername)
        image = {}
        image['name'] = form.get("imagename", None)
        image['type'] = form.get("imagetype", None)
        image['owner'] = form.get("imageowner", None)
        user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")})
        user_info = json.dumps(user_info)
        setting = {
                'cpu': form.get('cpuSetting'),
                'memory': form.get('memorySetting'),
                'disk': form.get('diskSetting')
                }
        # Reserve quota before provisioning; bail out if the user service
        # refuses the increase.
        res = post_to_user("/user/usageInc/", {'token':form.get('token'), 'setting':json.dumps(setting)})
        status = res.get('success')
        result = res.get('result')
        if not status:
            return json.dumps({'success':'false', 'action':'scale out', 'message': result})
        [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, user_info, setting)
        if status:
            return json.dumps({'success':'true', 'action':'scale out', 'message':result})
        else:
            # Scale-out failed after quota was reserved -- give it back.
            post_to_user("/user/usageRecover/", {'token':form.get('token'), 'setting':json.dumps(setting)})
            return json.dumps({'success':'false', 'action':'scale out', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)
Exemplo n.º 34
0
def logout(request):
    """
    Log the user out of all registered SSO sites.

    :param request: GET request carrying ``auth_token`` and ``client_ip``.
    :return: JsonResponse describing the result (403 on missing params or
        invalid/expired token).
    """
    auth_token = request.GET.get("auth_token")
    client_ip = request.GET.get("client_ip")

    if not auth_token or not client_ip:  # missing/invalid logout parameters
        logger.warn('缺少登出参数')
        return JsonResponse({'msg': "缺少参数"}, status=403)

    user = UserTokenCache.validate(client_ip, auth_token)
    # referer = request.META.get("HTTP_REFERER")
    # netloc = urlparse.urlparse(referer).netloc if referer else None
    # If the cache holds a valid user, notify every registered site's
    # sso_logout_callback endpoint, passing the token along.
    if user:
        UserTokenCache.delete(auth_token)
        opener = urllib2.build_opener()
        for site in user['sites']:
            # if site != netloc:
            if site != client_ip:  # TODO: not necessarily effective -- `site` may not actually be an IP
                try:
                    uams = Site.objects.get(host=site)
                    sso_logout_callback = (uams.logout if uams.logout else ("http://"+site + "/uams/logoutnotify")) \
                                          + "?auth_token=" + auth_token
                    logger.info("send logout request to site:" +
                                sso_logout_callback)
                    opener.open(sso_logout_callback, None)
                except Exception as e:
                    # Best-effort: one unreachable site must not block the rest.
                    logger.error('logout failed: ' + str(e))
                    pass
        history(client_ip, user['username'], '登出')
        return JsonResponse({'msg': "logouted."})
    else:  # not logged in or already expired -- reject
        logger.error("token不存在")
        return JsonResponse({'msg': "token有误"}, status=403)
 def on_get(self, req, resp):
     """Return the pulses accessible to the requested user.

     Superuser-only endpoint: validates the request schema, checks the
     caller's privileges and the target user's existence, then attaches the
     user's accessible pulses to the response media.
     """
     requestObj = req.params
     responseObj = {"responseId": 111, "message": "", "data": {}}
     # validate schema
     afterValidation = self.validateSchema(requestObj)
     if not afterValidation[0]:
         responseObj["responseId"] = 110
         responseObj["message"] = afterValidation[1]
     else:
         # Only run the lookup when validation passed; previously the body
         # below executed even for invalid requests and could overwrite the
         # 110 validation response with a success.
         try:
             if not dbu.checkIfUserIsSuperuser(
                     req.params["kartoon-fapi-incoming"]["_id"]):
                 # check if user is superuser
                 responseObj["responseId"] = 109
                 responseObj["message"] = "Unauthorized access"
             elif dbu.countDocumentsByUsername(requestObj["username"]) != 1:
                 # check if user exist
                 responseObj["responseId"] = 107
                 responseObj["message"] = "User does not exist"
             else:
                 # 01. get userId
                 userId = dbu.getIdByUsername(requestObj["username"])
                 # 02. get user accessible pulses
                 pulseIds = self.getLimitedPulseIdsForUser(userId)
                 # 03. get activePulses from pulseIds
                 pulses = dbpu.getLesserPulsesByIds(pulseIds)
                 # 04. clean up mongo objects
                 pulses = self.convertMongoDBObjectsToObjects(pulses)
                 # 05. attach pulses in response
                 responseObj["data"]["pulses"] = pulses
                 # 06. set responseId to success
                 responseObj["responseId"] = 211
         except Exception as ex:
             log.error((thisFilename, inspect.currentframe().f_code.co_name),
                       exc_info=True)
             responseObj["message"] = str(ex)
     resp.media = responseObj
Exemplo n.º 36
0
    def login(self, user_name, password):
        """Log in through the configured login URL with Selenium.

        Fills the login form automatically; a captcha, if one appears, must
        be completed by hand.  Blocks until the browser URL changes away
        from the login page.

        :param user_name: account name to type into the username field.
        :param password: account password.
        """
        login_url = ini_config.login_url
        # Open the login page and give it a moment to render.
        self.browser.get(login_url)
        time.sleep(2)

        # Retry until the username field exists (the page may still be
        # loading).  ``while True`` and the narrowed exception replace the
        # original ``while 1`` / bare ``except`` (which also swallowed
        # KeyboardInterrupt).
        while True:
            try:
                user_name_obj = self.browser.find_element_by_id(
                    'TANGRAM__PSP_3__userName')
                break
            except Exception:
                logger.error(traceback.format_exc())
                time.sleep(1)
        user_name_obj.send_keys(user_name)
        ps_obj = self.browser.find_element_by_id('TANGRAM__PSP_3__password')
        ps_obj.send_keys(password)
        sub_obj = self.browser.find_element_by_id('TANGRAM__PSP_3__submit')
        sub_obj.click()

        # Keep waiting while the URL has not changed (login not finished).
        while self.browser.current_url == login_url:
            time.sleep(1)
Exemplo n.º 37
0
 def flush_cluster(self,username,clustername,containername):
     """Snapshot *containername* into a temporary image and flush every other
     container in *clustername* onto that image.

     Rewrites the cluster info file with updated ``lastsave``/``image``
     fields and removes the temporary image afterwards.

     :return: ``[False, msg]`` on lookup failures; ``None`` on success
         (matching the original success path).
     """
     begintime = datetime.datetime.now()
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     containers = info['containers']
     imagetmp = username + "_tmp_docklet"
     for container in containers:
         if container['containername'] == containername:
             logger.info("container: %s found" % containername)
             worker = self.nodemgr.ip_to_rpc(container['host'])
             worker.create_image(username,imagetmp,containername)
             fimage = container['image']
             logger.info("image: %s created" % imagetmp)
             break
     else:
         # Bail out early: without the source container there is no temp
         # image to flush from, and ``fimage`` would be unbound below
         # (previously this fell through into a NameError).
         logger.error("container: %s not found" % containername)
         return [False, "container not found"]
     for container in containers:
         if container['containername'] != containername:
             logger.info("container: %s now flush" % container['containername'])
             worker = self.nodemgr.ip_to_rpc(container['host'])
             worker.flush_container(username,imagetmp,container['containername'])
             container['lastsave'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
             container['image'] = fimage
             logger.info("thread for container: %s has been prepared" % container['containername'])
     clusterpath = self.fspath + "/global/users/" + username + "/clusters/" + clustername
     # ``with`` guarantees the info file is closed even if the dump fails.
     with open(clusterpath, 'w') as infofile:
         infofile.write(json.dumps(info))
     self.imgmgr.removeImage(username,imagetmp)
     endtime = datetime.datetime.now()
     dtime = (endtime - begintime).seconds
     logger.info("flush spend %s seconds" % dtime)
     logger.info("flush success")
Exemplo n.º 38
0
def test_add_user_check_table(driver):
    '''
    Test that submitting a new user adds a new entry to the users table.
    '''
    res = True
    msg = ''

    newentry = {
        'username': '******', 'email': '*****@*****.**',
        'dob': '01.01.2001', 'addr': '1 Road'
    }

    # NOTE(review): the email/username values were previously filled into
    # each other's fields; each input now receives its matching entry.
    fillin_field(driver, 'inputEmail', newentry['email'])
    fillin_field(driver, 'inputUsername', newentry['username'])
    fillin_field(driver, 'inputBirthdate', newentry['dob'])
    fillin_field(driver, 'inputAddress', newentry['addr'])

    click_button(driver, 'addButton')

    table = driver.find_element_by_id('tableBody')

    try:
        # Wait for the new row to render before asserting on the table.
        WebDriverWait(driver, 4).until(EC.text_to_be_present_in_element(
            (By.ID, 'tableBody'), newentry['username']
        ))

    except TimeoutException as ex:
        res = False
        msg = ex.msg
        log.error('TimeoutException: {}'.format(ex))

    assert res, msg
    for e in newentry.values():
        assert e in table.text, 'Entry {} is missing from table!'.format(e)
Exemplo n.º 39
0
 def disable_domain(self, ip, domain):
     """Record *domain* as unusable for the proxy identified by *ip*.

     Logs an error when the proxy is unknown or the domain is already in
     its ``disable_domains`` list; otherwise pushes the domain onto that
     list and logs the update.

     :param ip: proxy id (``_id`` in the collection).
     :param domain: domain name to disable for this proxy.
     """
     # Guard clause: unknown proxy -- nothing to update.
     if not self.proxies.count_documents({'_id': ip}):
         logger.error("不存在此代理ip:{}".format(ip))
         return
     already_disabled = self.proxies.count_documents({
         '_id': ip,
         'disable_domains': domain
     })
     if already_disabled:
         logger.error("此代理ip已存在该不可用域名:{},{}".format(ip, domain))
     else:
         # Append the domain to this proxy's disabled list.
         self.proxies.update_one({'_id': ip},
                                 {'$push': {
                                     'disable_domains': domain
                                 }})
         logger.info("代理IP不可用域名更新成功:{},{}".format(ip, domain))
Exemplo n.º 40
0
def get_config_from_yaml(config_file):
    """Parse ``config.yaml`` and return its scan configuration.

    :param config_file: path to the YAML configuration file.
    :return: tuple ``(dir_list, sensitive_list)``; both remain empty lists
        when the file cannot be parsed.
    """
    dir_list = []
    sensitive_list = []
    try:
        with open(config_file, "r") as file:
            # safe_load avoids arbitrary object construction from the config
            # file; plain yaml.load without a Loader is also an error on
            # PyYAML >= 6.
            config = yaml.safe_load(file)

            for k, v in config.items():
                if k == "dir":
                    dir_list = v
                elif k == "sensitive":
                    sensitive_list = v
                else:
                    logger.warn("error tag in config.yaml")

    except yaml.YAMLError as err:
        # The original call passed ``err`` as a lazy %-argument with no
        # placeholder in the message, which raised a logging formatting
        # error instead of logging the cause.
        logger.error("Error in configuration file: %s", err)

    return dir_list, sensitive_list
Exemplo n.º 41
0
 def test_Living_change3(self):
     '''Change the stream type while a live broadcast is running.

     Starts a live session, toggles between sub-stream and main-stream
     while reconnecting, and asserts the live state reads "已开启"
     (started) after each reconnect.  On failure a screenshot is saved
     and the test re-raised; the live session is always stopped in the
     ``finally`` block.
     '''
     try:
         logger.info("在直播过程中更改推流类型")
         self.login()
         self.getin_live()
         self.set_living()
         home = HomePage(self.driver)
         home.click_system_setup_blck()
         mange = Manage(self.driver)
         mange.interaction_constraints2() # return to the home page during the live broadcast
         self.getin_live()
         self.disconnet()
         sleep(2)
         self.select_live_type("子码流", 0)
         sleep(1)
         self.connet()
         sleep(2)
         self.assertEqual(self.check_live_state(3),"已开启")
         self.disconnet(4)
         sleep(2)
         self.select_live_type("主码流", 1)
         sleep(1)
         self.connet(4)
         sleep(2)
         self.assertEqual(self.check_live_state(3),"已开启")
     except Exception as msg:
         logger.error(u"异常原因:%s"%msg)
         self.driver.get_screenshot_as_file(os.path.join(readconfig.screen_path,'test_Living_change2.png'))
         raise Exception("false")
     finally:
         # Always leave the UI in a clean state and stop the live session.
         self.driver.switch_to.default_content()
         home.click_system_setup_blck()
         home.click_record()
         recordpage = RecordPage(self.driver)
         recordpage.stop_live()
Exemplo n.º 42
0
def check_basic_params(callback, api_key, netloc):
    """Validate the basic SSO request parameters and the requesting site.

    :param callback: callback URL supplied by the client.
    :param api_key: API key supplied by the client.
    :param netloc: host (netloc) the request claims to come from.
    :return: tuple ``(error_response, site)`` -- when validation fails,
        ``error_response`` is a 403 HttpResponse and ``site`` is None; on
        success ``error_response`` is None and ``site`` is the matching
        Site object.
    """

    if not (callback and api_key):
        logger.error('callback api_key为空')
        return HttpResponse("参数缺失", status=403), None
    # Verify the host origin.
    try:
        # TODO: temporary handling during development -- the api_key is not
        # actually matched against the site here yet (see commented query).
        site = Site.objects.get(host=netloc)
        # site = Site.objects.filter(host=netloc, apikey__apikey=api_key).first()
        if site:
            return None, site
        else:
            raise Site.DoesNotExist
    except Site.DoesNotExist:
        logger.error("'" + netloc + "'不是合法站点")
        return HttpResponse("'" + netloc + "'不是合法站点", status=403), None
Exemplo n.º 43
0
def get_vm_config_modified_time(name, vm_name, datastore_url, provider_key):
    """Return the modification time of a VM's ``.vmx`` file on its host.

    SSHes to the host whose name contains *name* (falling back to an IPv4
    address parsed out of *name*) and runs ``find ... | xargs date -r`` over
    the datastore path to read the file's timestamp.

    :param name: host name, or a string containing the host's IP address.
    :param vm_name: VM whose ``<vm_name>.vmx`` file is inspected.
    :param datastore_url: datastore URL; its scheme-ish prefix is stripped
        by the regex below.
    :param provider_key: key into cfme_data's management_systems hosts list.
    :return: naive local-time datetime on success, False on any failure.
    """
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        hostname = [host['name'] for host in hosts if name in host['name']]
        if not hostname:
            # No configured host matched -- fall back to an IPv4 address
            # embedded in *name*, if any.
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', name)
        connect_kwargs = {
            'username': credentials['host_default']['username'],
            'password': credentials['host_default']['password'],
            'hostname': hostname[0]
        }
        datastore_path = re.findall(r'([^ds:`/*].*)', str(datastore_url))
        ssh_client = SSHClient(**connect_kwargs)
        command = 'find ~/{}/{} -name {} | xargs  date -r'.format(
            datastore_path[0], str(vm_name), str(vm_name) + '.vmx')
        exit_status, output = ssh_client.run_command(command)
        ssh_client.close()
        modified_time = parser.parse(output.rstrip())
        # Normalize to the local timezone, then drop tzinfo so the result
        # compares cleanly against naive datetimes.
        modified_time = modified_time.astimezone(pytz.timezone(str(get_localzone())))
        return modified_time.replace(tzinfo=None)
    except Exception as e:
        logger.error(e)
        return False
Exemplo n.º 44
0
 def unshareImage(self, user, imagename):
     """Withdraw *user*'s image *imagename* from the public share.

     Clears the image's ``hasPublic`` flag (deleting the DB row entirely
     when no private copy remains either) and removes the shared tarball
     from the public image directory.  Failures are logged and swallowed.
     """
     public_imgpath = self.imgpath + "public/" + user + "/"
     imgpath = self.imgpath + "private/" + user + "/"
     '''if os.path.isfile(imgpath + image + ".tz"):
         image_info_file = open(imgpath+"."+image+".info", 'r')
         [createtime, isshare] = image_info_file.readlines()
         isshare = "unshare"
         image_info_file.close()
         image_info_file = open(imgpath+"."+image+".info", 'w')
         image_info_file.writelines([createtime, isshare])
         image_info_file.close()'''
     try:
         #sys_run("rm -rf %s/" % public_imgpath+image, True)
         image = Image.query.filter_by(imagename=imagename,
                                       ownername=user).first()
         image.hasPublic = False
         # Drop the row entirely when neither a public nor private copy exists.
         if image.hasPrivate == False:
             db.session.delete(image)
         db.session.commit()
         sys_run("rm -f %s" % public_imgpath + imagename + ".tz", True)
         #sys_run("rm -f %s" % public_imgpath+"."+image+".info", True)
         #sys_run("rm -f %s" % public_imgpath+"."+image+".description", True)
     except Exception as e:
         logger.error(e)
Exemplo n.º 45
0
def delete_cluster(user, beans, form):
    """Delete *user*'s cluster named in *form* and release its quota.

    Queries the cluster's aggregate cpu/memory/disk usage first so the user
    service can release it, then deletes the cluster.  All work runs under
    the per-user lock.

    :param user: owner of the cluster.
    :param beans: unused here; part of the common handler signature.
    :param form: request form carrying 'clustername' and 'token'.
    :return: JSON string with 'success', 'action' and 'message' keys.
    """
    global G_vclustermgr
    global G_ulockmgr
    clustername = form.get('clustername', None)
    if (clustername == None):
        return json.dumps({'success':'false', 'message':'clustername is null'})
    G_ulockmgr.acquire(user)
    try:
        logger.info ("handle request : delete cluster %s" % clustername)
        user_info = post_to_user("/user/selfQuery/" , {'token':form.get("token")})
        user_info = json.dumps(user_info)
        # Release the cluster's quota back to the user before deleting.
        [status, usage_info] = G_vclustermgr.get_clustersetting(clustername, user, "all", True)
        if status:
            post_to_user("/user/usageRelease/", {'token':form.get('token'), 'cpu':usage_info['cpu'], 'memory':usage_info['memory'],'disk':usage_info['disk']})
        [status, result] = G_vclustermgr.delete_cluster(clustername, user, user_info)
        if status:
            return json.dumps({'success':'true', 'action':'delete cluster', 'message':result})
        else:
            return json.dumps({'success':'false', 'action':'delete cluster', 'message':result})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)
Exemplo n.º 46
0
 def shareImage(self, user, imagename):
     """Publish *user*'s private image *imagename* to the public share.

     No-op when the image is already public; otherwise sets ``hasPublic``
     and copies the private tarball into the public image directory.
     Failures are logged and swallowed.
     """
     imgpath = self.imgpath + "private/" + user + "/"
     share_imgpath = self.imgpath + "public/" + user + "/"
     '''image_info_file = open(imgpath+"."+image+".info", 'r')
     [createtime, isshare] = image_info_file.readlines()
     isshare = "shared"
     image_info_file.close()
     image_info_file = open(imgpath+"."+image+".info", 'w')
     image_info_file.writelines([createtime, isshare])
     image_info_file.close()'''
     try:
         image = Image.query.filter_by(imagename=imagename,
                                       ownername=user).first()
         # Already shared -- nothing to do.
         if image.hasPublic == True:
             return
         image.hasPublic = True
         db.session.commit()
         sys_run("mkdir -p %s" % share_imgpath, True)
         sys_run(
             "cp %s %s" % (imgpath + imagename + ".tz",
                           share_imgpath + imagename + ".tz"), True)
         #sys_run("rsync -a --delete %s/ %s/" % (imgpath+image,share_imgpath+image), True)
     except Exception as e:
         logger.error(e)
Exemplo n.º 47
0
    def find(self, conditions={}, count=0):
        """Return up to *count* proxies matching *conditions*.

        Results are sorted by score (descending) then speed (ascending) so
        the highest-quality proxies come first.

        :param conditions: MongoDB query filter dict.
        :param count: maximum number of proxies to return (0 = no limit).
        :return: list of Proxy objects; empty list when the query fails.
        """
        try:
            cursor = self.proxies.find(conditions, limit=count).sort(
                [('score', pymongo.DESCENDING), ('speed', pymongo.ASCENDING)]
            )
        except Exception as e:
            # Previously ``cursor`` was left unbound here, so the loop below
            # raised a NameError; fail soft with an empty result instead.
            logger.error(e)
            return []

        proxy_list = []
        for item in cursor:
            # Drop Mongo's internal id before rebuilding the Proxy object.
            item.pop('_id')
            proxy = Proxy(**item)
            proxy_list.append(proxy)

        return proxy_list
Exemplo n.º 48
0
def get_appliance(provider):
    '''Fixture to provision appliance to the provider being tested if necessary.

    Reuses the currently-running appliance when it already lives on
    *provider*; otherwise provisions a fresh one (destroying it again if
    initial setup fails).  Appliances are cached per provider key in the
    module-global ``appliance_list``.
    '''
    global appliance_list, main_provider
    appliance_vm_prefix = "test_vm_analysis"

    if provider.key not in appliance_list:
        try:
            # see if the current appliance is on the needed provider
            ip_addr = urlparse(store.base_url).hostname
            appl_name = provider.mgmt.get_vm_name_from_ip(ip_addr)
            logger.info("re-using already provisioned appliance on {}...".format(provider.key))
            main_provider = provider.key
            appliance = Appliance(provider.key, appl_name)
            appliance.configure_fleecing()
            appliance_list[provider.key] = appliance
        except Exception as e:
            logger.error("Exception: %s" % str(e))
            # provision appliance and configure
            ver_to_prov = str(version.current_version())
            logger.info("provisioning {} appliance on {}...".format(ver_to_prov, provider.key))
            appliance = None
            try:
                appliance = provision_appliance(
                    vm_name_prefix=appliance_vm_prefix,
                    version=ver_to_prov,
                    provider_name=provider.key)
                logger.info("appliance IP address: " + str(appliance.address))
                appliance.configure(setup_fleece=True)
            except Exception as e:
                logger.error("Exception: %s" % str(e))
                # Clean up the half-provisioned VM before re-raising.
                if appliance is not None:
                    appliance.destroy()
                raise CFMEException(
                    'Appliance encountered error during initial setup: {}'.format(str(e)))
            appliance_list[provider.key] = appliance
    return appliance_list[provider.key]
Exemplo n.º 49
0
 def blocks(self):
     """Return True when this Bugzilla bug should block the test.

     Open bugs block.  Upstream bugs that can be tested upstream do not
     block upstream appliances.  On downstream appliances, a closed bug
     blocks only versions below its ``fixed_in``.  XML-RPC faults are
     logged and treated as non-blocking.
     """
     try:
         bug = self.data
         if bug is None:
             return False
         result = False
         if bug.is_opened:
             result = True
         if bug.upstream_bug:
             if not version.appliance_is_downstream(
             ) and bug.can_test_on_upstream:
                 result = False
         if result is False and version.appliance_is_downstream():
             if bug.fixed_in is not None:
                 return version.current_version() < bug.fixed_in
         return result
     except xmlrpclib.Fault as e:
         code = e.faultCode
         s = e.faultString.strip().split("\n")[0]
         # The original format string had only one placeholder for two
         # arguments, silently dropping the fault message; log both.
         logger.error("Bugzilla thrown a fault: {}/{}".format(code, s))
         logger.warning("Ignoring and taking the bug as non-blocking")
         store.terminalreporter.write(
             "Bugzila made a booboo: {}/{}\n".format(code, s), bold=True)
         return False
Exemplo n.º 50
0
 def __init__(self, addr_cidr, etcdclient, mode, masterip):
     """Initialize the network manager's address pools.

     :param addr_cidr: CIDR of the overall address pool (used only when
         ``mode == 'new'``).
     :param etcdclient: etcd client used to persist and reload pool state.
     :param mode: ``'new'`` to carve fresh pools from *addr_cidr*,
         ``'recovery'`` to reload previously-dumped pools from etcd.
     :param masterip: IP address of the master node.
     """
     self.etcd = etcdclient
     self.masterip = masterip
     self.user_locks = threading.Lock()
     if mode == 'new':
         logger.info("init network manager with %s" % addr_cidr)
         self.center = IntervalPool(addr_cidr=addr_cidr)
         # allocate a pool for system IPs, use CIDR=27, has 32 IPs
         syscidr = 27
         [status, sysaddr] = self.center.allocate(syscidr)
         if status == False:
             # Without a system pool the manager cannot run at all.
             logger.error ("allocate system ips in __init__ failed")
             sys.exit(1)
         # maybe for system, the last IP address of CIDR is available
         # But, EnumPool drop the last IP address in its pool -- it is not important
         self.system = EnumPool(sysaddr+"/"+str(syscidr))
         self.usrgws = {}
         self.users = {}
         #self.vlanids = {}
         #self.init_vlanids(4095, 60)
         #self.init_shared_vlanids()
         self.dump_center()
         self.dump_system()
     elif mode == 'recovery':
         logger.info("init network manager from etcd")
         self.center = None
         self.system = None
         self.usrgws = {}
         self.users = {}
         #self.vlanids = {}
         self.load_center()
         self.load_system()
         #self.load_vlanids()
         #self.load_shared_vlanids()
     else:
         logger.error("mode: %s not supported" % mode)
Exemplo n.º 51
0
    def createImage(self,
                    user,
                    image,
                    lxc,
                    description="Not thing",
                    imagenum=10):
        """Create a private image tarball from an LXC container's volume.

        Enforces the per-user image-count limit (*imagenum*) for new images,
        tars the container volume into ``<private imgpath>/<image>.tz`` and
        records *description* via ``updateinfo``.

        :param user: owner of the new image.
        :param image: image name (without the ``.tz`` suffix).
        :param lxc: container name whose local volume is archived.
        :param description: free-text description stored with the image.
        :param imagenum: maximum number of images the user may own.
        :return: ``[True, msg]`` on success, ``[False, msg]`` when the
            image limit is exceeded.
        """
        fspath = self.NFS_PREFIX + "/local/volume/" + lxc
        imgpath = self.imgpath + "private/" + user + "/"
        #tmppath = self.NFS_PREFIX + "/local/tmpimg/"
        #tmpimage = str(random.randint(0,10000000)) + ".tz"

        # Only count against the limit when creating a brand-new image.
        if not os.path.exists(imgpath + image) and os.path.exists(imgpath):
            cur_imagenum = 0
            for filename in os.listdir(imgpath):
                if os.path.isdir(imgpath + filename):
                    cur_imagenum += 1
            if cur_imagenum >= int(imagenum):
                return [False, "image number limit exceeded"]
        #sys_run("mkdir -p %s" % tmppath, True)
        sys_run("mkdir -p %s" % imgpath, True)
        try:
            sys_run(
                "tar -cvf %s -C %s ." %
                (imgpath + image + ".tz", self.dealpath(fspath)), True)
        except Exception as e:
            logger.error(e)
        #try:
        #sys_run("cp %s %s" % (tmppath+tmpimage, imgpath+image+".tz"), True)
        #sys_run("rsync -a --delete --exclude=lost+found/ --exclude=root/nfs/ --exclude=dev/ --exclude=mnt/ --exclude=tmp/ --exclude=media/ --exclude=proc/ --exclude=sys/ %s/ %s/" % (self.dealpath(fspath),imgpath+image),True)
        #except Exception as e:
        #    logger.error(e)
        #sys_run("rm -f %s" % tmppath+tmpimage, True)
        #sys_run("rm -f %s" % (imgpath+"."+image+"_docklet_share"),True)
        self.updateinfo(user, image, description)
        logger.info("image:%s from LXC:%s create success" % (image, lxc))
        return [True, "create image success"]
Exemplo n.º 52
0
def process_provider_vms(provider_key,
                         provider_type,
                         matchers,
                         delta,
                         vms_to_delete,
                         list_vms=None):
    """Collect VMs on *provider_key* older than *delta* into *vms_to_delete*.

    Scans the provider's VM list for names matching *matchers*, determines
    each VM's creation/boot time (for powered-off virtualcenter VMs the
    .vmx file's mtime is used instead), and records ``(vm_name, age)``
    tuples for VMs older than *delta*.  Shared state (``vms_to_delete``,
    console output) is touched only under the module-level ``lock``.

    :param provider_key: provider to scan.
    :param provider_type: provider kind, e.g. 'virtualcenter'.
    :param matchers: name patterns identifying candidate VMs.
    :param delta: minimum age (timedelta) before a VM is flagged.
    :param vms_to_delete: shared dict of sets, keyed by provider, mutated
        in place.
    :param list_vms: when truthy, also print the provider's VM inventory.
    """
    with lock:
        print('{} processing'.format(provider_key))
    try:
        now = datetime.datetime.now()
        with lock:
            # Known conf issue :)
            provider = get_mgmt(provider_key)
        vm_list = provider.list_vm()
        if list_vms:
            list_provider_vms(provider_key)

        for vm_name in vm_list:
            try:
                if not match(matchers, vm_name):
                    continue

                # Powered-off virtualcenter VMs don't report a boot time;
                # fall back to the .vmx file's modification time.
                if provider_type == 'virtualcenter' and provider.vm_status(
                        vm_name) == 'poweredOff':
                    hostname = provider.get_vm_host_name(vm_name)
                    vm_config_datastore = provider.get_vm_config_files_path(
                        vm_name)
                    datastore_url = provider.get_vm_datastore_path(
                        vm_name, vm_config_datastore)
                    vm_creation_time = get_vm_config_modified_time(
                        hostname, vm_name, datastore_url, provider_key)
                else:
                    vm_creation_time = provider.vm_creation_time(vm_name)

                if vm_creation_time + delta < now:
                    vm_delta = now - vm_creation_time
                    with lock:
                        vms_to_delete[provider_key].add((vm_name, vm_delta))
            except Exception as e:
                # One broken VM must not abort the provider scan.
                logger.error(e)
                logger.error(
                    'Failed to get creation/boot time for {} on {}'.format(
                        vm_name, provider_key))
                continue

        with lock:
            print('{} finished'.format(provider_key))
    except Exception as ex:
        with lock:
            # Print out the error message too because logs in the job get deleted
            print('{} failed ({}: {})'.format(provider_key,
                                              type(ex).__name__, str(ex)))
        logger.error(
            'failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
Exemplo n.º 53
0
def get_vmdb_yaml_config(ssh_client):
    """Return the appliance's vmdb configuration parsed from YAML.

    Args:
        ssh_client: SSH client connected to the appliance; must provide
            ``run_rails_command`` and ``run_command``.

    Returns:
        The parsed vmdb configuration (as produced by ``yaml.load``).

    Raises:
        Exception: if the config cannot be read, or the appliance version
            is not supported.
    """
    ver = get_version()
    if ver in ('56', '57', '58', '59', '510'):
        # 5.6+ exposes the merged settings through the Rails console.
        base_data = ssh_client.run_rails_command(
            'puts\(Settings.to_hash.deep_stringify_keys.to_yaml\)', ignore_stderr=True)
        if base_data.rc:
            logger.error("Config couldn't be found")
            logger.error(base_data.output)
            raise Exception('Error obtaining vmdb config')
        # Trim the Rails deprecation warnings appended after the YAML body.
        # NOTE(review): if 'DEPRE' is absent, find() returns -1 and this drops
        # the last character -- presumed never to happen on these versions.
        yaml_data = base_data.output[:base_data.output.find('DEPRE')]
    elif ver == '55':
        base_data = ssh_client.run_command('cat "/var/www/miq/vmdb/config/vmdb.yml.db"')
        if base_data.rc:
            logger.error("Config couldn't be found")
            logger.error(base_data.output)
            raise Exception('Error obtaining vmdb config')
        yaml_data = base_data.output
    else:
        # BUG FIX: previously an unrecognized version fell through and
        # crashed with UnboundLocalError on yaml_data; fail explicitly.
        raise Exception('Unsupported appliance version for vmdb config: {}'.format(ver))
    # NOTE(review): yaml.load on appliance-provided data -- consider
    # yaml.safe_load unless custom YAML tags are required here.
    return yaml.load(yaml_data)
Exemplo n.º 54
0
    def _image_record(self, owner, imagename, image_type):
        """Build the metadata dict for one image owned by *owner*."""
        record = {"name": imagename}
        [mtime, description] = self.get_image_info(owner, imagename, image_type)
        record["time"] = mtime
        record["description"] = description
        return record

    def list_images(self, user):
        """List the private images of *user* and all public images.

        Returns:
            dict: ``{"private": [<image dict>, ...],
                     "public": {<owner>: [<image dict>, ...], ...}}``
            where each image dict carries name/time/description (private
            entries additionally carry an "isshared" flag).
        """
        images = {"private": [], "public": {}}

        # NOTE(review): user/owner names are interpolated into a shell
        # command ("ls %s"); confirm they are validated upstream.
        private_path = self.imgpath + "private/" + user + "/"
        try:
            ret = sys_run("ls %s" % private_path, True)
            for filename in str(ret.stdout, "utf-8").split():
                # Image tarballs are identified by the '.tz' suffix.
                if not filename.endswith('.tz'):
                    continue
                imagename = filename[:-3]
                record = self._image_record(user, imagename, "private")
                record["isshared"] = self.isshared(user, imagename)
                images["private"].append(record)
        except Exception as e:
            logger.error(e)

        public_root = self.imgpath + "public" + "/"
        try:
            ret = sys_run("ls %s" % public_root, True)
            for owner in str(ret.stdout, "utf-8").split():
                owner_path = self.imgpath + "public/" + owner + "/"
                try:
                    ret = sys_run("ls %s" % owner_path, True)
                    filenames = str(ret.stdout, "utf-8").split()
                    # Owners with an empty directory get no entry at all
                    # (matches the original behavior).
                    if not filenames:
                        continue
                    images["public"][owner] = [
                        self._image_record(owner, fn[:-3], "public")
                        for fn in filenames if fn.endswith('.tz')
                    ]
                except Exception as e:
                    logger.error(e)
        except Exception as e:
            logger.error(e)

        return images
 def wrapper(outside_self, request, *args, **kwargs):
     """Validate the request, time the wrapped call and map errors to responses."""
     try:
         self.start = time.time()
         self.__request_validate(request)
         result = self.__run(func, outside_self, request, *args, **kwargs)
         self.end = time.time()
         return result
     except PubErrorCustom as e:
         # Public business error: message only, default result code.
         tag = '[%s : %s  ] : [%s]' % (
             outside_self.__class__.__name__, getattr(func, '__name__'), e.msg)
         logger.error(tag)
         return HttpResponse(success=False, msg=e.msg, data=None)
     except InnerErrorCustom as e:
         # Internal error carries its own result code.
         tag = '[%s : %s  ] : [%s]' % (
             outside_self.__class__.__name__, getattr(func, '__name__'), e.msg)
         logger.error(tag)
         return HttpResponse(success=False, msg=e.msg, rescode=e.code, data=None)
     except Exception as e:
         # Anything unexpected is reported with its stringified form.
         tag = '[%s : %s  ] : [%s]' % (
             outside_self.__class__.__name__, getattr(func, '__name__'), str(e))
         logger.error(tag)
         return HttpResponse(success=False, msg=str(e), data=None)
Exemplo n.º 56
0
    def prepareFS(self, user, image, lxc, size="1000", vgname="docklet-group"):
        """Prepare the root filesystem for container *lxc* from *image*.

        Unmounts stale mounts, recreates the rootfs and volume-layer
        directories, (re)creates an LVM volume, formats and mounts it,
        unpacks the image into it, then assembles the aufs union mount
        used as the container rootfs.

        Args:
            user: owner of the image.
            image: image descriptor forwarded to prepareImage.
            lxc: container name; also used as the LVM volume name.
            size: volume size forwarded to new_volume (string; presumably
                MB -- TODO confirm units against new_volume).
            vgname: LVM volume group to allocate from.

        Returns:
            bool: False if volume creation failed; True otherwise (errors
            in the later mount/unpack steps are logged but still return True).
        """
        rootfs = "/var/lib/lxc/%s/rootfs" % lxc
        layer = self.NFS_PREFIX + "/local/volume/" + lxc
        # Lazily unmount leftovers from a previous run so the directories
        # can be removed and recreated cleanly.
        Ret = sys_run("mountpoint %s" % rootfs)
        if Ret.returncode == 0:
            logger.info("%s not clean" % rootfs)
            sys_run("umount -l %s" % rootfs)
        Ret = sys_run("mountpoint %s" % layer)
        if Ret.returncode == 0:
            logger.info("%s not clean" % layer)
            sys_run("umount -l %s" % layer)

        try:
            sys_run("rm -rf %s %s" % (rootfs, layer))
            sys_run("mkdir -p %s %s" % (rootfs, layer))
        except Exception as e:
            logger.error(e)

        # (Re)create the backing volume.
        if check_volume(vgname, lxc):
            # BUG FIX: the original format string had a %s placeholder but
            # no argument, so the raw "%s" was logged instead of the name.
            logger.info("volume %s already exists, delete it" % lxc)
            delete_volume(vgname, lxc)
        if not new_volume(vgname, lxc, size):
            logger.error("volume %s create failed" % lxc)
            return False

        try:
            sys_run("mkfs.ext4 /dev/%s/%s" % (vgname, lxc), True)
            sys_run("mount /dev/%s/%s %s" % (vgname, lxc, layer), True)
            #self.sys_call("mkdir -p %s/overlay %s/work" % (layer,layer))
            #self.sys_call("mount -t overlay overlay -olowerdir=%s/local/basefs,upperdir=%s/overlay,workdir=%s/work %s" % (self.NFS_PREFIX,layer,layer,rootfs))
            #self.prepareImage(user,image,layer+"/overlay")
            self.prepareImage(user, image, layer)
            logger.info("image has been prepared")
            # Union mount: writable volume layer on top of the shared
            # read-only package and base layers.
            sys_run(
                "mount -t aufs -o br=%s=rw:%s/local/packagefs=ro+wh:%s/local/basefs=ro+wh -o udba=reval none %s/"
                % (layer, self.NFS_PREFIX, self.NFS_PREFIX, rootfs), True)
            sys_run("mkdir -m 777 -p %s/local/temp/%s" %
                    (self.NFS_PREFIX, lxc))

        except Exception as e:
            logger.error(e)

        logger.info("FS has been prepared for user:%s lxc:%s" % (user, lxc))
        return True
Exemplo n.º 57
0
def deploy_template(provider_crud,
                    vm_name,
                    template_name=None,
                    timeout_in_minutes=15):
    """Deploy a VM from a template on the given provider and wait for it.

    Args:
        provider_crud: provider object exposing ``get_mgmt_system()`` and
            ``get_yaml_data()``.
        vm_name: name for the new VM.
        template_name: template to deploy; defaults to the provider's
            'small_template' from its YAML data.
        timeout_in_minutes: how long to wait for the VM to exist.

    Raises:
        Exception: re-raises the original deployment error after attempting
            to destroy any partially created VM.
    """
    mgmt = provider_crud.get_mgmt_system()
    data = provider_crud.get_yaml_data()
    deploy_args = {}
    deploy_args.update(vm_name=vm_name)
    # Provider-specific required deployment arguments.
    if isinstance(mgmt, RHEVMSystem):
        deploy_args.update(cluster_name=data['default_cluster'])
    elif isinstance(mgmt, EC2System):
        deploy_args.update(instance_type=data['default_flavor'])
    elif isinstance(mgmt, OpenstackSystem):
        deploy_args.update(flavour_name=data['default_flavor'])
        deploy_args.update(assign_floating_ip=data['default_ip_pool'])

    if template_name is None:
        template_name = data['small_template']

    logger.info(
        "Getting ready to deploy VM %s from template %s on provider %s" %
        (vm_name, template_name, data['name']))

    try:
        logger.debug("deploy args: " + str(deploy_args))
        mgmt.deploy_template(template_name, **deploy_args)
        wait_for(mgmt.does_vm_exist, [vm_name],
                 num_sec=timeout_in_minutes * 60,
                 delay=30)
    except Exception as e:
        # BUG FIX: str(e) instead of e.message -- exceptions have no
        # .message attribute on Python 3, so the original error path itself
        # raised AttributeError and masked the real failure.
        logger.error('Could not provision VM %s (%s)', vm_name, str(e))
        logger.info('Attempting cleanup on VM %s', vm_name)
        try:
            if mgmt.does_vm_exist(vm_name):
                # Stop the vm first
                logger.warning('Destroying VM %s', vm_name)
                if mgmt.delete_vm(vm_name):
                    logger.info('VM %s destroyed', vm_name)
                else:
                    logger.error('Error destroying VM %s', vm_name)
        except Exception as f:
            logger.error('Could not destroy VM %s (%s)', vm_name, str(f))
        finally:
            raise e
Exemplo n.º 58
0
def _vm_cleanup(mgmt, vm_name):
    """Separated to make the logic able to propagate the exceptions directly."""
    # Best effort: the status query itself may fail, which is only logged.
    try:
        status = mgmt.vm_status(vm_name)
    except Exception as f:
        logger.error("Could not retrieve VM/Instance status: %s: %s",
                     type(f).__name__, str(f))
    else:
        logger.info("VM/Instance status: %s", status)
    logger.info('Attempting cleanup on VM/instance %s', vm_name)
    try:
        if not mgmt.does_vm_exist(vm_name):
            return
        # The VM must be stopped before it can be removed.
        logger.warning('Destroying VM/instance %s', vm_name)
        destroyed = mgmt.delete_vm(vm_name)
        if destroyed:
            logger.info('VM/instance %s destroyed', vm_name)
        else:
            logger.error('Error destroying VM/instance %s', vm_name)
    except Exception as f:
        logger.error('Could not destroy VM/instance %s (%s: %s)', vm_name,
                     type(f).__name__, str(f))
Exemplo n.º 59
0
 def usageInc(self, *args, **kwargs):
     '''
     Usage: usageInc(cur_user = token_from_auth, modification = data_from_form)
     Increase the recorded resource usage of a user by the requested
     amounts, enforcing the quota of the user's group. Returns a
     {'success': bool, 'result': str} dict; usage is only committed when
     all three resources stay within quota.
     '''
     cur_user = kwargs['cur_user']
     modification = kwargs['modification']
     logger.info("record usage for user:%s" % cur_user.username)
     groupname = cur_user.user_group
     groupinfo = self.groupQuery(name=groupname)['data']
     usage = UserUsage.query.filter_by(username=cur_user.username).first()
     # First usage for this user: create a zeroed record, then re-query it.
     # BUG FIX: identity comparison (is None) instead of == None.
     if usage is None:
         new_usage = UserUsage(cur_user.username)
         db.session.add(new_usage)
         db.session.commit()
         usage = UserUsage.query.filter_by(
             username=cur_user.username).first()
     # Parse the requested increments once; all must be strictly positive.
     req_cpu = int(modification['cpu'])
     req_memory = int(modification['memory'])
     req_disk = int(modification['disk'])
     if req_cpu <= 0 or req_memory <= 0 or req_disk <= 0:
         return {
             'success': False,
             'result': "cpu,memory and disk setting cannot less than zero"
         }
     cpu = int(usage.cpu) + req_cpu
     memory = int(usage.memory) + req_memory
     disk = int(usage.disk) + req_disk
     # Reject (without committing anything) if any quota would be exceeded.
     if cpu > int(groupinfo['cpu']):
         logger.error("cpu quota exceed, user:%s" % cur_user.username)
         return {'success': False, 'result': "cpu quota exceed"}
     if memory > int(groupinfo['memory']):
         logger.error("memory quota exceed, user:%s" % cur_user.username)
         return {'success': False, 'result': "memory quota exceed"}
     if disk > int(groupinfo['disk']):
         logger.error("disk quota exceed, user:%s" % cur_user.username)
         return {'success': False, 'result': "disk quota exceed"}
     # Usage values are stored as strings -- keep that convention.
     usage.cpu = str(cpu)
     usage.memory = str(memory)
     usage.disk = str(disk)
     db.session.commit()
     return {'success': True, 'result': "distribute the resource"}
Exemplo n.º 60
0
 def check_allcontainers(self):
     """Check every container known in LOCAL and GLOBAL and repair them.

     Returns:
         [bool, str]: overall success flag and a short summary message.
     """
     [both, onlylocal, onlyglobal] = self.diff_containers()
     logger.info("check all containers and repair them")
     result = True
     for container in both:
         logger.info("%s in LOCAL and GLOBAL checks..." % container)
         [status, msg] = self.check_container(container)
         result = result & status
     if len(onlylocal) > 0:
         result = False
         logger.error("some container only exists in LOCAL: %s" % onlylocal)
     if len(onlyglobal) > 0:
         result = False
         logger.error("some container only exists in GLOBAL: %s" %
                      onlyglobal)
     # BUG FIX: the verdict previously tested 'status' (the LAST container
     # checked) instead of the aggregated 'result', so earlier failures and
     # LOCAL/GLOBAL mismatches were reported as success.
     if result:
         logger.info("check all containers success")
         return [True, 'all is ok']
     else:
         logger.error("check all containers failed")
         return [False, 'not ok']