Example no. 1
def save_problem_ci_results(ci_error, db, error, eval_data, gist, problem_ci,
                            results, should_merge):
    if not should_merge:
        # If problem_ci fails, don't save to aggregate bot scores collection
        if ci_error:
            log.error('Problem CI failed, not saving to bots '
                      'official scores as this is likely an issue '
                      'with the new version of the problem.')
            problem_ci.status = PROBLEM_CI_STATUS_FAILED
            problem_ci.error = ci_error
            update_pr_status_problem_ci(ci_error, problem_ci, eval_data)
        else:
            log.info('Problem CI not yet finished')

    else:
        # Aggregate data from bot evals now that they're done
        gists = BoxList()
        for bot_eval_key in problem_ci.bot_eval_keys:
            bot_eval = db.get(get_eval_db_key(bot_eval_key))
            save_to_bot_scores(
                bot_eval, bot_eval.eval_key,
                Box(score=bot_eval.results.score, eval_key=bot_eval.eval_key))
            gists.append(bot_eval.gist)
        problem_ci.gists = gists
        update_pr_status_problem_ci(error, problem_ci, eval_data)
        problem_ci.status = PROBLEM_CI_STATUS_PASSED
    db.set(problem_ci.id, problem_ci)
Example no. 2
 def reduce():
     result = dbox(problem_ci)
     # Refetch all bots in case scores came in after initial request
     for bot_eval_key in problem_ci.bot_eval_keys:
         bot_eval = db.get(get_eval_db_key(bot_eval_key))
         past_bot_scores = get_past_bot_scores(bot_eval)
         bot_eval_no_eval_key = deepcopy(bot_eval)
         del bot_eval_no_eval_key['eval_key']
         log.info(f'Checking confidence interval for bot_eval '
                  f'{box2json(bot_eval)}\n'
                  f'past scores: {box2json(past_bot_scores)}')
         if bot_eval.results.errors:
             result.error = str(bot_eval.results.errors)
             log.error(result.error + ': bot details ' \
                 f'{box2json(bot_eval_no_eval_key)}')
             return result
         in_interval, interval_info = score_within_confidence_interval(
             bot_eval, past_bot_scores)
         if not in_interval:
             result.error = f'Score for bot {bot_eval.results.score}' \
                 f' not within confidence interval ' \
                 f'{interval_info.low} to {interval_info.high}, ' \
                 f'mean: {interval_info.mean} ' \
                 f'problem CI failed'
             log.error(result.error + ': bot details ' \
                 f'{box2json(bot_eval_no_eval_key)}')
             return result
     else:
         log.success('Score for bot within confidence interval, '
                     'problem CI successful!')
         return result
Example no. 3
def merge_pull_request(pull_request: Box) -> Error:
    error = Error()
    if blconfig.is_test or get_test_name_from_callstack():
        log.info('Skipping pr merge in test')
    else:
        log.info(f'Merging pull request ' f'{box2json(pull_request)}')
        github_client = Github(blconfig.github_token)
        repo = github_client.get_repo(pull_request.base_full_name)
        pr = repo.get_pull(pull_request.number)
        if dbox(pr.raw_data).mergeable_state == 'draft':
            log.info('Pull request is draft, not trying to merge')
        else:
            try:
                merge_status = pr.merge('Automatically merged by Botleague')
                if not merge_status.merged:
                    error.message = merge_status.message
                    error.http_status_code = 400
            except GithubException as e:
                error.message = str(e)
                error.http_status_code = e.status

    if error:
        log.error(f'Error merging pull request '
                  f'{box2json(pull_request)} '
                  f'Error: {box2json(error)}')

    return error
Example no. 4
    def set_container_logs_and_errors(self, containers, results, job):
        for container in containers:
            image_name = container.attrs["Config"]["Image"]
            container_id = \
                f'{image_name}_{container.short_id}'
            run_logs = container.logs(timestamps=True).decode()
            results.json_results_from_logs = self.get_json_out(run_logs)
            log.log('CONTAINER', f'{container_id} logs begin \n' + ('-' * 80))
            log.log('CONTAINER', run_logs)
            log.log('CONTAINER', f'{container_id} logs end \n' + ('-' * 80))
            log_url = self.upload_logs(
                run_logs, filename=f'{image_name}_job-{job.id}.txt')

            exit_code = container.attrs['State']['ExitCode']
            if exit_code != 0:
                results.errors[container_id] = f'Container failed with' \
                    f' exit code {exit_code}'
                log.error(f'Container {container_id} failed with {exit_code}'
                          f' for job {box2json(job)}, logs: {log_url}')
            elif container.status == 'dead':
                results.errors[container_id] = f'Container died, please retry.'
                log.error(f'Container {container_id} died'
                          f' for job {box2json(job)}, logs: {log_url}')

            log.info(f'Uploaded logs for {container_id} to {log_url}')
            results.logs[container_id] = log_url
    def check_gce_ops_in_progress(self):
        ops_still_in_progress = BoxList()
        for op in self.gce_ops_in_progress:

            try:
                op_result = Box(
                    self.gce.zoneOperations().get(project=self.project,
                                                  zone=self.zone,
                                                  operation=op.name).execute())
            except:
                log.exception('Could not get op_result')
                break
            if op_result.status == 'DONE':
                if 'error' in op_result:
                    log.error(f'GCE operation resulted in an error: '
                              f'{op_result.error}\nOperation was:'
                              f'\n{box2json(op)}')
                    if op.operationType == 'insert':
                        # Retry the creation?
                        pass
                    # elif op.operationType == 'start':
                    #
            else:
                ops_still_in_progress.append(op)
        self.gce_ops_in_progress = ops_still_in_progress
Example no. 6
 def request_eval(endpoint: str, eval_data: Box) -> PrResponse:
     try:
         if 'REPLACE_PROBLEM_HOST' in os.environ:
             endpoint = os.environ['REPLACE_PROBLEM_HOST'] + \
                        endpoint[endpoint.find('/eval'):]
         # TODO: Don't pass everything through to endpoint - i.e. cleanse
         serializable_data = json.loads(
             eval_data.to_json(default=str, sort_keys=True))
         endpoint_resp = requests.post(endpoint,
                                       json=serializable_data,
                                       timeout=10)
     except requests.exceptions.Timeout:
         ret = EvalErrorPrResponse('Endpoint %s took too long to respond' %
                                   endpoint)
     else:
         # Yay, we did not timeout!
         if endpoint_resp.status_code != 200:
             ret = EvalErrorPrResponse(
                 'Endpoint %s failed with HTTP %r, response body was %s' %
                 (endpoint, endpoint_resp.status_code,
                  endpoint_resp.content))
             log.error(ret.msg)
         else:
             ret = EvalStartedPrResponse(
                 'Started evaluation at %s' % endpoint, eval_data)
             # Now wait for a /confirm and /results request with the eval_key
     log.info(f'Request eval resp: {ret.msg}')
     return ret
 def handle_timed_out_jobs(self, job):
     max_seconds = Box(job, default_box=True).eval_spec.max_seconds
     used_default_max_seconds = False
     if not max_seconds:
         used_default_max_seconds = True
         if job.job_type == JOB_TYPE_EVAL:
             max_seconds = 60 * 5
         elif job.job_type in [
                 JOB_TYPE_SIM_BUILD, JOB_TYPE_DEEPDRIVE_BUILD
         ]:
             max_seconds = 60 * 60
         else:
             log.error(f'Unexpected job type {job.job_type} for job: '
                       f'{box2json(job)} setting timeout to 5 minutes')
             max_seconds = 60 * 5
     if time.time() - job.started_at.timestamp() > max_seconds:
         if used_default_max_seconds:
             log.debug('No max_seconds in problem definition, used default')
         log.error(f'Job took longer than {max_seconds} seconds, '
                   f'consider stopping instance: {job.instance_id} '
                   f'in case the instance is bad. Job:\n{box2json(job)}')
         job.status = JOB_STATUS_TIMED_OUT
         self.jobs_db.set(job.id, job)
         self.make_instance_available(job.instance_id)
         # TODO: Stop the instance in case there's an issue with the
         #  instance itself
         # TODO: Set job error timeout
         pass
 def confirm_evaluation(self, job) -> bool:
     if in_test():
         status = JOB_STATUS_CREATED
         ret = True
     elif dbox(job).confirmed:
         log.info(f'Job already confirmed ' f'{box2json(job)}')
         status = JOB_STATUS_CREATED
         ret = True
     else:
         url = f'{job.botleague_liaison_host}/confirm'
         json = {'eval_key': job.eval_spec.eval_key}
         log.info(f'Confirming eval {json} at {url}...')
         confirmation = requests.post(url, json=json)
         if 400 <= confirmation.status_code < 500:
             status = JOB_STATUS_DENIED_CONFIRMATION
             log.error('Botleague denied confirmation of job, skipping')
             ret = False
         elif not confirmation.ok:
             status = JOB_STATUS_CREATED
             log.error('Unable to confirm job with botleague liaison, '
                       'will try again shortly')
             ret = False
         else:
             status = JOB_STATUS_CREATED
             log.success(f'Confirmed eval job ' f'{box2json(job)} at {url}')
             ret = True
     job.status = status
     job.confirmed = ret
     self.save_job(job)
     return ret
 def semaphore_requested(self) -> Union[bool, str]:
     status = self.db.get(STATUS)
     if status == RUNNING + self.id:
         return False
     else:
         log.info('Semaphore changed to %s, stopping' % status)
         if not status.startswith(REQUESTED) and status != STOPPED:
             log.error('Unexpected semaphore status %s' % status)
         return status
Example no. 10
 def get_available(instances):
     ret = []
     for inst in instances:
         inst_meta = dbox(self.instances_db.get(inst.id))
         if not inst_meta:
             log.error(f'Could not find instance {inst.id} in DB')
         elif inst_meta.status == INSTANCE_STATUS_AVAILABLE:
             ret.append(inst)
     return ret
Example no. 11
    def work(self):
        with self._lock:
            entities = tuple(self._queue)
            self._queue.clear()

        for entity in entities:
            try:
                entity.save()
            except:  # noqa
                log.error("Unable to save %s, details below\n%s" %
                    (entity, format_exception_trace(locals=True, separate=True)))
Example no. 12
def handle_results(request):
    final_results, error, gist = handle_results_request(request)
    resp_box = Box(results=final_results, error=error, gist=gist)
    resp = Response(json=resp_box.to_dict())
    if error:
        resp.status_code = error.http_status_code
        log.error(f'Error handling results {error} - results:'
                  f' {resp_box.to_json(indent=2)}')
    else:
        log.info(f'Results response {resp_box.to_json(indent=2)}')
    return resp
Example no. 13
 def find_id(self, id):
     '''
     Find an element
     :return:
     '''
     try:
         exsit = self.driver.find_element_by_id(id)
         return True
     except:
         log.error('Element not located: ' + '%s' % (id))
         self.screenshot()
         return False
Example no. 14
 def find_id(self, id):
     """
     Find an element
     :return:
     """
     try:
         exsit = self.driver.find_element_by_id(id)
         return True
     except:
         log.error("Element not located: " + "%s" % (id))
         self.screenshot()
         return False
Example no. 15
def get_ids(id):
    '''
    Locate a group of resource-id elements on the page
    :param id:
    :return: list of elements
    '''
    try:
        elements = driver.find_elements_by_id(id)
        driver.implicitly_wait(2)
        return elements
    except:
        log.error('Element not located: ' + '%s' % (id))
Example no. 16
def get_file_from_github(repo, filename, ref=None):
    """@:param filename: relative path to file in repo"""
    try:
        args = [filename]
        if ref is not None:
            args.append(ref)
        contents = repo.get_contents(*args)
        content_str = contents.decoded_content.decode('utf-8')
    except UnknownObjectException:
        log.error('Unable to find %s in %s', filename, repo.html_url)
        content_str = ''
    ret = get_str_or_box(content_str, filename)
    return ret
Example no. 17
def handle_eval_request(problem_name):
    start = time.time()
    log.info(f'Starting eval request {json.dumps(request.json, indent=2)}')

    db = get_config_db()
    if ON_GAE and db.get('DISABLE_EVAL') is True:
        return make_error('Evals are disabled', 423)

    try:
        # Unpack our endpoint parameters from the URL arguments
        eval_id = request.json['eval_id']
        eval_key = request.json['eval_key']
        seed = request.json['seed']
        docker_tag = request.json['docker_tag']
        eval_request = Box(request.json, default_box=True)
        max_seconds = eval_request.problem_def.max_seconds or None
        botleague_liaison_host = eval_request.botleague_liaison_host or None

        pull_request = request.json.get('pull_request', None)
    except KeyError as err:
        log.error(traceback.format_exc())
        log.exception('Error getting required params')

        # If any of our required parameters were missing,
        # send a "400 Bad Request" response
        ret = make_error('the parameter {} is required'.format(err.args[0]),
                         400)
    else:
        try:
            ret = submit_eval_job(
                docker_tag,
                eval_id,
                eval_key,
                problem_name,
                pull_request,
                seed,
                max_seconds,
                eval_request.problem_def,
                eval_request,
                botleague_liaison_host,
            )

        except Exception as err:
            # If anything went wrong inside the endpoint logic,
            # send a "500 Internal Server Error" response
            log.error(traceback.format_exc())
            log.exception('Problem submitting job')
            ret = make_error(err, 500)
    log.info(ret)
    log.info(f'Eval request took {time.time() - start} seconds')
    return ret
Example no. 18
 def __getitem__(self, name):
     handler = self._handlers.get(name)
     if handler is UNKNOWN:
         with self._entries._lock:
             handler = self._handlers.get(name)
             if handler is UNKNOWN:
                 if not self._module:
                     try:
                         self._module = __import__(self._type.module_name)
                     except:
                         log.error("[Dispatcher] Unable to import module: %s" % format_exception_trace())
                         self._module = {"__doc__": "Unable to load %s" % self._type.module_name}
                 self._handlers[name] = handler = getattr(self._module, name, None)
     return handler
Example no. 19
        def volume_list():
            try:
                cmd = "nova volume-list"
                cmd_result = shell.shell_cmd(cmd)
                with open(list_dir+list_file_volume,'w') as f:
                    f.writelines(cmd_result)
                if cmd_result:
                    logging.info("%s %s" %(tenant_name,cmd_result))
                else:
                    logging.warning("%s:Instance information is empty, please check the relevant configuration" % tenant_name)
            except IOError as error:
                logging.error("Tenant:%s %s" % (tenant_name, error))
                raise
            return cmd_result
Example no. 20
 def find_name(self, name):
     '''
     Check whether an element exists on the page
     :param name: text
     :return:
     '''
     findname = "//*[@text='%s']" % (name)
     try:
         exsit = self.driver.find_element_by_xpath(findname)
         return True
     except:
         log.error('Element not located: ' + '%s' % (name))
         self.screenshot()
         return False
 def obtain_semaphore(self, timeout=None) -> bool:
     start = time.time()
     # TODO: Avoid polling by creating a Firestore watch and using a
     #   mutex to avoid multiple threads processing the watch.
     if self.db.get(STATUS) == Box():
         log.warning('No semaphore document found, creating one!')
         self.db.set(STATUS, RUNNING + self.id)
         return True
     elif self.db.get(STATUS) in [Box(), STOPPED]:
         self.db.set(STATUS, RUNNING + self.id)
         return True
     self.request_semaphore()
     # TODO: Check for a third loop that requested access and alert, die,
     #  or re-request. As-is we just zombie.
     while not self.granted_semaphore():
         log.info('Waiting for other loop to end')
         if self.started_waiting_for_other_loop_time is None:
             self.started_waiting_for_other_loop_time = time.time()
         if time.time() - self.started_waiting_for_other_loop_time > 5:
             log.error('Assuming previous loop died without releasing '
                       'semaphore. Setting to stopped.')
             self.db.set(STATUS, STOPPED)
             self.started_waiting_for_other_loop_time = None
         if self.kill_now:
             log.warning('Killing loop while requesting semaphore, '
                         'here be dragons!')
             if self.db.compare_and_swap(STATUS, REQUESTED + self.id,
                                         self.previous_status):
                 # Other loop never saw us, good!
                 return False
             else:
                 # We have problems
                 if self.db.get(STATUS) == GRANTED + self.id:
                     # Other loop beat us in a race to set status
                     # and released so thinks we own the semaphore.
                     self.release_semaphore()
                     # TODO: Create an alert from this log.
                     raise RuntimeError(f'No {self.id} running! '
                                        f'Needs manual start')
                 else:
                     # Could be that a third loop requested.
                     self.release_semaphore()
                     # TODO: Create an alert from this log.
                     raise RuntimeError(f'Race condition encountered in '
                                        f'{self.id} Needs manual start')
         elif timeout is not None and time.time() - start > timeout:
             return False
         else:
             time.sleep(1)
     return True
Example no. 23
 def run_containers(self, containers_args: list = None):
     log.info('Running containers %s ...' % containers_args)
     containers = [self.start_container(**c) for c in containers_args]
     try:
         containers, success = self.monitor_containers(containers)
     except Exception as e:
         log.error(f'Exception encountered while running '
                   f'containers: '
                   f'{box2json(BoxList(containers))}, '
                   'stopping all containers.')
         for container in containers:
             log.error(f'Stopping orphaned container: {container}')
             container.stop(timeout=1)
         raise e
     return containers, success
Example no. 24
 def get_ids(self, id):
     '''
     Locate a group of resource-id elements on the page
     :param id:
     :return: list of elements
     '''
     try:
         # elements = self.driver.find_elements_by_id(id)
         elements = WebDriverWait(
             self.driver, 10).until(lambda x: x.find_elements_by_id(id))
         self.driver.implicitly_wait(2)
         return elements
     except:
         self.screenshot()
         log.error('Element not located: ' + '%s' % (id))
Example no. 25
def rest_team_port(port):
    user = '******'
    ip = '192.168.205.3'
    passwd='1haoche@151111!'
    
    # logging in switch
    child = expect.spawn('telnet %s' %ip)
    index = child.expect('User:')
    if index == 0:
        try:
            print 'user:%s' % user
            child.sendline(user)
            child.expect('Password:')
            print 'passwd:', passwd
            child.sendline(passwd)

            # reset port status
            child.expect('>')
            child.sendline('en')
            #
            child.expect('#')
            child.sendline('conf t')
            #
            child.expect('\(config\)#')
            child.sendline('interface port-channel %s' %port)
            #
            child.expect('\(config-if-Po%s\)#' %port)
            child.sendline('shut')
            child.expect('\(config-if-Po%s\)#' %port)
            child.sendline('no shut')
            print 'port:',port
            logging.info('port:%s' %port)
            #
            child.expect('\(config-if-Po%s\)#' %port)
            child.sendline('do show interface port-channel')

            print child.before
        except:
            traceback.print_exc()
            logging.error("login failed....")
        finally:
             child.close(force=True) 
             logging.info("login end .......")
Example no. 26
 def create_new_course_offering(self, year, quarter):
     if self.course_id:
         try:
             new_offering = CourseOffering(course_id=self.course_id,
                                           year=year,
                                           quarter=quarter)
             db_session.add(new_offering)
             db_session.flush()
             self.course_offering_id = new_offering.id
         except (IntegrityError, TypeError):
             db_session.rollback()
             log.error(
                 "Error due to attempted insertion of duplicate new course offering"
             )
     db_session.commit()
     return self
Example no. 27
 def get_xpath(self, xpath):
     """
     Locate an element on the page by xpath
     :param xpath:
     :return:
     """
     try:
         # element = WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_xpath(xpath))
         WebDriverWait(self.driver, 15).until(
             lambda driver: driver.find_element_by_xpath(xpath).is_displayed()
         )
         element = self.driver.find_element_by_xpath(xpath)
         return element
     except:
         self.screenshot()
         log.error("Element not located: " + "%s" % (xpath))
Example no. 28
def get_name(name):
    '''
    Locate a text element on the page
    :param name:
    :return:
    '''
    # element = driver.find_element_by_name(name)
    # return element

    findname = "//*[@text='%s']" % (name)
    try:
        element = driver.find_element_by_xpath(findname)
        driver.implicitly_wait(2)
        return element
    except:
        log.error('Element not located: ' + '%s' % (name))
Example no. 29
 def create_new_section(self, location, instructor, section_type, time_):
     if self.course_offering_id:
         try:
             new_section = Section(
                 course_offering_id=self.course_offering_id,
                 type=section_type,
                 location=location,
                 time=time_)
             new_section.instructor = instructor
             db_session.add(new_section)
         except IntegrityError:
             db_session.rollback()
             log.error(
                 "Error due to attempted insertion of duplicate new course section"
             )
     db_session.commit()
     return self
Example no. 30
def post_results_with_retries(max_attempts=5, **kwargs):
    done = False
    valid_results_codes = [200, 400, 500]
    attempts = 0
    resp = None
    while not done:
        resp = requests.post(**kwargs)
        if resp.status_code in valid_results_codes:
            done = True
        elif attempts < max_attempts:
            log.error(f'Failed posting results, response {resp}, retrying')
            time.sleep(1)
        else:
            done = True
        attempts += 1

    return resp
Example no. 31
    def get_id(self, id):
        '''
        Locate a resource-id element on the page
        :param id:
        :return:
        '''
        try:
            # element = WebDriverWait(self.driver, 10).until(lambda x: x.find_element_by_id(id()))

            WebDriverWait(self.driver, 15).until(
                lambda driver: driver.find_element_by_id(id).is_displayed())
            self.driver.implicitly_wait(2)
            element = self.driver.find_element_by_id(id)
            return element
        except:
            self.screenshot()
            log.error('Element not located: ' + '%s' % (id))
Example no. 32
    def _compile(self):
        while 1:
            try:
                extension = self.subsystem.extensions.get(LISTING)
                if extension:
                    if settings.STORE_BYTECODE:
                        location = self.locate(LISTING)
                        if location:
                            signature = location + extension
                        else:
                            signature = None
                    else:
                        signature = None
                else:
                    location = self.locate(SOURCE_CODE)
                    if location:
                        signature = location + self.subsystem.source_extension
                    else:
                        signature = None

                return self.subsystem.compile(self, signature)
            except RequirePrecompileError as error:
                executable = error.executable
                if executable is self:
                    log.write("Require %s" % self)
                    raise
                else:
                    log.write("Precompile %s" % executable)
                    try:
                        executable.compile()
                    except Exception:
                        show_exception_trace(caption="Unable to precompile %s" % executable, locals=True)
                        return ErrorBytecode(self, cause=sys.exc_info())
            except CompilationError as error:
                log.error("Unable to compile %s\n%sDue to error in %s" %
                    (self, settings.LOGGING_INDENT, error.source))
                return ErrorBytecode(self, cause=sys.exc_info())
            except SourceSyntaxError as error:
                log.error("Unable to compile %s\n%sDue to syntax error%s: %s"
                    % (self, settings.LOGGING_INDENT,
                        (" on line %d" % error.lineno if error.lineno else ""), error))
                return ErrorBytecode(self, cause=sys.exc_info())
            except Exception as error:
                show_exception_trace(caption="Unable to compile %s" % self, locals=True)
                return ErrorBytecode(self, cause=sys.exc_info())
Example no. 33
def fetch_instance_id() -> Tuple[str, bool]:
    if in_test() or 'INSTANCE_ID' in os.environ:
        ret = os.environ['INSTANCE_ID']
        is_real = False
    else:
        try:
            ret = requests.get(f'{METADATA_URL}/id',
                               headers={
                                   'Metadata-Flavor': 'Google'
                               }).text
            log.success('INSTANCE_ID: ' + ret)
        except Exception as e:
            log.error('Unable to get GCP instance metadata. '
                      'Are you on GCP? If not, you can manually'
                      ' set the INSTANCE_ID'
                      ' in your env for testing purposes.')
            exit(1)
        is_real = True
    return ret, is_real
Example no. 34
 def create_new_course(self, name, description, course_code, department,
                       prereqs):
     try:
         new_course = Course(name=name,
                             course_code=course_code,
                             description=description,
                             department=department)
         for prereq in prereqs:
             new_course.prereqs.append(prereq)
         db_session.add(new_course)
         db_session.flush()
         self.course_id = new_course.id
     except IntegrityError:
         db_session.rollback()
         log.error(
             "Error due to attempted insertion of duplicate new course")
     finally:
         db_session.commit()
         return self
 def run(self):
     if not self.obtain_semaphore():
         log.error('Could not obtain semaphore! Check to see if other loop '
                   'is running!')
         self.sleep_one_second()  # We'll be in a reboot loop until shutdown
         return
     log.success(f'Running {self.loop_name}, loop_id: {self.id}')
     while not self.semaphore_released():
         if self.kill_now:
             self.exit()
             return
         else:
             try:
                 self.fn()
                 self.sleep_one_second()
             except Exception:
                 self.kill_now = True
                 self.caught_exception = True
                 log.exception('Exception in loop, killing')
Example no. 36
 def main(self):
     while self.running:
         if self._socket is None:
             self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
             try:
                 self._socket.bind((self._address, self._port))
             except:
                 log.error("Log server is already running or incorrect address or port")
                 self.stop()
                 return
             self._socket.listen(LISTEN_BACKLOG)
             log.write("Listen on %s:%d" % (self._address, self._port))
         reading, writing, erratic = select.select((self._socket,), (), (), self.quantum)
         if reading:
             try:
                 session_socket, address = self._socket.accept()
                 log.write("Start session with %s" % session_socket.getpeername()[0])
                 LogServerSession(session_socket).start()
             except socket.error:
                 continue
 def volume_detach():
     volume_detach_id = []
     server_detach_id = []
     ##############
     # server_detach_id
     cmd_detach_server = "nova volume-list |grep in-use|awk '{print $12}'"
     result_server = shell.shell_cmd(cmd_detach_server)
     for vo_d in xrange(len(result_server)):
         v_d = result_server[vo_d].split()[0]
         server_detach_id.append(v_d)
     #############
     # volume_detach_id
     cmd_detach_volume = "nova volume-list |grep in-use|awk '{print $2}'"
     result_volume = shell.shell_cmd(cmd_detach_volume)
     for so_d in xrange(len(result_volume)):
         s_d = result_volume[so_d].split()[0]
         volume_detach_id.append(s_d)
     stop_list = []
     for e in xrange(len(server_detach_id)):
         fd =  tuple((server_detach_id[e],volume_detach_id[e]))
         stop_list.append(fd)
     #print "SSSSSSTTTTTTOOOOPPP:%s" %stop_list
     #############
     server_list = []
     cmd_list = "nova list |grep SHUTOFF|awk '{print $2}'"
     result_list = shell.shell_cmd(cmd_list)
     for li in xrange(len(result_list)):
         lst = result_list[li].split()[0]
         server_list.append(lst)
     #print "SSSSSSEEEEEEVVVVV:%s" % server_list
     for server,volume in stop_list:
         #print  "kkkkkkkKKKKKKKKKKKKKKKKKKKKKKKKK",server
         if server in server_list:
             cmd = "nova volume-detach %s %s" %(server,volume)
             result = shell.shell_cmd(cmd)
             logging.debug("volume[%s] already detached... "     % volume)
             print "volume[%s] already detached... "     % volume
         else:
             logging.error("instance[%s] is not stopped... "   %server)
        def volume_attach():
            volume_file = list_dir+list_file_volume_json
            try:
                with open(volume_file) as f:
                    json_load =json.load(f,encoding="utf-8")
                print "This is json_load:%s" %json_load
                start_list = []
                for j in xrange(len(json_load)):
                    cc = json_load[j]
                    device = str(cc['device'])
                    server = str(cc['server_id'])
                    volume = str(cc['id'])
                    ff = tuple((server,volume,device))
                    start_list.append(ff)
                print "mmmmmmMMMMMMM",start_list
                server_list = []
                ###########
                # server_list
                cmd_server = "nova list |grep SHUTOFF |awk '{print $2}'"
                server_result = shell.shell_cmd(cmd_server)
                for s in xrange(len(server_result)):
                    s_l = server_result[s].split()[0]
                    server_list.append(s_l)
                print "TTTTTTTTTTTTTTTTTTT",server_list
                 
                for server_id,volume_id,device_path in start_list:
                    if server_id in server_list:
                        cmd_v = "nova volume-attach %s %s %s" % (server_id,volume_id,device_path)
                        result = shell.shell_cmd(cmd_v)
                        logging.debug(cmd_v)
                        print "volume[%s] already attached... "     % volume_id
                    else:
                        logging.error("instance[%s] is not stopped... "   %server_id)

            except IOError:
                logging.error("%s list file not found!!!" %volume_file)
                raise
Example no. 39
 def inner(args):
     temp = func(args)
     file_name=path+io_generate_name()
     if temp == '4k':
         try:
             print "Generated file: %s size: 4k" % file_name
             logging.info("Generated file: %s size: 4k" % file_name)
             with open(file_name+'_4k','wb') as f:
                 f.write(os.urandom(1024*4))
         except IOError:
             print "Failed to write 4k file"
             logging.error("Failed to write 4k file")
             return  4
     elif temp == '1m':
         try:
             print "Generated file: %s size: 1m" % file_name
             logging.info("Generated file: %s size: 1m" % file_name)
             with open(file_name+'_1m','wb') as f:
                 f.write(os.urandom(1024*1024))
         except IOError:
             print "Failed to write 1m file"
             logging.error("Failed to write 1m file")
             return  1000
     elif temp == '10m':
         try:
             print "Generated file: %s size: 10m" % file_name
             logging.info("Generated file: %s size: 10m" % file_name)
             with open(file_name+'_10m','wb') as f:
                 f.write(os.urandom(1024*1024*10))
         except IOError:
             print "Failed to write 10m file"
             logging.error("Failed to write 10m file")
             return  10000
     else:
         print "Please specify the file size to write...<4k|1m|10m>"
         logging.warning("Please specify the file size to write...<4k|1m|10m>")
     return  temp
Example no. 40
    def main(self):
        if not self.running:
            return

        parser = Parser(builder=builder, result=[])
        parser.cache = True
        log.write("Listen on %s:%d" % (self._address or "*", self._port))
        while self.running:
            try:
                reading, writing, erratic = select.select((self._socket, self._session_socket), (), (), self.quantum)
            except select.error:
                log.error("Unable to check state")
            else:
                if self._socket in reading:
                    try:
                        message, address = self._socket.recvfrom(512)
                    except socket.error:
                        log.error("Unable to receive request")
                        continue
                    log.write("Receive request from %s" % address[0])
                    try:
                        parser.parse(chunk="<session>")
                        parser.parse(chunk=message)
                        parser.parse("</session>")
                    except ParsingException as error:
                        try:
                            self._socket.sendto("<reply><error>Incorrect request: %s</error></reply>" % error, address)
                        except socket.error:
                            log.error("Unable to send response")
                    else:
                        for name, options in parser.result:
                            try:
                                handler = modules[name]
                            except KeyError:
                                response = "<reply><error>Incorrect request</error></reply>"
                            else:
                                try:
                                    log.write("Invoke \"%s\" for %s" % (name, address[0]))
                                    if inspect.isgeneratorfunction(handler):
                                        response = "".join(handler(options))
                                    else:
                                        response = handler(options)
                                except WatcherError as error:
                                    response = "<reply><error>%s</error></reply>" % error
                                    log.write(error)
                                except Exception as error:
                                    message = "Execution error: %s" % error
                                    response = "<reply><error>%s</error></reply>" % message
                                    show_thread_trace(caption=message)
                            if not response:
                                response = "<reply><error>No reply</error></reply>"
                            try:
                                self._socket.sendto(response, address)
                            except socket.error:
                                log.error("Unable to send response")
                    parser.reset(result=[])
                if self._session_socket in reading:
                    try:
                        session_socket, address = self._session_socket.accept()
                    except socket.error:
                        log.error("Unable to accept connection")
                        continue
                    log.write("Start session with %s" % session_socket.getpeername()[0])
                    WatcherSession(session_socket, address).start()
Example no. 41
 def main(self):
     parser = Parser(builder=builder, result=[])
     parser.cache = True
     parser.parse(chunk="<session>")
     while self.running:
         try:
             reading, writing, erratic = select.select((self._socket,), (), (), self.quantum)
         except select.error:
             log.error("Unable to check session state")
         else:
             if reading:
                 try:
                     message = self._socket.recv(4096)
                 except socket.error:
                     log.error("Unable to receive request")
                     break
                 if not message:
                     break
                 try:
                     parser.parse(chunk=message)
                 except ParsingException:
                     log.error("Unable to parse request")
                     try:
                         self._socket.send("<reply><error>Incorrect request</error></reply>")
                     except socket.error:
                         log.error("Unable to send response")
                     break
                 for name, options in parser.result:
                     try:
                         handler = modules[name]
                     except KeyError:
                         log.error("Unable to find action")
                         response = "<reply><error>Incorrect request</error></reply>"
                     else:
                         try:
                             log.write("Invoke \"%s\" for %s" % (name, self._address[0]))
                             if inspect.isgeneratorfunction(handler):
                                 response = "".join(handler(options))
                             else:
                                 response = handler(options)
                         except WatcherError as error:
                             response = "<reply><error>%s</error></reply>" % error
                             log.write(error)
                         except Exception as error:
                             message = "Execution error: %s" % error
                             response = "<reply><error>%s</error></reply>" % message
                             show_exception_trace(caption=message, locals=True)
                     if not response:
                         response = "<reply><error>No reply</error></reply>"
                     try:
                         self._socket.send(response)
                     except socket.error:
                         log.error("Unable to send response")
                         break
                 del parser.result[:]
     self._socket.close()
Example no. 42
    def work(self):
        # obtain entry(ies)
        with self._lock:
            try:
                entry = self._queue.popleft()
            except IndexError:
                return

            # consume as much write actions as possible
            if entry[0] == actions.WRITE:
                entries, luid = [entry], entry[1]
                while self._queue:
                    next_entry = self._queue[0]
                    if next_entry[0] != actions.WRITE or next_entry[1] != luid:
                        break
                    entries.append(self._queue.popleft())

        # def restore():
        #     with self._lock:
        #         if entry[0] in (actions.ASSUME, actions.REVOKE):
        #             self._queue.appendleft(entry)
        #         else:
        #             for entry in reversed(entries):
        #                 self._queue.appendleft(entry)

        # send entry(ies)
        try:
            if self._stream is None:

                # (re)connect to the server
                while 1:
                    log.write("Connect to %s:%d" % (self._address, self._port))
                    stream_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    stream_socket.settimeout(RECONNECT_TIMEOUT) # self.quantum
                    try:
                        stream_socket.connect((self._address, self._port))
                        break
                    except socket.timeout:
                        if not self.running:
                            return
                    except socket.error as error:
                        log.error(error.strerror or str(error))

                # create stream
                self._stream = LogSocketStream(self, stream_socket)
                with self._lock:
                    for luid, name in self._actual.iteritems():
                        self._queue.appendleft((actions.ASSUME, luid, name))

            # send entry(ies) to the server
            try:
                if entry[0] == actions.ASSUME:
                    self._assume_request.pack_into(self._stream, actions.ASSUME, entry[1], entry[2])
                    with self._lock:
                        self._actual[entry[1]] = entry[2]
                elif entry[0] == actions.REVOKE:
                    self._revoke_request.pack_into(self._stream, actions.REVOKE, entry[1])
                    with self._lock:
                        del self._actual[entry[1]]
                else:
                    self._write_request.pack_into(self._stream, actions.WRITE, entry[1], len(entries))
                    for index, entry in enumerate(entries):
                        entry[2].pack_into(self._stream, *entry[3])
                return 0
            except socket.error as error:
                log.error(error.strerror or error.message)
                self._stream = None
        except:
            with self._lock:
                if entry[0] == actions.WRITE:
                    self._queue.extendleft(entries)
                else:
                    self._queue.appendleft(entry)
            raise
Example no. 43
 def _write(self, message):
     if settings.LOGGING_OUTPUT and settings.LOGGER:
         log.error(message)
     else:
         console.error(message)
	def tenant_do_work(self,tenant_name,cmd):
		#tenant_name = 'kycloudprod'
		list_dir = '/var/lib/platform-manager/'
		cmd_date = "date  '+%Y-%m-%d'"
		date = shell.shell_cmd(cmd_date)[0].split()[0]
		list_file_nova='list_'+tenant_name+'_'+date
		list_file_volume ='list_'+tenant_name+'_volume'
		list_file_volume_flag =list_file_volume+'_flag'
		list_file_volume_json = list_file_volume+'.json'
		list_file_volume_back = list_file_volume+'.bak'
		if not os.path.exists(list_dir):
			os.makedirs(list_dir,0o755)
			#os.chdir(list_dir)
		

		os.chdir(home_path)

		#TENANT_NAME_LIST =
		env_set_func(tenant_name)
		logging.info(os.environ.data)
		#####################
		# cmd_list 
		######################
		cmd_list = "nova list"
		#######################
		# cmd_volume_list
		############################
		cmd_volume_list = "nova volume-list"
		######################
		# cmd_stop  
		#######################
		############################
		cmd_stop = []
		if cmd == "stop":
			server_running_id = []
			server_cmd = "nova list |grep Running|awk '{print $2}'"
			cmd_result = shell.shell_cmd(server_cmd)
		#
			for i in xrange(len(cmd_result)):
				lo = cmd_result[i].split()[0]
				server_running_id.append(lo)
		#
		#
			for server_stop in server_running_id:
				cmd_lop = "nova stop %s" %server_stop
				cmd_stop.append(cmd_lop)
			logging.info("this is Server_runing_id: %s" % server_running_id)
			# print "this is Server_runing_id: %s" % server_running_id
		##########################################
		# cmd_start
		#################################
		cmd_start = []
		if cmd == "start":
			server_shutdown_id = []
			server_stop_cmd = " nova list|grep Shutdown |awk '{print $2}'"
			cmd_result_stop = shell.shell_cmd(server_stop_cmd)
		#
			for j in xrange(len(cmd_result_stop)):
				po = cmd_result_stop[j].split()[0]
				server_shutdown_id.append(po)
		
		##
			for server_start in server_shutdown_id:
				cmd_pop = "nova start %s" %server_start
				cmd_start.append(cmd_pop)
		# print "this is Server_shutdown_id: %s" % server_shutdown_id
		####################################
		# cmd_volume_attach
		#######################################
		cmd_volume_attach = []
		if cmd == "volume_attach" :
			start_list = []
			#print "This is json_load:%s" %(list_dir+list_file_volume_json)
			f = open(list_dir+list_file_volume_json)
			json_load =json.load(f,encoding="utf-8")
			f.close()
			print "This is json_load:%s" %json_load
			for j in xrange(len(json_load)):
				cc = json_load[j]
				device = str(cc['device'])
				server_id = str(cc['server_id'])
				volume_id = str(cc['id'])
				ff = tuple((server_id,volume_id,device))
				start_list.append(ff)
			
			for server,volume,device_path in start_list:
				cmd_v = "nova volume-attach %s %s %s" % (server,volume,device_path)
				cmd_volume_attach.append(cmd_v)
			# print "This is server_volume_id: %s" % server_volume_id
			logging.info("This is start_list: %s" % start_list)
		##################################
		# cmd_volume_detach
		###########################
		cmd_volume_detach = []
		if cmd == "volume_detach":
			server_volume_detach_id = []
			volume_detach_id = []
			cmd_detach_server = "nova volume-list |grep in-use|awk '{print $(NF-1)}'"
			cmd_result_server = shell.shell_cmd(cmd_detach_server)
			for de in xrange(len(cmd_result_server)):
				sd = cmd_result_server[de].split()[0]
				server_volume_detach_id.append(sd)
			cmd_detach_volume = "nova volume-list |grep in-use|awk '{print $2}'"
			cmd_result_volume = shell.shell_cmd(cmd_detach_volume)
			for dv in xrange(len(cmd_result_volume)):
				vd = cmd_result_volume[dv].split()[0]
				volume_detach_id.append(vd)
			stop_list = []
			for e in xrange(len(server_volume_detach_id)):
				fd = tuple((server_volume_detach_id[e],volume_detach_id[e]))
				stop_list.append(fd)
			for server_volume_01,volume_01 in stop_list:
				cmd_d = "nova volume-detach %s %s" % (server_volume_01,volume_01)
				cmd_volume_detach.append(cmd_d)
			#print "This is volume_detach_id:%s" % volume_detach_id
			#print "This is server_volume_detach_id:%s" %server_volume_detach_id
		##############################################
		#############################
		# volume_json
		############################
		if cmd == "stop" :
			volume_json_key = []
			#volume_key_cmd = "grep in-use %s |awk '{print $2}'" %(list_dir+list_file_volume)
			volume_key_cmd = "nova volume-list|grep in-use |awk '{print $2}'"
			volume_json_key_result = shell.shell_cmd(volume_key_cmd)
			for k in xrange(len(volume_json_key_result)):
				jk = volume_json_key_result[k].split()[0]
				volume_json_key.append(jk)
			volume_json = []
			for js in xrange(len(volume_json_key)):
				volume_value_cmd = "nova volume-show %s |grep device |awk '{print $4,$5,$6,$7,$12,$13}'" %volume_json_key[js]
				reuslt = shell.shell_cmd(volume_value_cmd)
				reuslt_ev = eval(reuslt[0].split('\n')[0])[0]
				volume_json.append(reuslt_ev)
			# json 
			fp = open(list_dir+list_file_volume_json,'w')
			json.dump(volume_json,fp)
			fp.close()
			logging.info(volume_json)
		
		##############################################
		content_list = []
		cmd_name = {
			"list":cmd_list,
			"stop":cmd_stop,
			"volume_list":cmd_volume_list,
			"start":cmd_start, 
			"volume_attach":cmd_volume_attach,
			"volume_detach":cmd_volume_detach
		}
		print "This is cmdname : %s" % cmd_name
		logging.debug("This is cmd_name:%s" %cmd_name)
		print cmd
		cmd_list = []
		cmd_result = cmd_name.get(cmd)
		if isinstance(cmd_result,str):
			cmd_list.insert(0,cmd_result)
		if isinstance(cmd_result,list):
			cmd_list = cmd_result
		#print "This is cmd_list: %s" % cmd_list
		logging.debug("This is cmd_list: %s" % cmd_list)
		####################
		in_use_check_id = []
		in_use_check_server_cmd = "nova volume-list|grep in-use|awk '{print $(NF-1)}'"
		in_use_check_result = shell.shell_cmd(in_use_check_server_cmd)
		for in_use in xrange(len(in_use_check_result)):
			iu = in_use_check_result[in_use].split()[0]
			in_use_check_id.append(iu)
			 
		for cmd_l in cmd_list:
			#print "this is cmd_l:%s" % cmd_l
			logging.debug("this is cmd_l:%s" % cmd_l)
			if cmd_l == "nova volume-list":
				content = shell.shell_cmd(cmd_l)
				#content = os.system('nova list')
				back_cmd = "cp -f %s %s" %(list_dir+list_file_volume,list_dir+list_file_volume_back)
				shell.shell_cmd(back_cmd)
				f = open(list_dir+list_file_volume,'w')
				f.writelines(content)
				f.close()
				logging.info(content)
				content_list.append(content)
			else:
				if "nova start" in cmd_l:
					server_id = cmd_l.split()[2] 
					if  server_id in in_use_check_id:  
						content = shell.shell_cmd(cmd_l)
						#content = os.system('nova list')
						f = open(list_dir+list_file_nova,'a')
						f.writelines(content)
						f.close()
						logging.info(content)
						content_list.append(content)

				else:
					content = shell.shell_cmd(cmd_l)
					#content = os.system('nova list')
					f = open(list_dir+list_file_nova,'a')
					f.writelines(content)
					f.close()
					logging.info(content)
					content_list.append(content)
		#print "This is content: %s" % content
		time.sleep(10)
		print "\033[1;31;40m Tenant: %s performing the %s action, please wait.....\033[0m" % (tenant_name,cmd)
		##############################
		# check_result_instance and check_result_volume
		##################################
		# check_result_instance
		cmd_check = "nova list |awk '{print $6}'|grep -v Status|grep -v ^$"
		check_cmd_result = shell.shell_cmd(cmd_check)
		check_result = []
		for c in xrange(len(check_cmd_result)):
			check = check_cmd_result[c].split()[0]
			check_result.append(check)
		num = len(check_result)
		###########################3
		# check_result_volume
		cmd_check_volume = "nova volume-list|grep -v Status|awk '{print $4}'|grep -v ^$"
		check_volume_cmd_result = shell.shell_cmd(cmd_check_volume)
		check_volume_result = []
		for v in xrange(len(check_volume_cmd_result)):
			check_v = check_volume_cmd_result[v].split()[0]
			check_volume_result.append(check_v)
		num_volume = len(check_volume_result)
		#print "I'am herer"
		if cmd == "stop" or cmd == "volume_detach":
			if num == check_result.count("SHUTOFF") and num_volume == check_volume_result.count("available"):
				logging.info("%s %s" %(check_result,check_volume_result))
				logging.info("Tenant: %s All instances stopped successfully!" %tenant_name)
				logging.info("Tenant: %s All volumes detached successfully!" %tenant_name)
				print "\033[1;31;40m Tenant: %s all instances stopped successfully!\033[0m" % tenant_name
				print "\033[1;31;40m Tenant: %s all volumes detached successfully!\033[0m" % tenant_name
			elif num == check_result.count("SHUTOFF"):
				logging.info(check_result)
				logging.info("Tenant: %s All instances stopped successfully!" %tenant_name)
				print "\033[1;31;40m Tenant: %s all instances stopped successfully!\033[0m" % tenant_name
			elif 0 == check_result.count("SHUTOFF"):
				logging.info(check_result)
				logging.critical("Tenant: %s All instances failed to stop!" %tenant_name)
			elif num != check_result.count("SHUTOFF"):
				logging.info(check_result)
				logging.error("Tenant:%s Not all instances stopped successfully!" %tenant_name)
			elif num_volume == check_volume_result.count("available"):
				logging.info(check_volume_result)
				logging.info("Tenant: %s All volumes detached successfully!" %tenant_name)
				print "\033[1;31;40m Tenant: %s all volumes detached successfully!\033[0m" % tenant_name
			elif 0 == check_volume_result.count("available"):
				logging.info(check_volume_result)
				logging.critical("Tenant: %s All volumes failed to detach!" %tenant_name)
			elif num != check_result.count("SHUTOFF") or num_volume != check_volume_result.count("available"):
				logging.info("Status info: %s,%s" %(check_volume_result,check_result))
				logging.error("Tenant:%s Not all stop/detach operations succeeded!" %tenant_name)
		if cmd == "start":
			#######################################
			if num == check_result.count("ACTIVE"):
				logging.info(check_result)
				logging.info("Tenant:%s All instances started successfully!" %tenant_name)
				print "\033[1;31;40m Tenant:%s All instances started successfully!\033[0m" %tenant_name
			elif 0 == check_result.count("ACTIVE"):
				logging.info(check_result)
				logging.critical("Tenant: %s All instances failed to start!" %tenant_name)
			elif num != check_result.count("ACTIVE"):
				logging.info(check_result)
				logging.error("Tenant:%s Not all instances started successfully!" %tenant_name)
		if cmd == "volume_attach":
			check_volume_name_old = []
			cmd_volume_name_old ="grep  in-use %s |awk '{print $2}'" %(list_file_volume)
			name_old_result = shell.shell_cmd(cmd_volume_name_old)
			for o in xrange(len(name_old_result)):
				vo = name_old_result[o].split()[0]
				check_volume_name_old.append(vo)
			check_volume_name = []
			cmd_volume_name ="nova volume-list |grep in-use|awk '{print $2}'"
			name_result = shell.shell_cmd(cmd_volume_name)
			for l in xrange(len(name_result)):
				vl =  name_result[l].split()[0]
				check_volume_name.append(vl)
			#######################################
			if len(check_volume_name) == len(check_volume_name_old):
				logging.info((check_volume_result))
				logging.info("Tenant:%s All volumes attached successfully!" %tenant_name)
				print "\033[1;31;40m Tenant:%s All volumes attached successfully!\033[0m" %tenant_name
				flag = open(list_dir+list_file_volume_flag,'w')
				flag.writelines('0')
				flag.close()
				return 0
			elif len(check_volume_name) == 0:
				logging.info(check_volume_result)
				logging.critical("Tenant: %s All volumes failed to attach!" %tenant_name)
				flag = open(list_dir+list_file_volume_flag,'w')
				flag.writelines('1')
				flag.close()
				return 1
			else:
				logging.info(check_volume_result)
				logging.error("Tenant:%s Not all volume attach operations succeeded!" %tenant_name)
				flag = open(list_dir+list_file_volume_flag,'w')
				flag.writelines('-1')
				flag.close()
				return -1