def get_env_app_opco(env, app):
    """Look up the environment_list row for *env* / *app*; None when absent."""
    try:
        mapping = environment_list.objects.get(env=env, app_list__app=app)
    except environment_list.DoesNotExist:
        logger.info("Error :::: Specified environment " + env +
                    " and application " + app +
                    " does not exist in the Data Base")
        return None
    return mapping
 def __init__(self, q):
     """Prepare the jenkins-reader process: queue, empty thread pool, timeout."""
     logger.info("Forking process to read jenkins")
     super(j_process, self).__init__()
     self.name = "j_daemon_"
     # NOTE(review): daemon flag was deliberately left unset here (commented
     # out in the original) — the process is joined/waited on explicitly.
     self.q = q
     self.timeout = 10
     self.thread_list = []
# Exemple #3
# 0
def get_environment(detail):
    """Extract the ENV_PREFIX parameter value from a jenkins job *detail* dict.

    Scans detail['actions'] for the first action carrying a 'parameters'
    list, then returns the 'value' of the first parameter whose values
    contain 'ENV_PREFIX'. Returns None (after logging) when the expected
    keys or parameter are missing.
    """
    try:
        # List comprehensions instead of nested filter(): on Python 3
        # filter() returns a lazy iterator, so the original [0] indexing
        # raised TypeError.
        with_params = [a for a in detail['actions'] if 'parameters' in a]
        matches = [p for p in with_params[0]['parameters']
                   if 'ENV_PREFIX' in p.values()]
        return matches[0]['value']
    except (KeyError, IndexError):
        # IndexError also caught so a job with no matching parameter
        # degrades to None instead of crashing the caller.
        logger.info("ENV_PREFIX does not exist")
# Exemple #4
# 0
 def lookForNewBuilds(self):
     """Compare jenkins' latest build number with the one stored in the DB.

     Returns (True, builds) when nothing is stored yet, (False, None) when
     the DB is up to date, otherwise (True, builds-newer-than-stored).
     """
     details = get_job_info(self.job_url)
     builds = get_builds(details)
     jLatestBuild = builds[0]['number']
     try:
         dLatestBuild = get_latest_build_by_url(self.job_url)
     except (build_archive.DoesNotExist, latest_build.DoesNotExist):
         logger.info("no entry found for the build " + self.job_url)
         return True, builds
     if jLatestBuild == dLatestBuild:
         return False, None
     # One list comprehension replaces the old map-to-None + filter(None, ...)
     # pair: on Python 3 those return lazy iterators, which breaks callers
     # that index or len() the result, and the None placeholders were only
     # ever created to be filtered back out.
     buildsNotSaved = [b for b in builds if int(b['number']) > dLatestBuild]
     return True, buildsNotSaved
# Exemple #5
# 0
    def saveNewBuild(self, url):
        """Persist one jenkins build for *url*: archive, exec counts, and —
        on success — the fitnesse test runs and report.

        Always returns True so the polling caller continues; a KeyError from
        an unexpected jenkins payload is logged and swallowed.
        """
        try:
            detail = get_job_info(url)
            env = get_environment(detail).upper()
            duration = detail['duration']
            timestamp = detail['timestamp']
            status = detail['result']
            build_no = detail['number']
            is_building = detail['building']
            is_success = not is_building
            automation_report_ok = False
            self.blocks = []
            if not is_building:
                count = self.getCount(duration, url)
                self.env_opco_app = get_env_app_opco(env, self.app)
                if self.env_opco_app is None:
                    # NOTE(review): looks like this was meant to bail out or
                    # log; as written we continue with a None mapping — confirm.
                    pass
                exec_count = create_exec_count(count, self.env_opco_app)
                self.build_detail = create_build_detail(
                    url, build_no, timestamp, status)
                build_archive = create_build_archive(self.build_detail,
                                                     exec_count)

                # Success means a finished build that ended SUCCESS or UNSTABLE.
                is_success = True if not is_building and (
                    status == 'SUCCESS' or status == 'UNSTABLE') else False

                if is_success:
                    try:
                        test_run = self.getTestRuns(url)
                        automation_report_ok = True

                    except KeyError as e:
                        logger.info(
                            "Error. Jenkins doesn't like this automation report..... {0} ..... {1}"
                            .format(e, url))
                        build_archive.exec_count.count = get_blank_count()

                build_archive.save()
                build, self.env_in_latest_build = update_latest_build(
                    env, build_archive, is_success)
                if build:
                    logger.info("Latest build got updated with " + url)
                else:
                    logger.info("Latest build was not updated for " + url)
                if automation_report_ok:
                    # Plain for-loops instead of map(): map() is lazy on
                    # Python 3, so the side-effecting save()/add() calls
                    # were never executed.
                    for run in test_run:
                        run.save()
                    f_report = create_fitnesse_report(self.build_detail)
                    f_report.save()
                    for run in test_run:
                        f_report.test_run.add(run)

            return True

        except KeyError as e:
            logger.info(
                "Error. Jenkins doesn't like this build..... {0} ..... {1}".
                format(e, url))
            return True
def start():
    """Queue every test job with its pipeline url and fork the reader process."""
    logger.info("Reading all job list")
    q = collections.deque()
    c = 0
    for job in test_job_app.objects.all():
        c += 1
        pipeline = pipeline_opco.objects.get(test_job_app=job)
        logger.info("Putting " + job.test_job_url + " for " + job.app +
                    " in queue")
        entry = {
            "job": job,
            "pipeline_url": pipeline.pipeline_url,
            "stopwatch": datetime.now(),
        }
        q.append(entry)
    proc = j_process(q)
    proc.start()
 def run(self):
     """Process entry point: record the pid, build the thread pool, start it,
     and block until every worker thread finishes."""
     logger.info("started process j_daemon with id " + str(self.pid))
     process_info = {self.pid: self.name}
     save_daemon_process_id(process_info)
     __q_len = len(self.q)
     # Pinned to a single worker until DB connection pooling is in place.
     # (Intended sizing was: __t_count = 10 if __q_len > 10 else __q_len)
     __t_count = 1
     self.createPool(__t_count)
     self.fire()
     self.waitForThreads()
     # Typo fixed in the log message: was "DADE".
     logger.info("All threads are DEAD .... The process is exiting")
# Exemple #8
# 0
 def getCount(self, duration, url):
     """Sum pass/fail/ignored scenario counts over the fitnesse report children,
     recording each child name in self.blocks; blank count on HTTP error."""
     report_url = getApiUrl(getFitnesseReportUrl(url))
     try:
         detail = get_job_info(report_url)
         passed = failed = ignored = 0
         for child in detail['children']:
             passed += int(child['passCount'])
             failed += int(child['failCount'])
             ignored += int(child['ignoredCount'])
             self.blocks.append(child['name'])
         count = create_count(passed + failed + ignored, passed, failed,
                              ignored, duration)
         logger.info("scenario count " + str(count.total) + " for " + report_url)
     except HTTPError:
         logger.info("no result for this build " + report_url)
         return get_blank_count()
     return count
 def waitForThreads(self):
     """Block until every worker thread in the pool has finished.

     Polls every 10 seconds, logging a heartbeat while any thread lives.
     """
     # is_alive() replaces isAlive(), which was removed in Python 3.9.
     while any(t.is_alive() for t in self.thread_list):
         logger.info("Checking heartbeat .... All threads are alive")
         time.sleep(10)
 def fire(self):
     """Start every worker thread in the pool, logging each launch."""
     for worker in self.thread_list:
         logger.info("firing thread " + worker.name)
         worker.start()
 def createPool(self, __t_count):
     """Fill self.thread_list with __t_count t_maker worker threads."""
     logger.info("Creating thread pool of size " + str(__t_count))
     self.thread_list.extend(
         t_maker(idx + 65, self, "thread_" + str(idx))
         for idx in range(__t_count))