def handle_delivery_success(self):
    """Finalize a successful delivery.

    Marks the project build as complete, records build/phase completion
    data, then pushes the job onto 'master_tube' twice: once for user
    notification and once for the tracker.
    """
    namespace = self.job['namespace']

    # Record completion in the build tracker.
    BuildTracker(namespace, logger=self.logger).complete()
    self.logger.debug('Marked project build: {} as complete.'.format(namespace))
    self.logger.debug("Putting job details to master_tube for tracker's"
                      " consumption")

    # Persist completion status/timestamps for the phase and the build.
    self.set_buildphase_data(build_phase_status='complete',
                             build_phase_end_time=timezone.now())
    self.set_build_data(build_status='complete',
                        build_end_time=timezone.now())

    # First queue the job as a delivery-complete notification...
    self.job['action'] = 'notify_user'
    self.queue.put(json.dumps(self.job), 'master_tube')

    # ...then pause briefly so the two uploads to master_tube do not
    # race each other, and queue it again for tracking.
    time.sleep(10)
    self.job['action'] = 'tracking'
    self.queue.put(json.dumps(self.job), 'master_tube')
def run_test(self):
    """Run the OpenShift 'test' build for the job (the user-defined tests).

    Returns the test build status from OpenShift, or False when the
    build could not be triggered (no build id, or an OpenshiftError).
    """
    namespace = self.job["namespace"]
    project = self.job["project_hash_key"]

    self.setup_data()
    self.set_buildphase_data(build_phase_status='processing',
                             build_phase_start_time=timezone.now())
    try:
        self.openshift.login()
        # TODO: This needs to be addressed after addressing Issue #276
        build_id = self.openshift.build(project, 'test')
        if not build_id:
            # Build could not be scheduled; nothing to wait for.
            return False
    except OpenshiftError as e:
        self.logger.error(e)
        return False

    # Consistency fix: pass the logger, as the other BuildTracker call
    # sites in this file do.
    BuildTracker(namespace, logger=self.logger).start()
    test_status = self.openshift.wait_for_build_status(project, build_id,
                                                       'Complete',
                                                       status_index=2)
    logs = self.openshift.get_build_logs(project, build_id, "test")
    test_logs_file = os.path.join(self.job['logs_dir'], 'test_logs.txt')
    self.set_buildphase_data(build_phase_log_file=test_logs_file)
    self.export_logs(logs, test_logs_file)
    return test_status
def handle_job(self, job):
    """Process a build job, deferring it while any parent is building.

    If any parent project's build is in progress, the job is pushed back
    to the queue (with retry metadata) to be processed later; otherwise
    the container build is run and success/failure handlers invoked.
    """
    self.job = job
    self.setup_data()
    self.set_buildphase_data(
        build_phase_status='processing',
        build_phase_start_time=timezone.now()
    )

    cause_of_build = get_cause_of_build(
        os.environ.get('JENKINS_MASTER'),
        self.job["job_name"],
        self.job["jenkins_build_number"]
    )
    self.job["cause_of_build"] = cause_of_build
    self.set_build_data(build_trigger=cause_of_build)

    # Fix: an empty or missing 'depends_on' previously produced [''],
    # triggering a spurious BuildTracker('') lookup. Filter blanks out.
    parents = [p for p in self.job.get('depends_on', '').split(',') if p]

    # Reset retry params
    self.job['retry'] = None
    self.job['retry_delay'] = None
    self.job['last_run_timestamp'] = None

    # Collect every parent whose build is currently running; a non-empty
    # list is equivalent to the original OR-accumulated boolean.
    parents_in_build = [
        parent for parent in parents
        if BuildTracker(parent, logger=self.logger).is_running()
    ]

    if parents_in_build:
        self.logger.info('Parents in build: {}, pushing job: {} back '
                         'to queue'.format(parents_in_build, self.job))
        self.set_buildphase_data(
            build_phase_status='requeuedparent'
        )
        # Retry delay in seconds
        self.job['retry'] = True
        self.job['retry_delay'] = settings.BUILD_RETRY_DELAY
        self.job['last_run_timestamp'] = time.time()
        self.queue.put(json.dumps(self.job), 'master_tube')
    else:
        self.logger.info('Starting build for job: {}'.format(self.job))
        success = self.build_container()
        if success:
            self.job["build_status"] = True
            self.handle_build_success()
        else:
            self.job["build_status"] = False
            self.handle_build_failure()
def collect(self):
    """Initiate the garbage collection.

    Builds, from the index yml files, the set of tags each container is
    expected to have, compares it against the registry metadata, and
    records every registry tag not present in the index. When the
    collect flag is set, the mismatched images are actually removed.

    Raises:
        Exception: if an index file has no "Projects" key.
    """
    index_files = glob(self._index_location + "/*.yml")

    # Go through index files
    if self._verbose:
        print("Going through the index.")
    for index_file in index_files:
        if "index_template" in index_file:
            # Skip the template file itself.
            continue
        data = lib.load_yaml(index_file)
        if "Projects" not in data:
            raise Exception("Invalid index file")
        for entry in data["Projects"]:
            app_id = entry["app-id"]
            job_id = entry["job-id"]
            desired_tag = entry["desired-tag"]
            # "library" images carry no namespace prefix.
            container_name = str.format(
                "{namespace}{name}",
                namespace=(str(app_id) + "/")
                if str(app_id) != "library" else "",
                name=str(job_id))
            container_tag = container_name + ":" + str(desired_tag)
            # Bug fix: the initialisation below was commented out, so the
            # first append() for each container raised KeyError.
            if container_name not in self.index_containers:
                self.index_containers[container_name] = []
            # Tags whose build is in progress are not considered settled
            # index data yet.
            if not BuildTracker(container_tag).is_running():
                self.index_containers[container_name].append(desired_tag)

    # Match index data with registry metadata
    for r_name, r_info in self._registry_info.tags.iteritems():
        r_tags = r_info["tags"]
        if r_name not in self.mismatched:
            self.mismatched[r_name] = []
        for registry_tag in r_tags:
            # A registry tag mismatches when its container is unknown to
            # the index, or known but without that tag (merges the two
            # original duplicate append branches).
            if r_name not in self.index_containers or \
                    registry_tag not in self.index_containers[r_name]:
                self.mismatched[r_name].append(registry_tag)

    end_msg = "Images to Remove"
    if self._collect:
        end_msg = "Images Removed"
        # Deletion only happens when collection is enabled; otherwise
        # this is a dry run that just reports.
        self._delete_mismatched()
    # Parenthesised form prints identically under Python 2 and 3.
    print(str.format("{0} : \n{1}", end_msg, self.mismatched))
def _mark_for_removal(self):
    """Orphans mismatched images from registry.

    For every mismatched container/tag pair, asks the local registry to
    mark the image for removal, telling it whether a build for that tag
    is currently running.
    """
    lib.print_msg("Marking mismatched containers for removal...",
                  self._verbose)
    for container_full_name, tag_list in self._mismatched.iteritems():
        # Hoisted: the namespace/name split depends only on the
        # container, not the tag, so compute it once per container
        # instead of once per tag.
        if "/" in container_full_name:
            container_namespace, container_name = \
                container_full_name.split("/")
        else:
            container_namespace = None
            container_name = container_full_name
        for tag in tag_list:
            mark_removal_from_local_registry(
                self._verbose,
                container_namespace,
                container_name,
                tag,
                BuildTracker(
                    get_container_name(container_namespace,
                                       container_name,
                                       tag)).is_running())
def build_container(self):
    """Run the OpenShift 'build' build for the job.

    Returns the build status from OpenShift, or False when the build
    could not be triggered (no build id, or an OpenshiftError).
    """
    namespace = self.job["namespace"]
    project_hash_key = self.job["project_hash_key"]
    try:
        self.openshift.login()
        build_id = self.openshift.build(project_hash_key, 'build')
        if not build_id:
            # Build could not be scheduled; nothing to wait for.
            return False
    except OpenshiftError as e:
        self.logger.error(e)
        return False

    # Consistency fix: pass the logger, as the other BuildTracker call
    # sites in this file do.
    BuildTracker(namespace, logger=self.logger).start()
    build_status = self.openshift.wait_for_build_status(
        project_hash_key, build_id, 'Complete')
    logs = self.openshift.get_build_logs(project_hash_key, build_id)
    build_logs_file = os.path.join(self.job['logs_dir'], 'build_logs.txt')
    self.set_buildphase_data(build_phase_log_file=build_logs_file)
    self.export_logs(logs, build_logs_file)
    return build_status