Example #1
    def __run_recipe(self, session, recipe):
        """
        Run the recipe, executing any configured before/after ingestion hooks
        :param Session session:
        :param BaseRecipe recipe:
        """

        if session.before_hooks:
            log.info(make_bold("Executing before ingestion hook(s)..."))
            self.__run_hooks(session, session.before_hooks, False)

        recipe.describe()

        if session.blocking and not session.is_automated():
            input("Press Enter to Continue...: ")

        log.title("\nRunning")

        if session.blocking:
            # Display a progress bar showing how many files have been imported
            t = Thread(target=run_status, args=(recipe,))
            t.daemon = True
            t.start()
            recipe.run()
            t.join()
        else:
            # Non-blocking mode: only one file is imported at a time, so no progress bar is shown (joining threads would only add overhead)
            recipe.run()

        recipe.importer = None

        if session.after_hooks:
            log.info(make_bold("Executing after ingestion hook(s)..."))
            self.__run_hooks(session, session.after_hooks, True)
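The run_status target passed to the Thread above is not shown in these examples. A minimal sketch of such a progress reporter, assuming a hypothetical recipe.get_progress() accessor that returns a (processed, total) pair:

import time

def run_status(recipe):
    # Poll the recipe and log progress until every file has been processed.
    # get_progress() is a hypothetical accessor; the real recipe class may
    # expose its progress counters differently.
    processed, total = recipe.get_progress()
    while processed < total:
        log.info("Imported {0}/{1} files...".format(processed, total))
        time.sleep(1)
        processed, total = recipe.get_progress()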
Example #2
File: cfg.py Project: ASzc/nagoya
def mod_aconf(path):
    log.info("Editing {0}".format(path))
    a = aconf.ApacheHttpdConf()
    a.read_file(path)
    yield a
    log.debug("Writing to path {0}".format(path))
    a.write_file(path)
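The bare yield in the middle of mod_aconf indicates it is meant to be driven as a context manager, and the same pattern is used by mod_text and mod_ini further below. A minimal usage sketch, assuming the generator is wrapped with contextlib.contextmanager (the setter on ApacheHttpdConf is hypothetical):

from contextlib import contextmanager

mod_aconf = contextmanager(mod_aconf)  # wrap the generator so it can drive a with statement

with mod_aconf("/etc/httpd/conf.d/kojihub.conf") as a:
    # edits made here are written back to the file when the block exits
    a.set("SSLVerifyDepth", "10")  # hypothetical setter on ApacheHttpdConf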
Example #3
	def fetch_entitlements_if_necessary(self):
		log.debug('Fetching Origin entitlements')

		# Get the etag
		etag_path = get_data_path('origin-entitlements.etag')
		etag = ''
		if isfile(self.entitlements_path) and isfile(etag_path):
			with open(etag_path, mode='r') as file:
				etag = file.read()

		# Fetch entitlements if etag does not match
		url = 'https://raw.githubusercontent.com/acidicoala/public-entitlements/main/origin/v1/entitlements.json'

		try:
			response = requests.get(url, headers={'If-None-Match': etag}, timeout=10)
		except Exception as e:
			log.error(f"Failed to fetch origin entitlements. {str(e)}")
			return

		if response.status_code == 304:
			log.debug('Cached Origin entitlements have not changed')
			return

		if response.status_code != 200:
			log.error(f'Error while fetching entitlements: {response.status_code} - {response.text}')
			return

		try:
			index = 1000000
			entitlements: List[dict] = json.loads(response.text)
			for entitlement in entitlements:
				entitlement.update({
					"entitlementId": index,
					"lastModifiedDate": "2020-01-01T00:00Z",
					"entitlementSource": "ORIGIN-OIG",
					"grantDate": "2020-01-01T00:00:00Z",
					"suppressedBy": [],
					"version": 0,
					"isConsumable": False,
					"productCatalog": "OFB",
					"suppressedOffers": [],
					"originPermissions": "0",
					"useCount": 0,
					"projectId": "123456",
					"status": "ACTIVE"
				})
				index += 1
		except ValueError as e:
			log.error(f"Failed to decode entitlements from json. {str(e)}")
			return

		# Cache entitlements
		with open(self.entitlements_path, 'w') as f:
			f.write(json.dumps(entitlements, indent=4, ensure_ascii=False))

		# Cache etag
		with open(etag_path, 'w') as f:
			f.write(response.headers['etag'])

		log.info('Origin entitlements were successfully fetched and cached')
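The ETag handling above is a standard HTTP conditional GET: the cached validator is sent via If-None-Match, and a 304 reply means the cached copy is still fresh. A minimal standalone sketch of the same pattern (function and path names are illustrative):

import requests
from os.path import isfile

def fetch_if_changed(url, cache_path, etag_path):
    etag = ''
    if isfile(cache_path) and isfile(etag_path):
        with open(etag_path) as f:
            etag = f.read()
    # The server replies 304 Not Modified if the ETag still matches
    response = requests.get(url, headers={'If-None-Match': etag}, timeout=10)
    if response.status_code == 304:
        return None  # cached copy is up to date
    response.raise_for_status()
    with open(cache_path, 'w') as f:
        f.write(response.text)
    if 'etag' in response.headers:
        with open(etag_path, 'w') as f:
            f.write(response.headers['etag'])
    return response.text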
Example #4
 def __init__(self, coverage_id):
     """
     The resumer keeps track of data providers that have been imported so that a record is kept if several
     runs are performed
     :param str coverage_id: the id of the coverage that is imported
     """
     if ConfigManager.track_files:
         self.__RESUMER_FILE_NAME__ = ConfigManager.resumer_dir_path + coverage_id + self.__RESUMER_FILE_SUFFIX__
         self.imported_data = []
         try:
             if os.path.isfile(self.__RESUMER_FILE_NAME__) and os.access(
                     self.__RESUMER_FILE_NAME__, os.R_OK):
                 log.info(
                     "We found a resumer file in the ingredients folder. The slices listed in "
                     + str(self.__RESUMER_FILE_NAME__) +
                     " will not be imported.")
                 self.resume_fp = open(self.__RESUMER_FILE_NAME__)
                 self.imported_data = json.loads(self.resume_fp.read())
                 self.resume_fp.close()
         except IOError as e:
             raise RuntimeException(
                 "Could not read the resume file, full error message: " +
                 str(e))
         except ValueError:
             log.warn(
                 "The resumer JSON file could not be parsed. A new one will be created."
             )
Example #5
    def intercept_entitlements(self, flow: HTTPFlow):
        if BaseAddon.host_and_path_match(
                flow,
                host=OriginAddon.api_host,
                path=r"^/ecommerce2/entitlements/\d+$"):  # Real DLC request
            self.patch_origin_client()

            log.info('Intercepted an Entitlements request from Origin')

            # Get legit user entitlements
            try:
                entitlements: List = json.loads(
                    flow.response.text)['entitlements']
            except KeyError:
                entitlements = []

            # Inject our entitlements
            entitlements.extend(self.injected_entitlements)

            for e in entitlements:
                try:
                    log.debug(f"\t{e['___name']}")
                except KeyError:
                    log.debug(f"\t{e['entitlementTag']}")

            # Modify response
            flow.response.status_code = 200
            flow.response.reason = 'OK'
            flow.response.text = json.dumps({'entitlements': entitlements})

            flow.response.headers.add('X-Origin-CurrentTime', '1609452000')
            flow.response.headers.add('X-Origin-Signature', 'nonce')
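BaseAddon.host_and_path_match is referenced throughout these addons but never shown. A plausible sketch using mitmproxy's request attributes (the real helper may differ):

import re
from mitmproxy.http import HTTPFlow

def host_and_path_match(flow: HTTPFlow, host: str, path: str) -> bool:
    # flow.request.path includes the query string, so strip it before
    # matching against anchored patterns such as r"^/...$"
    path_without_query = flow.request.path.split('?')[0]
    return flow.request.pretty_host == host and re.match(path, path_without_query) is not None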
Example #6
    def fetch_entitlements_if_necessary(self):
        log.debug('Fetching origin entitlements')

        # Get the etag
        etag_path = get_data_path('origin-entitlements.etag')
        etag = ''
        if isfile(self.entitlements_path) and isfile(etag_path):
            with open(etag_path, mode='r') as file:
                etag = file.read()

        # Fetch entitlements if etag does not match
        url = 'https://raw.githubusercontent.com/acidicoala/origin-entitlements/master/entitlements.json'
        # A timeout prevents hanging indefinitely if the host is unreachable
        response = requests.get(url, headers={'If-None-Match': etag}, timeout=10)

        if response.status_code == 304:
            log.debug('Cached Origin entitlements have not changed')
            return

        if response.status_code != 200:
            log.error(
                f'Error while fetching entitlements: {response.status_code} - {response.text}'
            )
            return

        # Cache entitlements
        with open(self.entitlements_path, 'w') as f:
            f.write(response.text)

        # Cache etag
        with open(etag_path, 'w') as f:
            f.write(response.headers['etag'])

        log.info('Origin entitlements were successfully fetched and cached')
Example #7
    def signin(self):
        try:
            inputs = self.get_input()
            # NOTE: this logs raw input, including the plaintext password
            log.info('signin:' + str(inputs))
            cellphone = inputs['cellphone']
            password = md5(inputs['password']).hexdigest()
            user = Users.get(Users.cellphone == cellphone)
            if not user or user.password != password:
                return self.unauthorized()

            t = int(time.time())
            # Re-issue the token if it is missing or older than 144000 seconds (40 hours)
            if not user.token or t - time.mktime(
                    user.token_created_time.timetuple()) > 144000:
                token = gen_token()
                user.token = token
                user.token_created_time = datetime.datetime.now()
            else:
                token = user.token
            self.set_login(user.cellphone, token)
            user.last_login_time = datetime.datetime.now()
            user.save()
            return self.success()
        except Exception as e:
            log.error('signin failed: %s' % traceback.format_exc())
            return self.unauthorized()
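Note that this snippet is Python 2 era code: in Python 3, hashlib.md5 requires bytes, so the hashing line would need an explicit encode (and MD5 is unsuitable for password storage in any case; a salted scheme such as bcrypt is the usual choice):

from hashlib import md5

password = md5(inputs['password'].encode('utf-8')).hexdigest()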
Example #8
def interpret_result(ret, test, separator):
    """
    Collect statistics, and log failed / known fails tests.
    """
    global TOTAL_TESTS_COUNT, IGNORED_TESTS, TOTAL_QUERIES_COUNT, FAILED_TESTS
    TOTAL_TESTS_COUNT += 1
    TOTAL_QUERIES_COUNT += len(test.queries)
    if test.skip:
        log.warn("Test evaluation skipped, reason: %s", test.skip)
        IGNORED_TESTS.append("{} (evaluation skipped, reason: {})".format(
            test.testid, test.skip))
        ret = True
    elif not ret and test.knownfail:
        log.warn("Test result ignored, reason: %s", test.knownfail)
        IGNORED_TESTS.append("{} (result ignored, reason: {})".format(
            test.testid, test.knownfail))
        ret = True
    elif ret and test.knownfail:
        log.warn("Test marked as known fail has been fixed (%s)",
                 test.knownfail)
    elif not ret:
        FAILED_TESTS.append(test.testid)

    log.info(separator)
    return ret
Example #9
 def search_list(self):
     """
     文章搜索列表
     :return:
     """
     inputs = self.get_input()
     page = int(inputs.get('page', 1))
     page_size = int(inputs.get('page_size', 20))
     keywords = inputs.get('keywords', None)
     self.private_data['article_list'] = []
     self.private_data['current_page'] = 1
     self.private_data['total_page'] = 0
     self.private_data['category_list'] = []
     self.private_data['keywords'] = keywords
     try:
         category_list = Categories.select().where(Categories.status == 0). \
             order_by(Categories.id.desc())
         if keywords:
             article_query = Articles.select().where(
                 Articles.name.contains(keywords))
             total_count = article_query.count()
             total_page = (total_count + page_size - 1) // page_size  # ceiling division
             self.private_data['total_page'] = total_page
             self.private_data['current_page'] = page
             self.private_data['category_list'] = category_list
             self.private_data['article_list'] = article_query.\
                 paginate(page, page_size)
             return self.display('front/search_list')
     except Exception as e:
         log.info('Failed to get search result. Keywords: %s. Error: %s',
                  keywords, e)
         log.error(traceback.format_exc())
     return self.display('front/search_list')
Example #10
	def intercept_ownership(flow: HTTPFlow):
		if BaseAddon.host_and_path_match(
				flow,
				host=EpicAddon.api_host,
				path=r"^/epic/ecom/v1/platforms/EPIC/identities/\w+/ownership$"
		):
			log.info('Intercepted an Ownership request from Epic Games')

			url = urlparse(flow.request.url)
			params = parse_qs(url.query)['nsCatalogItemId']

			# Each nsCatalogItemId is formatted as '{namespace}:{item_id}'
			for param in params:
				log.debug(f'\t{param}')

			def process_game(param: str):
				namespace, itemID = param.split(':')
				game = get_epic_game(namespace)
				blacklist = [dlc['id'] for dlc in game['blacklist']] if game is not None else []
				owned = True if game is None else itemID not in blacklist
				return {
					'namespace': namespace,
					'itemId': itemID,
					'owned': owned,
				}

			result = [process_game(param) for param in params]

			EpicAddon.modify_response(flow, result)
Example #11
    def run_recipe(self, session):
        """
        Recipe session
        :param Session session: the session of the import
        :rtype BaseRecipe
        """
        if session.get_recipe()['name'] not in self.registry:
            raise RecipeValidationException("Recipe '" + session.get_recipe()['name'] + "' not found; "
                                            "if it's a custom recipe, please put it in the "
                                            "'$RMANHOME/share/rasdaman/wcst_import/recipes_custom' folder.")
        else:
            recipe = self.registry[session.get_recipe()['name']](session)
            log.title("Initialization")
            log.info("Collected files: " + str(map(lambda f: str(f), session.get_files()[:10])) + "...")

            log.title("\nValidation")
            recipe.validate()
            recipe.describe()

            if not session.is_automated():
                raw_input("Press Enter to Continue...: ")
            t = Thread(target=run_status, args=(recipe,))
            t.daemon = True
            log.title("\nRunning")
            t.start()
            recipe.run()
            t.join()

            log.success("Recipe executed successfully")
Example #12
def disable_proxy():
    __set_reg('ProxyEnable', 0)
    __set_reg('ProxyOverride', orig_proxy_override)
    __set_reg('ProxyServer', orig_proxy_server)
    refresh_internet_settings()

    log.info('Internet proxy disabled')
Example #14
 def print_elapsed_time(self):
     """
     Print how long it took to run the timed statements.
     """
     end_time = time.time()
     log.info("Elapsed time: " +
              "{:.3f}".format(end_time - self.start_time) + " s.")
Example #15
 def __load_imported_data_from_resume_file(self, coverage_id):
     """
     Try to load a resume file coverage_id.resume.json from the input data folder.
     :param str coverage_id: coverage id of the current importer, used to find the resume file.
     """
     if coverage_id not in Resumer.__IMPORTED_DATA_DICT:
         resume_file_path = ConfigManager.resumer_dir_path + coverage_id + Resumer.__RESUMER_FILE_SUFFIX
         Resumer.__RESUMER_FILE_NAME_DICT[coverage_id] = resume_file_path
         try:
             if os.path.isfile(resume_file_path) \
                     and os.access(resume_file_path, os.R_OK):
                 log.info(
                     "We found a resumer file in the ingredients folder. The slices listed in '"
                     + resume_file_path + "' will not be imported.")
                 with open(Resumer.__RESUMER_FILE_NAME_DICT[coverage_id]) as resume_file:
                     Resumer.__IMPORTED_DATA_DICT[coverage_id] = json.loads(resume_file.read())
         except IOError as e:
             raise RuntimeException(
                 "Could not read the resume file, full error message: " +
                 str(e))
         except ValueError:
             log.warn(
                 "The resumer JSON file could not be parsed. A new one will be created."
             )
Example #16
    def read_entitlements_from_cache(self):
        log.debug('Reading Origin entitlements from cache')

        with open(self.entitlements_path, mode='r') as file:
            self.injected_entitlements = json.loads(file.read())

        log.info('Origin entitlements were successfully read from file')
Example #17
def enable_proxy(port: int):
    __set_reg('ProxyEnable', 1)
    __set_reg('ProxyOverride', '<local>')
    __set_reg('ProxyServer', f'127.0.0.1:{port}')
    refresh_internet_settings()

    log.info(f'Internet proxy enabled on 127.0.0.1:{port}')
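__set_reg and refresh_internet_settings are not shown in these examples. A minimal sketch of how such helpers could be implemented on Windows (the real ones may differ):

import ctypes
import winreg

INTERNET_SETTINGS = r'Software\Microsoft\Windows\CurrentVersion\Internet Settings'

def __set_reg(name, value):
    # ProxyEnable is a DWORD; the other proxy values are strings
    reg_type = winreg.REG_DWORD if isinstance(value, int) else winreg.REG_SZ
    with winreg.OpenKey(winreg.HKEY_CURRENT_USER, INTERNET_SETTINGS, 0, winreg.KEY_WRITE) as key:
        winreg.SetValueEx(key, name, 0, reg_type, value)

def refresh_internet_settings():
    # Notify wininet that the proxy settings changed and must be reloaded
    internet_set_option = ctypes.windll.Wininet.InternetSetOptionW
    internet_set_option(0, 39, 0, 0)  # INTERNET_OPTION_SETTINGS_CHANGED
    internet_set_option(0, 37, 0, 0)  # INTERNET_OPTION_REFRESH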
Example #18
 def articles(self):
     inputs = self.get_input()
     url = API_URL + '/api/articles'
     try:
         # fetch once and reuse the result instead of issuing the request twice
         result = get(url, inputs)
         log.info('len articles %s' % len(result))
         return result
     except Exception as e:
         log.error('articles failed: %s' % traceback.format_exc())
Example #19
 def _wrapper(*args, **kwargs):
     start_time = time.time()
     result = func(*args, **kwargs)
     spent_time = time.time() - start_time
     log.info("%s time spent is %f" % (func.__name__, spent_time))
     if spent_time > 0.01:
         timetoolong.append([spent_time])
     return result
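This _wrapper is the inner function of a timing decorator. A complete sketch of the surrounding decorator, assuming timetoolong is a module-level list and using a hypothetical decorator name:

import functools
import time

timetoolong = []

def timed(func):
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        spent_time = time.time() - start_time
        log.info("%s time spent is %f" % (func.__name__, spent_time))
        if spent_time > 0.01:
            timetoolong.append([spent_time])
        return result
    return _wrapper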
Example #20
File: cfg.py Project: ASzc/nagoya
def mod_text(path):
    log.info("Editing {0}".format(path))
    with open(path, "r") as f:
        lines = list(f.readlines())
    yield lines
    log.debug("Writing to path {0}".format(path))
    with open(path, "w") as f:
        f.write("".join(lines))
Example #22
    def __run_hooks(self, session, hooks, after_ingestion=False):
        """
        Run some hooks before/after analyzing input files
        :param Session session:
        :param list[dict] hooks: list of hook definitions (each a dict) to run before/after ingestion
        """
        # gdal (default), netcdf or grib
        recipe_type = GdalToCoverageConverter.RECIPE_TYPE
        if session.recipe["name"] == GeneralRecipe.RECIPE_TYPE:
            recipe_type = session.recipe["options"]["coverage"]["slicer"]["type"]

        for hook in hooks:
            abort_on_error = bool(hook.get("abort_on_error", False))

            if "description" in hook:
                log.info("Executing hook '{}'...".format(make_bold(hook["description"])))

            replace_paths = []
            replace_path_template = None
            if "replace_path" in hook:
                # All replaced input files share same template format (e.g: file:path -> file:path.projected)
                replace_path_template = hook["replace_path"][0]

            # Evaluate shell command expression to get a runnable shell command
            cmd_template = hook["cmd"]

            files = session.files
            if after_ingestion:
                files = session.imported_files

            for file in files:
                evaluator_slice = EvaluatorSliceFactory.get_evaluator_slice(recipe_type, file)
                cmd = self.sentence_evaluator.evaluate(cmd_template, evaluator_slice)
                self.__run_shell_command(cmd, abort_on_error)

                if FileExpressionEvaluator.PREFIX not in cmd_template:
                    # Only need to run hook once if ${...} does not exist in cmd command,
                    # otherwise it runs duplicate commands multiple times (!)
                    if replace_path_template is not None:
                        replace_path = self.sentence_evaluator.evaluate(replace_path_template, evaluator_slice)
                        replace_paths.append(FilePair(replace_path, file.filepath))
                    break

                if replace_path_template is not None:
                    # Evaluate replace path expression to get a valid file input path
                    replace_path = self.sentence_evaluator.evaluate(replace_path_template, evaluator_slice)
                    tmp_files = glob.glob(replace_path, recursive=True)
                    for tmp_file in tmp_files:
                        if not isinstance(file, FilePair):
                            # The first replacement (must keep original input file path)
                            replace_paths.append(FilePair(tmp_file, file.filepath))
                        else:
                            # From the second replacement
                            replace_paths.append(FilePair(tmp_file, file.original_file_path))

            if len(replace_paths) > 0:
                # Use replaced file paths instead of original input file paths to analyze and create coverage slices
                session.files = replace_paths
Example #23
	def patch_origin_client(self):
		origin = Client('Origin', 'Origin.exe', 'libeay32.dll', 'EVP_DigestVerifyFinal')
		eadesktop = Client('EA Desktop', 'EADesktop.exe', 'libcrypto-1_1-x64.dll', 'EVP_DigestVerifyFinal')

		client = origin

		try:
			client_process = Pymem(client.PROCESS_NAME)
		except ProcessNotFound:
			client = eadesktop
			try:
				client_process = Pymem(client.PROCESS_NAME)
			except ProcessNotFound:
				log.warning('Origin/EA Desktop process not found. Patching aborted')
				return

		if client_process.process_id == self.last_client_pid:
			log.debug(f'{client.NAME} client is already patched')
			return

		log.info(f'Patching {client.NAME} client')

		try:
			dll_module = next(m for m in client_process.list_modules() if m.name.lower() == client.DLL_NAME)
		except StopIteration:
			log.error(f'{client.DLL_NAME} is not loaded. Patching aborted')
			return

		# The rest should complete without issues in most cases.

		# Get the Export Address Table symbols
		# noinspection PyUnresolvedReferences
		dll_symbols = PE(dll_module.filename).DIRECTORY_ENTRY_EXPORT.symbols

		# Get the symbol of the EVP_DigestVerifyFinal function
		verify_func_symbol = next(s for s in dll_symbols if s.name.decode('ascii') == client.FUNCTION_NAME)

		# Calculate the final address in memory
		verify_func_addr = dll_module.lpBaseOfDll + verify_func_symbol.address

		# Instructions to patch. We return 1 to force successful response validation.
		patch_instructions = bytes([
			0x66, 0xB8, 0x01, 0,  # mov ax, 0x1
			0xC3  # ret
		])
		client_process.write_bytes(verify_func_addr, patch_instructions, len(patch_instructions))

		# Validate the written memory
		read_instructions = client_process.read_bytes(verify_func_addr, len(patch_instructions))

		if read_instructions != patch_instructions:
			log.error('Failed to patch the instruction memory')
			return

		# At this point we know that patching was successful

		self.last_client_pid = client_process.process_id
		log.info(f'Patching {client.NAME} was successful')
Example #24
	def intercept_entitlements(flow: HTTPFlow):
		if BaseAddon.host_and_path_match(
				flow, host=EpicAddon.ecom_host,
				path=r"^/ecommerceintegration/api/public/v2/identities/\w+/entitlements$"
		) or BaseAddon.host_and_path_match(
				flow, host=EpicAddon.api_host,
				path=r"^/epic/ecom/v1/identities/\w+/entitlements"
		):
			log.info('Intercepted an Entitlements request from Epic Games')

			url = urlparse(flow.request.url)
			sandbox_id = parse_qs(url.query)['sandboxId'][0]

			# Get the game in the config with namespace that matches the sandboxId
			game = get_epic_game(sandbox_id)

			try:
				# Get the entitlements from request params
				entitlementNames = parse_qs(url.query)['entitlementName']
			except KeyError:
				log.warning(
						'No entitlement names were provided, '
						'responding with entitlements defined in the config file'
				)

				# Get the game's entitlements
				entitlements = game['entitlements'] if game is not None and 'entitlements' in game else []

				# Map the list of objects to the list of string
				entitlementNames = [entitlement['id'] for entitlement in entitlements]

			for entitlement in entitlementNames:
				log.debug(f'\t{sandbox_id}:{entitlement}')

			# Filter out blacklisted entitlements
			blacklist = [dlc['id'] for dlc in game['blacklist']] if game is not None and 'blacklist' in game else []
			entitlementNames = [e for e in entitlementNames if e not in blacklist]

			injected_entitlements: List[EpicEntitlement] = [{
				'id': entitlementName,  # Not true, but irrelevant
				'entitlementName': entitlementName,
				'namespace': sandbox_id,
				'catalogItemId': entitlementName,
				'entitlementType': "AUDIENCE",
				'grantDate': "2021-01-01T00:00:00.000Z",
				'consumable': False,
				'status': "ACTIVE",
				'useCount': 0,
				'entitlementSource': "LauncherWeb"
			} for entitlementName in entitlementNames]

			log.info(f'Injecting {len(injected_entitlements)} entitlements')

			original_entitlements: List[EpicEntitlement] = json.loads(flow.response.text)

			merged_entitlements = original_entitlements + injected_entitlements

			EpicAddon.modify_response(flow, merged_entitlements)
Example #25
def is_valid_video_file(file_path, file_name):
    # skip hidden files and anything that is not an .mp4 (likely not valid video files)
    if file_name.startswith('.') or (not file_name.endswith('.mp4')):
        return False
    if os.path.getsize(file_path) == 0:
        log.info('Remove invalid video file: {0}'.format(file_path))
        os.remove(file_path)
        return False
    return True
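A usage sketch that applies this predicate while walking a directory tree (the root path is illustrative):

import os

for root, dirs, files in os.walk('/data/videos'):
    for file_name in files:
        file_path = os.path.join(root, file_name)
        if is_valid_video_file(file_path, file_name):
            log.info('Found valid video: {0}'.format(file_path))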
Example #27
File: cfg.py Project: ASzc/nagoya
def mod_ini(path):
    log.info("Editing {0}".format(path))
    with open(path, "r") as f:
        ini_data = iniparse.INIConfig(f)
    yield ini_data
    # Printing is the only way the library supports writing
    log.debug("Writing to path {0}".format(path))
    with open(path, "w") as f:
        print(ini_data, end="", file=f)
Example #28
 def update_user_info(self):
     inputs = self.get_input()
     token = Users.get(Users.cellphone == self.is_login()).token
     inputs['token'] = token
     log.info('inputs %s ' % inputs)
     url = API_URL + '/api/update_user_info'
     try:
         return post(url, inputs)
     except Exception as e:
         log.error('update_user_info failed: %s' % traceback.format_exc())
Example #29
    def __image_uuid(self, uuid):
        try:
            # Images.get raises if no image with this uuid exists
            image = Images.get(Images.uuid == uuid)

            src = ALI_CDNIMAGES_URL + "/%s" % ALI_OSS_DIR + '/%s.jpeg' % uuid
            log.info('src: ' + str(src))
            return src
        except Exception as e:
            log.error('__image_uuid %s' % traceback.format_exc())
Example #30
	def block_telemetry(flow: HTTPFlow):
		if config.block_telemetry and flow.request.path.startswith('/telemetry'):
			flow.request.text = '{}'  # Just in case

			flow.response = HTTPResponse.make(200, '{}')
			flow.response.headers.add('Content-Type', 'application/json')
			flow.response.headers.add('server', 'eos-gateway')
			flow.response.headers.add('access-control-allow-origin', '*')
			flow.response.headers.add('x-epic-correlation-id', '12345678-1234-1234-1234-123456789abc')

			log.info('Blocked telemetry request from Epic Games')
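Handlers like block_telemetry run inside a mitmproxy addon. A minimal sketch of how such a handler is registered (the class name is an assumption; newer mitmproxy versions use http.Response.make rather than HTTPResponse.make):

from mitmproxy import http

class TelemetryBlocker:
    def request(self, flow: http.HTTPFlow):
        # mitmproxy invokes this hook for every client request
        if flow.request.path.startswith('/telemetry'):
            flow.response = http.Response.make(200, b'{}', {'Content-Type': 'application/json'})

addons = [TelemetryBlocker()]  # loaded with: mitmdump -s this_script.py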
Example #31
 def __file_write(self, urls, content):
     try:
         log.info('into __file_write urls: %s, content: %s' %
                  (urls, content))
         # 'w' mode truncates the file on open, so no explicit truncate() is needed
         with open(urls, 'w') as file_object:
             file_object.write(content)
     except Exception as e:
         log.error(traceback.format_exc())
Example #32
def require_container(hostname, exit=True):
    address = address_in_hosts(hostname)
    if address is not None:
        log.info("Linked to container {0} at {1}".format(hostname, address))
        return True
    else:
        log.critical("Not linked to {0} container, exiting".format(hostname))
        if exit:
            sys.exit(11)
        else:
            return False
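address_in_hosts is referenced but not shown. A plausible sketch that looks a hostname up in /etc/hosts (the real helper may differ):

def address_in_hosts(hostname):
    # Return the address mapped to hostname in /etc/hosts, or None if absent
    with open('/etc/hosts') as f:
        for line in f:
            fields = line.split('#')[0].split()
            if len(fields) >= 2 and hostname in fields[1:]:
                return fields[0]
    return None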
Example #33
 def describe(self):
     """
     Implementation of the base recipe describe method
     """
     super(Recipe, self).describe()
     importer = self._get_importer()
     log.info("A couple of files have been analyzed. Check that the coordinates are correct.")
     index = 1
     for slice in importer.get_slices_for_description():
         log.info("Slice " + str(index) + ": " + str(slice))
         index += 1
Example #34
 def execute(self, request):
     """
     Logs the service call that would be executed if a real executor were used
     :param WCSTRequest request: the request to be executed
     """
     request = self.prepare_request(request)
     service_call = self.base_url + "?" + request.get_query_string()
     log.info(
         make_bold(
             "This is just a mocked request, no data will be changed."))
     log.info(service_call)
Example #36
 def describe(self):
     """
     Implementation of the base recipe describe method
     """
     super(Recipe, self).describe()
     log.info("\033[1mWMS Import:\x1b[0m " + str(self.options['wms_import']))
     importer = self._get_importer()
     log.info("A couple of files have been analyzed. Check that the axis subsets are correct.")
     index = 1
     for slice in importer.get_slices_for_description():
         log.info("Slice " + str(index) + ": " + str(slice))
         index += 1
Example #37
def print_usage():
    """
    Prints the usage of the program
    """
    log.title("NAME")
    log.info("\tWCST Import - imports georeferenced files to a WCS service that supports the Transactional extension.")
    log.title("\nSYNOPSIS")
    log.info("\twcst_import.py ingredients.json")
    log.title("\nDESCRIPTION")
    log.info("\tThe WCST Import utility imports georeferenced files supported by GDAL into a WCS service that supports "
             "Transactional extension.\n\tThis utility is based on a recipe (custom code for a specific scenario) being"
             " used to transform the ingredients file (input files and configuration options)."
             "\n\n\tThe following recipes are supported as now:"
             "\n\t  * 2D Mosaic - Given a set of 2D GDAL files, the recipe will produce a 2D mosaic of the given files."
             "Ingredients example under ingredients/map_mosaic.json"
             "\n\t  * Regular Timeseries  - Given a set of 2D GDAL files, the recipe will build a timeseries out of "
             "them. The initial time of the series and the step for each additional file must be provided. "
             "Ingredients example under ingredients/time_series_regular"
             "\n\t  * Irregular - Given a set of 2D GDAL files, the recipe will produce  a timeseries out of "
             "them. The initial time of the series and the step for each additional file must be provided. "
             "Ingredients example under ingredients/time_series_irregular"
             )
Example #38
import util.cleanup as cleanup
import util.tail as tail
import util.system as system
import util.remote as remote
from util.log import log

import sys

kojidatabase_name = "kojidatabase"
remote.require_container(kojidatabase_name)
remote.wait_if_not_up(kojidatabase_name, 5432)

services = ["httpd"]
if not "nokojira" in sys.argv:
    services.append("kojira")

def shutdown(*args):
    for service in reversed(services):
        log.info("Stopping {0}".format(service))
        system.service(service, "stop")

cleanup.register_excepthook(shutdown)
cleanup.register_sig_handler(shutdown)

for service in services:
    log.info("Starting {0}".format(service))
    system.service(service, "start")

log.info("Monitoring httpd log")
tail.watch("/var/log/httpd/error_log")
Example #40
import util.cfg as cfg
import util.cleanup as cleanup
import util.cred as cred
import util.openssl as openssl
import util.remote as remote
import util.system as system
from util.log import log
import sys
import subprocess
import os

kojihub_name = "koji"
remote.require_container(kojihub_name)

koji_ca = openssl.CA(cred.ca_key, cred.ca_crt, cred.ca_serial)
builder_name = system.hostname()
builder_user = cred.make_user(builder_name)
if not os.path.exists(builder_user.key):
    log.info("Creating builder credentials")
    openssl.make_user_certificate(builder_user, koji_ca)

    log.info("Configure Koji Builder")
    with cfg.mod_ini("/etc/kojid/kojid.conf") as i:
        i.kojid.cert = builder_user.pem
else:
    log.info("Builder credentials already exist")

def shutdown(*args):
    log.info("Stopping")

cleanup.register_excepthook(shutdown)
cleanup.register_sig_handler(shutdown)

remote.wait_if_not_up(kojihub_name, 80)
Example #41
def shutdown(*args):
    log.info("Stopping")
    log.info("Attempting to disable host {0}".format(builder_name))
    if remote.is_up(kojihub_name, 80):
        subprocess.call(["koji", "-d", "disable-host", builder_name])
Example #42
File: setup.py Project: ASzc/nagoya
#!/usr/bin/env python2

# References:
# https://fedoraproject.org/wiki/Koji/ServerHowTo
# https://github.com/sbadakhc/kojak/blob/master/scripts/install/install

import util.cfg as cfg
import util.pkg as pkg
import util.cred as cred
from util.log import log

#
# Setup
#

log.info("General update")
pkg.clean()
pkg.update()

log.info("Install EPEL")
pkg.install("https://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm")

#
# Kojid (Koji Builder)
#

log.info("Install Koji Builder")
pkg.install("koji-builder")

koji_url = dict()
koji_url["web"] = "http://koji/koji"
Example #43
File: setup.py Project: ASzc/nagoya
#!/usr/bin/env python2

import util.pkg as pkg
from util.log import log

log.info("Installing dependency")
pkg.install("pyOpenSSL")
pkg.clean()
Example #44
def wait_if_not_up(hostname, port, attempts=10, timeout=2, wait=2):
    if not is_up(hostname, port, timeout):
        log.info("Waiting for {0} to be ready".format(hostname))
        wait_up(hostname, port, attempts, timeout, wait)
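is_up and wait_up are not shown; minimal sketches using a TCP connection test, with signatures inferred from the call above:

import socket
import time

def is_up(hostname, port, timeout=2):
    # The host is considered up if a TCP connection to the port succeeds
    try:
        s = socket.create_connection((hostname, port), timeout=timeout)
        s.close()
        return True
    except socket.error:
        return False

def wait_up(hostname, port, attempts=10, timeout=2, wait=2):
    for _ in range(attempts):
        if is_up(hostname, port, timeout):
            return
        time.sleep(wait)
    raise RuntimeError("{0}:{1} did not come up in time".format(hostname, port))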
Example #45
File: setup.py Project: ASzc/nagoya
# https://github.com/sbadakhc/kojak/blob/master/scripts/install/install

import util.cfg as cfg
import util.system as system
import util.pkg as pkg
import util.aconf as aconf
import util.cred as cred
from util.log import log
from os.path import basename
from os import rename

#
# Setup
#

log.info("General update")
pkg.clean()
pkg.update()

log.info("Install EPEL")
pkg.install("https://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm")

log.info("Modify initscripts' checkpid")
# checkpid doesn't handle defunct processes, alter so it does
with cfg.mod_text("/etc/init.d/functions") as f:
    checkpid_start = f.index("checkpid() {\n")
    checkpid_end = f.index("}\n", checkpid_start)
    test_index = f.index('\t\t[ -d "/proc/$i" ] && return 0\n', checkpid_start, checkpid_end)
    f[test_index] = '\t\t[ -e "/proc/$i/exe" ] && return 0\n'

# Note that the /etc/hosts file is not writable in docker images/containers
Example #46
 def describe(self):
     """
     This method is called before an insert or update is run. Override it and use log.info to describe the
     operations that will be performed. Explicitly state any information that was deduced (e.g. timestamps
     for a timeseries) so that the consequences are clear to the user.
     """
     cov = CoverageUtil(self.session.get_coverage_id())
     operation_type = "UPDATE" if cov.exists() else "INSERT"
     log.info("The recipe has been validated and is ready to run.")
     log.info("\033[1mRecipe:\x1b[0m " + self.session.get_recipe()['name'])
     log.info("\033[1mCoverage:\x1b[0m " + self.session.get_coverage_id())
     log.info("\033[1mWCS Service:\x1b[0m " + ConfigManager.wcs_service)
     log.info("\033[1mOperation:\x1b[0m " + operation_type)
     log.info("\033[1mSubset Correction:\x1b[0m " + str(ConfigManager.subset_correction))
     log.info("\033[1mMocked:\x1b[0m " + str(ConfigManager.mock))
     if ConfigManager.track_files:
         log.info("\033[1mTrack files:\x1b[0m " + str(ConfigManager.track_files))
     if ConfigManager.skip:
         log.info("\033[1mSkip:\x1b[0m " + str(ConfigManager.skip))
     if ConfigManager.retry:
         log.info("\033[1mRetries:\x1b[0m " + str(ConfigManager.retries))
     if ConfigManager.slice_restriction is not None:
         log.info("\033[1mSlice Restriction:\x1b[0m " + str(ConfigManager.slice_restriction))
Example #48
File: setup.py Project: ASzc/nagoya
#!/usr/bin/env python2

import util.cfg as cfg
import util.sql as sql
import util.system as system
import util.pkg as pkg
from util.log import log

#
# Setup
#

log.info("General update")
pkg.clean()
pkg.update()

log.info("Install EPEL")
pkg.install("https://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm")

#
# Postgre SQL
#

log.info("Install PostgreSQL Server")
pkg.install("postgresql-server")
sql.init_db()

log.info("Modify PostgreSQL initscript")
with cfg.mod_text("/etc/init.d/postgresql") as p:
    # Can't write to /proc/self/oom_adj in docker, causes error message on start, so disable
    index = p.index("PG_OOM_ADJ=-17\n")
Example #49
#!/usr/bin/env python2

import util.cleanup as cleanup
import util.tail as tail
import util.system as system
from util.log import log

services = ["postgresql"]

def shutdown(*args):
    for service in reversed(services):
        log.info("Stopping {0}".format(service))
        system.service(service, "stop")

cleanup.register_excepthook(shutdown)
cleanup.register_sig_handler(shutdown)

for service in services:
    log.info("Starting {0}".format(service))
    system.service(service, "start")

log.info("Monitoring postgresql log")
log_files = []
for day in ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]:
    log_files.append("/var/lib/pgsql/data/pg_log/postgresql-{0}.log".format(day))
tail.watch(log_files)
Example #50
#!/usr/bin/env python2

import util.cred as cred
from util.log import log
import util.openssl as openssl

log.info("Creating CA credentials")
koji_ca = openssl.CA(cred.ca_key, cred.ca_crt, cred.ca_serial)

log.info("Creating user credentials")
for user in cred.user.values():
    openssl.make_user_certificate(user, koji_ca)

log.info("Done")
Example #51
#!/usr/bin/env python2

import util.cred as cred
import os
from util.log import log

log.info("Creating Koji shared volume")
subdirs = ["/mnt/koji/" + sub for sub in "packages", "repos", "work", "scratch"]
for subdir in subdirs:
    os.makedirs(subdir)

# Should be standard for EL6, as set by httpd package
apache_uid = 48
apache_gid = 48
# Set owner to apache:apache
for subdir in subdirs:
    os.chown(subdir, apache_uid, apache_gid)

log.info("Done")