def prepare_test_tender_data(procedure_intervals, mode):
    """Return munchified test tender data for the given procedure *mode*.

    Looks up the interval set for *mode* (falling back to the 'default'
    entry), validates the 'accelerator' interval where it is allowed,
    and dispatches to the mode-specific data builder.

    Raises ValueError for an unrecognized mode.
    """
    intervals = (procedure_intervals[mode]
                 if mode in procedure_intervals
                 else procedure_intervals['default'])
    LOGGER.log_message(Message(intervals))

    # Acceleration is only meaningful for the open procedures.
    if mode in ('openua', 'openeu'):
        accelerator = intervals['accelerator']
        assert isinstance(accelerator, int), \
            "Accelerator should be an 'int', " \
            "not '{}'".format(type(accelerator).__name__)
        assert accelerator >= 0, \
            "Accelerator should not be less than 0"
    else:
        assert 'accelerator' not in intervals.keys(), \
            "Accelerator is not available for mode '{0}'".format(mode)

    # Deferred builders: only the selected one is ever invoked.
    builders = {
        'single': lambda: test_tender_data(intervals),
        'multi': lambda: test_tender_data_multiple_items(intervals),
        'reporting': lambda: test_tender_data_limited(intervals, 'reporting'),
        'negotiation':
            lambda: test_tender_data_limited(intervals, 'negotiation'),
        'negotiation.quick':
            lambda: test_tender_data_limited(intervals, 'negotiation.quick'),
        'openua': lambda: test_tender_data_openua(intervals),
        'openeu': lambda: test_tender_data_openeu(intervals),
    }
    if mode in builders:
        return munchify({'data': builders[mode]()})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def prepare_test_tender_data(procedure_intervals, tender_parameters):
    """Return munchified test tender data for ``tender_parameters['mode']``.

    Attaches the resolved interval set to *tender_parameters* under
    'intervals' (falling back to the 'default' entry), validates the
    'accelerator' value, then dispatches to the mode-specific builder.

    Raises ValueError for an unrecognized mode.
    """
    mode = tender_parameters['mode']
    intervals = (procedure_intervals[mode]
                 if mode in procedure_intervals
                 else procedure_intervals['default'])
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    # The accelerator must be a non-negative integer for every mode here.
    accelerator = intervals['accelerator']
    assert isinstance(accelerator, int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(accelerator).__name__)
    assert accelerator >= 0, \
        "Accelerator should not be less than 0"

    builders = {
        'negotiation': test_tender_data_limited,
        'negotiation.quick': test_tender_data_limited,
        'reporting': test_tender_data_limited,
        'openeu': test_tender_data_openeu,
        'openua': test_tender_data_openua,
        'open_competitive_dialogue': test_tender_data_competitive_dialogue,
        'belowThreshold': test_tender_data,
    }
    if mode in builders:
        return munchify({'data': builders[mode](tender_parameters)})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def prepare_test_tender_data(procedure_intervals, tender_parameters):
    """Build test tender data for the mode stored in *tender_parameters*.

    Side effect: stores the resolved interval set back into
    ``tender_parameters['intervals']``. Falls back to the 'default'
    intervals when the mode has no dedicated entry.

    Raises ValueError for an unrecognized mode.
    """
    mode = tender_parameters['mode']
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    # Validate the acceleration setting before building any data.
    accelerator = intervals['accelerator']
    assert isinstance(accelerator, int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(accelerator).__name__)
    assert accelerator >= 0, \
        "Accelerator should not be less than 0"

    # The limited procedures all share one builder.
    if mode in ('negotiation', 'negotiation.quick', 'reporting'):
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    if mode == 'openeu':
        return munchify({'data': test_tender_data_openeu(tender_parameters)})
    if mode == 'openua':
        return munchify({'data': test_tender_data_openua(tender_parameters)})
    if mode == 'open_competitive_dialogue':
        return munchify(
            {'data': test_tender_data_competitive_dialogue(tender_parameters)})
    if mode == 'belowThreshold':
        return munchify({'data': test_tender_data(tender_parameters)})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def log_object_data(data, file_name=None, format="yaml"):
    """Log object data in pretty format (JSON or YAML)

    Two output formats are supported: "yaml" and "json".
    If a file name is specified, the output is written into that file.

    If you would like to get similar output everywhere,
    use the following snippet somewhere in your code
    before actually using Munch. For instance,
    put it into your __init__.py, or, if you use zc.buildout,
    specify it in "initialization" setting of zc.recipe.egg.

    from munch import Munch
    Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
                                              default_flow_style=False)
    Munch.__repr__ = Munch.__str__
    """
    if not isinstance(data, Munch):
        data = munchify(data)
    # Normalize the format once so the serializer choice and the file
    # extension always agree; previously format="JSON" serialized as JSON
    # but wrote a file with the caller's raw capitalization ("name.JSON").
    format = "json" if format.lower() == "json" else "yaml"
    if format == "json":
        data = data.toJSON(indent=2)
    else:
        data = data.toYAML(allow_unicode=True, default_flow_style=False)
    LOGGER.log_message(Message(data.decode("utf-8"), "INFO"))
    if file_name:
        output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
        with open(os.path.join(output_dir,
                               file_name + "." + format), "w") as file_obj:
            file_obj.write(data)
def prepare_test_tender_data(procedure_intervals, tender_parameters,
                             submissionMethodDetails, accelerator, funders,
                             plan_data):
    """Return munchified test tender data for ``tender_parameters['mode']``.

    Resolves the interval set for the mode (falling back to 'default'),
    stores it into ``tender_parameters['intervals']``, validates the
    interval accelerator, and dispatches to the mode-specific builder.

    Raises ValueError for an unrecognized mode.
    """
    mode = tender_parameters['mode']
    intervals = (procedure_intervals[mode]
                 if mode in procedure_intervals
                 else procedure_intervals['default'])
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    interval_accelerator = intervals['accelerator']
    assert isinstance(interval_accelerator, int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(interval_accelerator).__name__)
    assert interval_accelerator >= 0, \
        "Accelerator should not be less than 0"

    # Limited procedures share a single two-argument builder.
    if mode in ('negotiation', 'negotiation.quick', 'reporting'):
        return munchify(
            {'data': test_tender_data_limited(tender_parameters, plan_data)})

    # These builders all take (tender_parameters, submissionMethodDetails,
    # plan_data) positionally.
    three_arg_builders = {
        'openeu': test_tender_data_openeu,
        'openua': test_tender_data_openua,
        'openua_defense': test_tender_data_openua_defense,
        'open_competitive_dialogue': test_tender_data_competitive_dialogue,
        'open_framework': test_tender_data_framework_agreement,
        'open_esco': test_tender_data_esco,
        'priceQuotation': test_tender_data_pq,
    }
    if mode in three_arg_builders:
        return munchify({'data': three_arg_builders[mode](
            tender_parameters, submissionMethodDetails, plan_data)})

    # belowThreshold additionally threads funders and the caller-supplied
    # accelerator through as keyword arguments.
    if mode == 'belowThreshold':
        return munchify({'data': test_tender_data(
            tender_parameters, plan_data,
            submissionMethodDetails=submissionMethodDetails,
            funders=funders,
            accelerator=accelerator,
        )})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def write(msg, level, html=False):
    """Writes the message to the log file using the given level.

    Valid log levels are `TRACE`, `DEBUG`, `INFO` and `WARN`.
    Instead of using this method, it is generally better to use the level
    specific methods such as `info` and `debug`.
    """
    # Only threads registered for logging may emit messages.
    current_name = threading.currentThread().getName()
    if current_name not in LOGGING_THREADS:
        return
    LOGGER.log_message(Message(msg, level, html))
def wait_to_date(date_stamp):
    """Return the seconds to wait until *date_stamp*, plus a 2s safety margin.

    Never returns a negative value; a date already in the past yields 0.
    """
    target = parse(date_stamp)
    LOGGER.log_message(Message("date: {}".format(target.isoformat()), "INFO"))
    now = get_now()
    LOGGER.log_message(Message("now: {}".format(now.isoformat()), "INFO"))
    remaining = (target - now).total_seconds() + 2
    return remaining if remaining >= 0 else 0
def prepare_test_tender_data(procedure_intervals, tender_parameters,
                             submissionMethodDetails, accelerator, funders):
    """Build munchified test tender data for ``tender_parameters['mode']``.

    Resolves and validates the interval set (falling back to 'default'),
    stores it into ``tender_parameters['intervals']``, then calls the
    mode-specific builder.

    Raises ValueError for an unrecognized mode.
    """
    mode = tender_parameters['mode']
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    interval_accelerator = intervals['accelerator']
    assert isinstance(interval_accelerator, int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(interval_accelerator).__name__)
    assert interval_accelerator >= 0, \
        "Accelerator should not be less than 0"

    # Limited procedures share one single-argument builder.
    if mode in ('negotiation', 'negotiation.quick', 'reporting'):
        return munchify({'data': test_tender_data_limited(tender_parameters)})

    # These builders take (tender_parameters, submissionMethodDetails).
    two_arg_builders = {
        'openeu': test_tender_data_openeu,
        'openua': test_tender_data_openua,
        'openua_defense': test_tender_data_openua_defense,
        'open_competitive_dialogue': test_tender_data_competitive_dialogue,
        'open_framework': test_tender_data_framework_agreement,
        'open_esco': test_tender_data_esco,
    }
    if mode in two_arg_builders:
        return munchify({'data': two_arg_builders[mode](
            tender_parameters, submissionMethodDetails)})

    # belowThreshold threads funders and the caller-supplied accelerator
    # through as explicit keyword arguments.
    if mode == 'belowThreshold':
        return munchify({'data': test_tender_data(
            tender_parameters,
            submissionMethodDetails=submissionMethodDetails,
            funders=funders,
            accelerator=accelerator)})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def compare_date(left, right, accuracy="minute", absolute_delta=True):
    '''Compares dates with specified accuracy

    Before comparison dates are parsed into datetime.datetime format
    and localized.

    :param left: First date
    :param right: Second date
    :param accuracy: Max difference between dates to consider them equal
                     Default value - "minute"
                     Possible values - "day", "hour", "minute"
                     or float value of seconds
    :param absolute_delta: Type of comparison.
                           If set to True, the order of the dates does not
                           matter.
                           If set to False, then right must not be later
                           than left by more than the accuracy value.
                           Default value - True
                           Possible values - True and False or something
                           that can be cast into them
    :returns: Boolean value

    :error: ValueError when there is a problem with converting accuracy
            into a float value. When it is caught, a warning is logged
            and accuracy is set to 60.
    '''
    left = parse(left)
    right = parse(right)
    # Naive datetimes are assumed to be in the module's default timezone.
    if left.tzinfo is None:
        left = TZ.localize(left)
    if right.tzinfo is None:
        right = TZ.localize(right)
    delta = (left - right).total_seconds()
    # Named accuracies are converted to a threshold in seconds; the "- 1"
    # keeps the comparison strictly inside the named unit.
    if accuracy == "day":
        accuracy = 24 * 60 * 60 - 1
    elif accuracy == "hour":
        accuracy = 60 * 60 - 1
    elif accuracy == "minute":
        accuracy = 60 - 1
    else:
        try:
            accuracy = float(accuracy)
        except ValueError:
            # Unparseable accuracy: warn and fall back to one minute.
            LOGGER.log_message(
                Message(
                    "Could not convert from {} to float. Accuracy is set to 60 seconds."
                    .format(accuracy), "WARN"))
            accuracy = 60
    if absolute_delta:
        delta = abs(delta)
    if delta > accuracy:
        return False
    return True
def wait_to_date(date_stamp):
    """Return the number of seconds to wait until *date_stamp* is reached.

    A 2-second safety margin is added to the wait; a date already in the
    past yields 0.
    """
    date = parse(date_stamp)
    LOGGER.log_message(Message("date: {}".format(date.isoformat()), "INFO"))
    now = datetime.now(tzlocal())
    LOGGER.log_message(Message("now: {}".format(now.isoformat()), "INFO"))
    # Compare the datetimes arithmetically. The previous code compared
    # .isoformat() strings lexicographically, which is not chronologically
    # correct when the two timestamps carry different UTC offsets.
    # (Leftover debug ``print`` statements were also removed.)
    wait_seconds = (date - now).total_seconds()
    if wait_seconds > 0:
        return wait_seconds + 2
    return 0
def log_object_data(data, file_name="", format="yaml"):
    """Log *data* pretty-printed as JSON or YAML.

    Plain objects are munchified first. When *file_name* is given, the
    serialized text is also written to ``<OUTPUT_DIR>/<file_name>.<format>``.
    Any format other than 'json' is treated as 'yaml'.
    """
    if not isinstance(data, Munch):
        data = munchify(data)
    if format == 'json':
        data = data.toJSON(indent=2)
    else:
        # Anything else falls back to YAML output.
        format = 'yaml'
        data = data.toYAML(allow_unicode=True, default_flow_style=False)
    LOGGER.log_message(Message(data, "INFO"))
    if file_name:
        output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
        target_path = os.path.join(output_dir, file_name + '.' + format)
        with open(target_path, "w") as file_obj:
            file_obj.write(data)
def run_keyword_and_ignore_keyword_definations(name, *args):
    """Runs the given keyword with given arguments and returns the status
    as a Boolean value.

    This keyword returns `True` if the keyword that is executed succeeds and
    `False` if it fails. This is useful, for example, in combination with
    `Run Keyword If`. If you are interested in the error message or return
    value, use `Run Keyword And Ignore Error` instead.

    The keyword name and arguments work as in `Run Keyword`.

    Example:
    | ${passed} = | `Run Keyword And Return Status` | Keyword | args |
    | `Run Keyword If` | ${passed} | Another keyword | New in Robot
    Framework 2.7.6.
    """
    try:
        status, message = BuiltIn().run_keyword_and_ignore_error(name, *args)
    except HandlerExecutionFailed:
        # Bug fix: .format(name) was never applied, so the log previously
        # contained a literal "{}" instead of the keyword name.
        LOGGER.log_message(
            Message("Keyword {} not implemented".format(name), "ERROR"))
        return "FAIL", ""
    # Bug fix: the success path previously fell off the end and returned
    # None; return the (status, message) pair to match the except branch.
    return status, message
def get_intervals(procedure_intervals, tender_parameters):
    """Attach the interval set for the tender's mode and validate it.

    Resolves intervals by ``tender_parameters['mode']`` (falling back to
    the 'default' entry), stores them into
    ``tender_parameters['intervals']``, asserts the accelerator is a
    non-negative int, and returns the updated *tender_parameters*.
    """
    mode = tender_parameters['mode']
    intervals = (procedure_intervals[mode]
                 if mode in procedure_intervals
                 else procedure_intervals['default'])
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    accelerator = intervals['accelerator']
    assert isinstance(accelerator, int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(accelerator).__name__)
    assert accelerator >= 0, \
        "Accelerator should not be less than 0"
    return tender_parameters
def prepare_test_tender_data(procedure_intervals, tender_parameters,
                             submissionMethodDetails):
    """Return munchified test tender data for ``tender_parameters['mode']``.

    Resolves the interval set for the mode (falling back to 'default'),
    stores it into ``tender_parameters['intervals']``, validates the
    accelerator, and dispatches to the matching builder.

    Raises ValueError for an unrecognized mode.
    """
    mode = tender_parameters['mode']
    intervals = (procedure_intervals[mode]
                 if mode in procedure_intervals
                 else procedure_intervals['default'])
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    accelerator = intervals['accelerator']
    assert isinstance(accelerator, int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(accelerator).__name__)
    assert accelerator >= 0, \
        "Accelerator should not be less than 0"

    # belowThreshold passes submissionMethodDetails by keyword.
    if mode == 'belowThreshold':
        return munchify({'data': test_tender_data(
            tender_parameters,
            submissionMethodDetails=submissionMethodDetails)})

    # The dgf builders share a positional two-argument signature.
    dgf_builders = {
        'dgfFinancialAssets': test_tender_data_dgf_financial,
        'dgfOtherAssets': test_tender_data_dgf_other,
        'dgfInsider': test_tender_data_dgf_insider,
    }
    if mode in dgf_builders:
        return munchify({'data': dgf_builders[mode](
            tender_parameters, submissionMethodDetails)})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def prepare_test_tender_data(procedure_intervals, mode):
    """Return munchified test tender data for procedure *mode*.

    Falls back to the 'default' interval set when the mode has no
    dedicated entry; the 'accelerator' interval is validated for the
    open procedures and forbidden for all others.

    Raises ValueError for an unrecognized mode.
    """
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))

    if mode in ('openua', 'openeu'):
        # Acceleration is only supported for the open procedures.
        assert isinstance(intervals['accelerator'], int), \
            "Accelerator should be an 'int', " \
            "not '{}'".format(type(intervals['accelerator']).__name__)
        assert intervals['accelerator'] >= 0, \
            "Accelerator should not be less than 0"
    else:
        assert 'accelerator' not in intervals.keys(), \
            "Accelerator is not available for mode '{0}'".format(mode)

    # Limited procedures pass the mode string through to the builder.
    if mode in ('reporting', 'negotiation', 'negotiation.quick'):
        return munchify({'data': test_tender_data_limited(intervals, mode)})

    simple_builders = {
        'single': test_tender_data,
        'multi': test_tender_data_multiple_items,
        'openua': test_tender_data_openua,
        'openeu': test_tender_data_openeu,
    }
    if mode in simple_builders:
        return munchify({'data': simple_builders[mode](intervals)})
    raise ValueError("Invalid mode for prepare_test_tender_data")
def log_object_data(data, file_name=None, format="yaml", update=False,
                    artifact=False):
    """Log object data in pretty format (JSON or YAML)

    Two output formats are supported: "yaml" and "json".
    If a file name is specified, the output is written into that file.

    If you would like to get similar output everywhere,
    use the following snippet somewhere in your code
    before actually using Munch. For instance,
    put it into your __init__.py, or, if you use zc.buildout,
    specify it in "initialization" setting of zc.recipe.egg.

    from munch import Munch
    Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
                                              default_flow_style=False)
    Munch.__repr__ = Munch.__str__
    """
    if not isinstance(data, Munch):
        data = munchify(data)
    if file_name:
        # Artifacts live next to the library; everything else goes to the
        # Robot Framework output directory.
        if artifact:
            file_path = os.path.join(
                os.path.dirname(__file__), 'data', file_name + '.' + format)
        else:
            output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
            file_path = os.path.join(output_dir, file_name + '.' + format)
        if update:
            # Merge the new data over whatever the file already contains.
            try:
                with open(file_path, "r+") as file_obj:
                    new_data = data.copy()
                    data = munch_from_object(file_obj.read(), format)
                    data.update(new_data)
                    file_obj.seek(0)
                    file_obj.truncate()
            except IOError as e:
                LOGGER.log_message(Message(e, "INFO"))
                LOGGER.log_message(
                    Message("Nothing to update, "
                            "creating new file.", "INFO"))
        serialized = munch_to_object(data, format)
        with open(file_path, "w") as file_obj:
            file_obj.write(serialized)
    serialized = munch_to_object(data, format)
    LOGGER.log_message(Message(serialized.decode('utf-8'), "INFO"))
def log_object_data(data, file_name=None, format="yaml", update=False,
                    artifact=False):
    """Log object data in pretty format (JSON or YAML)

    Two output formats are supported: "yaml" and "json".
    If a file name is specified, the output is written into that file.

    If you would like to get similar output everywhere,
    use the following snippet somewhere in your code
    before actually using Munch. For instance,
    put it into your __init__.py, or, if you use zc.buildout,
    specify it in "initialization" setting of zc.recipe.egg.

    from munch import Munch
    Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
                                              default_flow_style=False)
    Munch.__repr__ = Munch.__str__
    """
    if not isinstance(data, Munch):
        data = munchify(data)
    if file_name:
        target_name = file_name + '.' + format
        if artifact:
            # Artifacts are stored alongside the library's bundled data.
            file_path = os.path.join(
                os.path.dirname(__file__), 'data', target_name)
        else:
            output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
            file_path = os.path.join(output_dir, target_name)
        if update:
            # Overlay the new values on the file's existing content.
            try:
                with open(file_path, "r+") as file_obj:
                    new_data = data.copy()
                    data = munch_from_object(file_obj.read(), format)
                    data.update(new_data)
                    file_obj.seek(0)
                    file_obj.truncate()
            except IOError as e:
                LOGGER.log_message(Message(e, "INFO"))
                LOGGER.log_message(
                    Message("Nothing to update, "
                            "creating new file.", "INFO"))
        rendered = munch_to_object(data, format)
        with open(file_path, "w") as file_obj:
            file_obj.write(rendered)
    rendered = munch_to_object(data, format)
    LOGGER.log_message(Message(rendered.decode('utf-8'), "INFO"))
def log(self, message, level="INFO"):
    """Forward *message* to the Robot Framework logger at *level*."""
    entry = Message(message, level)
    LOGGER.log_message(entry)
def log_object_data(data):
    """Log *data* as pretty YAML at INFO level, munchifying it first."""
    if not isinstance(data, Munch):
        data = munchify(data)
    rendered = data.toYAML(allow_unicode=True, default_flow_style=False)
    LOGGER.log_message(Message(rendered, "INFO"))
def emit(self, record):
    """Bridge a stdlib logging record into the Robot Framework logger.

    The record's level name is translated through ``self.mapping``.
    """
    level = self.mapping[record.levelname]
    LOGGER.log_message(Message(record.getMessage(), level))