Example #1
 def _add(self, resource_type, _id, contents):
     try:
         res = self.request('post', f'/{resource_type}/add', json=contents)
         res.raise_for_status()
         LOG.info(f'UPDATED {resource_type}/{_id}')
     except HTTPError as her:
         LOG.info(f'Failed to UPDATE {resource_type}/{_id} : {her}')
Example #2
 def _delete(self, resource_type, _id):
     try:
         res = self.request('get', f'/{resource_type}/delete?id={_id}')
         res.raise_for_status()
         LOG.info(f'DELETED {resource_type}/{_id}')
     except HTTPError:
         LOG.error(f'Failed to DELETE {resource_type}/{_id}')
def process_cloudwatch_alarm_event(event):
    """ Receive raw event from lambda invoke """
    message = parse_sns_message(event)
    standardised_data = cloudwatch_alarm_to_standard_health_data_model(message)
    response = send_to_health_monitor(standardised_data)
    LOG.debug("Lambda invoke status: %s", response.StatusCode)
    return response.StatusCode == 200
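The parse_sns_message helper is not shown in this listing. A minimal sketch, assuming the standard event shape a Lambda receives from an SNS subscription (the alarm payload arrives as a JSON string under Records[0].Sns.Message), could look like this:

import json

def parse_sns_message(event):
    # Unwrap the first SNS record and decode its JSON message body
    # (illustrative sketch, not the project's actual helper).
    return json.loads(event["Records"][0]["Sns"]["Message"])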
Example #4
    def __save_conversation(self, communications):
        LOG.info('conversation: communications: ' + str(len(communications)))
        communication_ids = []
        conversation_participants = {}
        last_message_time = datetime.datetime.min
        conversation_matched_keywords = {}
        for c in communications:
            r = db.Communications.Add(
                communication_log_id=str(self.communication_log['_id']),
                from_=c['from'],
                to=c['to'],
                message=c['message'],
                message_time=c['message_time'],
                matched_keywords=c['matched_keywords'],
                source=self.communication_log['source'],
                enterprise_id=self.communication_log['enterprise_id'],
            )
            for k in c['matched_keywords']:
                conversation_matched_keywords[k] = 1
            communication_ids.append(str(r.inserted_id))
            conversation_participants[c['from']['name']] = 1
            for p in c['to']:
                conversation_participants[p['name']] = 1
            if last_message_time < c['message_time']:
                last_message_time = c['message_time']

        db.Conversations.Add(
            communication_log_id=str(self.communication_log['_id']),
            communication_ids=communication_ids,
            matched_keywords=list(conversation_matched_keywords.keys()),
            participants=list(conversation_participants.keys()),
            last_message_time=last_message_time,
            source=self.communication_log['source'],
            enterprise_id=self.communication_log['enterprise_id'],
        )
def process_message(message: Dict[str, Any]) -> Any:
    """
    Receive event body forwarded from lambda handler.
    """
    try:
        actions = {
            "register": register,
            "commit": commit,
            "usage": usage,
            "audit": audit.start,
            "log_org_membership": audit.log_org_membership,
            "log_org_teams": audit.log_org_teams,
            "log_org_team_membership": audit.log_org_team_membership,
            "log_org_team_repos": audit.log_org_team_repos,
            "log_org_repos": audit.log_org_repos,
            "log_org_repo_contributors": audit.log_org_repo_contributors,
            "log_org_repo_team_members": audit.log_org_repo_team_members,
        }
        action = message["action"]

        process_action = actions[action]
        success = process_action(message)
        if not success:
            LOG.error("Processing failed for %s", action)
    except (audit.IncompleteAuditError, github_api.GithubApiError):
        success = False
    return success
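The dispatcher above routes on the message's "action" key, so a caller only passes a dict naming the action plus whatever fields the chosen handler expects; an action missing from the table surfaces as an uncaught KeyError. A hypothetical invocation (the field values are purely illustrative):

# Route an audit sub-task to audit.log_org_repos; "audit_id" is whatever identifier the audit handlers expect.
process_message({"action": "log_org_repos", "audit_id": "example-audit-1"})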
        def send_batch(sqs_entries):
            # http://boto3.readthedocs.org/en/latest/reference/services/sqs.html#SQS.Queue.sendentries
            result = self.queue.send_messages(Entries=sqs_entries)

            if len(result['Successful']) != len(sqs_entries):
                LOG.error('Some messages failed to send to SQS: {}'.format(
                    result))
Example #7
def process_cloudwatch_metric_event():
    """ Trigger scheduled update of all configured alarm metrics """
    alarms = get_cloudwatch_alarms()

    stats = defaultdict(int)
    for alarm in alarms:
        alarm = Dict(alarm)
        current_state = alarm.StateValue
        statistics = None
        if current_state != "INSUFFICIENT_DATA":
            statistics = get_cloudwatch_metric_statistics(alarm)

        if statistics is not None:
            metric_event = cloudwatch_metric_to_standard_health_data_model(
                alarm, statistics)
            response = send_to_health_monitor(metric_event)
            LOG.debug("Lambda invoke status: %s", response.StatusCode)
            if response.StatusCode == 200:
                stats["sent"] += 1
            else:
                stats["failed"] += 1
        else:
            stats["no_data"] += 1
            LOG.debug("%s state is %s", alarm.MetricName, current_state)

    return stats
Example #8
def get_session_var(name, default=None):
    try:
        value = session.get(name, default)
    except RuntimeError as err:
        LOG.error("Failed to get variable from session: " + str(err))
        value = None
    return value
Example #9
def SetParsed(
	communication_log_id,
	error = None
):
	LOG.debug('CommunicationLogs:Save: %s %s', communication_log_id, error)
	set = {
		'parsed_time': datetime.datetime.now(),
	}
	if error:
		set['error'] = 'PARSING: ' + str(error)
		set['state'] = States.PARSING_ERROR
	else:
		set['error'] = None
		set['state'] = States.PARSED
	r = collection.update_one(
		{ 
			'_id': communication_log_id,
		},
		{ 
			'$set': set,
		},
		upsert = False 
	)
	LOG.debug(r)
	return r
Example #10
def Add(
    communication_log_id,
    communication_ids,
    #source,
    #enterprise_id,
    matched_keywords,
    participants,
    last_message_time,
    source,
    enterprise_id,
    state=None,
    error=None,
):
    LOG.debug('Conversations:Add: %s %s', communication_log_id, communication_ids)
    r = collection.insert_one({
        'communication_log_id': communication_log_id,
        'communication_ids': communication_ids,
        #'source': source,
        #'enterprise_id': enterprise_id,
        'matched_keywords': matched_keywords,
        'participants': participants,
        'last_message_time': last_message_time,
        'source': source,
        'enterprise_id': enterprise_id,
        'state': state,
        'error': error,
    })
    LOG.debug(r)
    return r
Example #11
def DeleteBySource(source):
    LOG.debug('Conversations:DeleteBySource:{0}'.format(source))
    r = collection.remove({
        'source': source,
    })
    LOG.debug(r)
    return r
def splunk_forwarder_event_handler(event, context):
    """ Receive and process Health Monitoring message """
    try:
        process_update_dashboard_event(event)

    except (ValueError, KeyError):
        LOG.error("Failed to build Splunk payload for health monitoring data")
 def count_all_topic_messages(self, topic):  # individual messages
     subtotals = []
     self.get_consumer(True, topic)
     while not self.consumer.poll():
         try:
             self.consumer.seek_to_beginning()
             break
         except AssertionError:
             LOG.info(f'{topic} waiting for consumer poll...')
             sleep(0.25)
     try:
         while True:
             messages = self.consumer.poll_and_deserialize(1000, 1000)
             if not messages:
                 return sum(subtotals)
             for part in messages:
                 bundles = messages.get(part)
                 for bundle in bundles:
                     _msgs = bundle.get('messages')
                     subtotals.append(sum(1 for m in _msgs))
     finally:
         self.consumer.close()
Example #14
def ebs_usage(connect, volumes):
    """
    Checks for no usage in 24 hours
    """
    if type(volumes) is not list:
        raise TypeError("Not a list")

    unused_volumes = []

    if volumes:
        try:
            for ebs in volumes:
                response = connect.cloudwatch.get_metric_statistics(Namespace='AWS/EBS',
                                                            MetricName='VolumeReadBytes',
                                                            StartTime=datetime.now() - timedelta(days=1),
                                                            EndTime=datetime.now(),
                                                            Period=86400, Statistics=['Average'],
                                                            Dimensions=[{'Name':'VolumeId','Value':ebs.id}])

                if 'Datapoints' in response and not response['Datapoints']:
                    LOG.info("INFO: {0} is not active".format(ebs.id))
                    unused_volumes.append(ebs)

        except Exception as err:
            LOG.error("ERROR: {0}".format(err))
def run_single_test_on_single_vm(vm, testcase):
    errcode = 2
    vm_username = vm.UserName
    vm_ip = vm.IP
    testid = testcase.TestID

    testcase_bash_script = testcase.ExecScript
    testcase_script_local_path = "./{0}/{1}".format("TestCases",
                                                    testcase_bash_script)
    testcase_script_remote_path = "{0}/{1}/{2}".format("/home", vm_username,
                                                       testcase_bash_script)

    testlog = testcase.LogFile
    testlog_local = "./_{0}-{1}-{2}".format(vm.Role,
                                            str(testid).zfill(2), testlog)
    testlog_remote = "{0}/{1}/{2}".format("/home", vm_username, testlog)

    if not os.path.isfile(testcase_script_local_path):
        LOG("             ERROR: The test script does not exist")
        return 2

    #copy the script to remote VM
    os.system("scp -q {0} {1}@{2}:".format(testcase_script_local_path,
                                           vm_username, vm_ip))
    os.system("ssh -q {0}@{1} 'chmod 755 {2}'".format(
        vm_username, vm_ip, testcase_script_remote_path))

    ssh = subprocess.Popen([
        "ssh", "{0}@{1}".format(vm_username, vm_ip),
        testcase_script_remote_path
    ],
                           shell=False,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    result = ssh.stdout.readlines()

    os.system("scp -q {0}@{1}:{2} {3}".format(vm_username, vm_ip,
                                              testlog_remote, testlog_local))

    if result == []:
        error = ssh.stderr.readlines()
        LOG("             ERROR: %s" % error)
        errcode = 1
    else:
        for i, line in enumerate(result):
            if line.strip() == "":
                continue
            if "TEST PASSED" in line:
                errcode = 0
                break
            if "TEST FAILED" in line:
                errcode = 1
                break

    os.system("ssh -q {0}@{1} 'rm -rf {2}'".format(
        vm_username, vm_ip, testcase_script_remote_path))
    os.system("ssh -q {0}@{1} 'rm -rf {2}'".format(vm_username, vm_ip,
                                                   testlog_remote))

    return errcode
Example #16
def fileDownload(self, *args):
    cl = client.TCPClient(LOG.getLogger("clientlog", "Agent"))
    testid = int(args[2])
    current_test = get_test(testid)
    test_logger = None
    try:
        if current_test:
            test_logger = LOG.gettestlogger(current_test, "STAT")
            lctx.debug("FILE DOWNLOAD | " + str(current_test.testid) +
                       " | START")
            lctx.debug("Preparing TAR file of system metric folder")
            test_logger.info("Preparing TAR file of system metric folder")
            common.make_tarfile(current_test.archivedir + "results.tgz",
                                current_test.resultsdir + "/")
            dest = current_test.tobj.testobj.TestInputData.stats_results_path[
                current_test.stathostip]
            download_file = current_test.archivedir + "results.tgz"
            test_logger.info("Sending TAR file to daytona host")
            cl.sendFile(current_test.serverip, current_test.serverport,
                        download_file, dest.strip())
            lctx.debug("FILE DOWNLOAD | " + str(current_test.testid) +
                       " | COMPLETE")
            return "SUCCESS"
        else:
            raise Exception("Invalid Test ID")

    except Exception as e:
        lctx.error(e)
        if test_logger:
            test_logger.error(e)
        return "ERROR"
Example #17
def get_github_api_paged_data(url: str) -> List[Any]:
    token = get_github_access_token()
    page = 1
    page_size = 100
    page_items = 1
    items = []
    while page_items > 0:
        page_items = 0
        page_url = f"{url}?page={page}&per_page={page_size}"
        headers = {"authorization": f"token {token}"}
        response = requests.get(page_url, headers=headers)

        if response.status_code == 200:
            response_items = response.json()
            LOG.debug("Got item page %s for URL %s", page, url)
            page_items = len(response_items)
            # append page to parent array
            for item in response_items:
                items.append(item)
        else:
            raise GithubApiError(response.text)
        page += 1
        # this sleep is important to prevent rate limiting
        time.sleep(4)

    return items
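Since the loop only ends once a page comes back empty (or a non-200 response raises GithubApiError), each call walks every page and sleeps between requests, so large listings take several seconds. A hypothetical call (the URL is illustrative):

# Collect all members of an organisation across every page of results.
members = get_github_api_paged_data("https://api.github.com/orgs/example-org/members")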
Example #18
 def on_moved(self, event):
     super(ChangeHandler, self).on_moved(event)
     if event.is_directory:
         return
     LOG.info("Moved %s: from %s to %s", 'file', event.src_path,
              event.dest_path)
     self.helper.dispatch_change(Event.CHANGE, event.dest_path)
    def _handle_multiple_messages(self, messages):
        """
        Train movement message comprises a `header` and a `body`. The `header`
        http://nrodwiki.rockshore.net/index.php/Train_Movement

        """
        def send_batch(sqs_entries):
            # http://boto3.readthedocs.org/en/latest/reference/services/sqs.html#SQS.Queue.sendentries
            result = self.queue.send_messages(Entries=sqs_entries)

            if len(result['Successful']) != len(sqs_entries):
                LOG.error('Some messages failed to send to SQS: {}'.format(
                    result))

        with batcher(send_batch, batch_size=10) as b:
            for raw_message in messages:
                message_id = str(uuid.uuid4())

                pretty_message = json.dumps(raw_message, indent=4)
                LOG.debug('Sending to queue with id {}: {}'.format(
                    message_id, pretty_message))

                b.push({
                    'Id': message_id,
                    'MessageBody': pretty_message
                })

                self.increment_message_counter(len(raw_message))
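The batcher helper used above is not included in this listing. A minimal sketch of such a context manager, assuming all it needs to do is collect pushed entries and hand them to the supplied callback in groups of batch_size, flushing any remainder on exit:

from contextlib import contextmanager

@contextmanager
def batcher(send, batch_size=10):
    # Collects items via .push() and forwards them to `send` in fixed-size batches
    # (sketch under the assumptions above, not the project's actual implementation).
    class _Batch:
        def __init__(self):
            self.items = []

        def push(self, item):
            self.items.append(item)
            if len(self.items) >= batch_size:
                self.flush()

        def flush(self):
            if self.items:
                send(self.items)
                self.items = []

    batch = _Batch()
    try:
        yield batch
    finally:
        batch.flush()  # send whatever is left when the with-block exits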
Example #20
def main():
    queue = get_aws_queue(os.environ['AWS_SQS_QUEUE_URL'])

    try:
        handle_queue(queue)
    except KeyboardInterrupt:
        LOG.info("Quitting.")
Example #21
async def flush_opens():
    """
    Code called when flushing the opens currently stored in state.

    This will fetch data from the State (without mutating it), format, attempt to POST
    to the URL, and remove it on success.
    """
    to_flush, number_being_flushed = open_state.get_records_to_pop(
        number_of_records=MAX_BATCH_TO_FLUSH)

    # Format data
    formatted_to_flush = [{
        'tracking_id': open_data.tracking_id,
        'timestamp': open_data.timestamp.isoformat()
    } for open_data in to_flush]

    # Attempt to make request
    try:
        await post_request(url=OPEN_DATA_POST_URL, body=formatted_to_flush)
    except RequestError:
        # TODO we could implement a retry policy here.
        LOG.getChild('flush_opens').exception('Failed to POST Opens')
        return

    # Remove the records from state on successful request
    open_state.pop_records(number_to_pop=number_being_flushed)
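post_request and RequestError are defined elsewhere in that project. A minimal sketch of what such a helper could look like, assuming aiohttp is the HTTP client and that any client-side failure should surface as RequestError:

import aiohttp

class RequestError(Exception):
    """Raised when the POST to the collector endpoint fails."""

async def post_request(url, body):
    # POST the JSON body and translate any aiohttp failure into RequestError
    # (assumed implementation, for illustration only).
    try:
        async with aiohttp.ClientSession() as session:
            async with session.post(url, json=body) as response:
                response.raise_for_status()
    except aiohttp.ClientError as err:
        raise RequestError(str(err)) from err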
Example #22
    def updateStatus(self, curStatus, newStatus):
        """
        Update test status from curStatus to newStatus in database for a given test.

        """
        lctx = LOG.getLogger("dblog", "DH")
        lctx.debug("setting status from %s to %s" % (curStatus, newStatus))
        if self.testobj.TestInputData.exec_results_path is not None:
            test_logger = LOG.gettestlogger(self, "EXEC")
            test_logger.info("Setting test status from %s to %s" %
                             (curStatus, newStatus))
        update_res = self.db.query(
            """update TestInputData SET end_status = %s where testid=%s""",
            (newStatus, self.testobj.TestInputData.testid), False, True)
        self.testobj.TestInputData.end_status = newStatus
        update_res = self.db.query(
            """update CommonFrameworkSchedulerQueue SET state = %s, message = %s, state_detail = %s where testid = %s""",
            (newStatus, newStatus, newStatus,
             self.testobj.TestInputData.testid), False, True)

        if newStatus == "finished clean" or newStatus == "failed" or newStatus == "abort" or newStatus == "kill" or newStatus == "timeout clean":
            update_res = self.db.query(
                """delete from CommonFrameworkSchedulerQueue where testid=%s""",
                (self.testobj.TestInputData.testid, ), False, True)
            lctx.debug(
                "Deleted entry from CommonFrameworkSchedulerQueue because of failure for : "
                + str(self.testobj.TestInputData.testid))

        return
Example #23
def get_ssm_params(path: str) -> Dict[str, str]:
    """ Get parameter by path and return value """
    try:
        has_next_page = True
        next_token = None
        params = {}
        while has_next_page:
            client: SSMClient = boto3.client("ssm")  # type: ignore

            if next_token:
                response = client.get_parameters_by_path(Path=path,
                                                         Recursive=True,
                                                         WithDecryption=True,
                                                         NextToken=next_token)
            else:
                response = client.get_parameters_by_path(Path=path,
                                                         Recursive=True,
                                                         WithDecryption=True)

            # Iterate parameters in response and append to dictionary
            for param in response["Parameters"]:
                name = param["Name"].replace(path, "")
                params[name] = param["Value"]

            # Check for next page in results
            has_next_page = "NextToken" in response
            if has_next_page:
                next_token = response["NextToken"]
            else:
                next_token = None

    except ClientError as err:
        LOG.error("Failed to get SSM params on path: %s: %s", path, err)
        params = {}
    return params
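Each returned key has the search path stripped from the parameter name, so callers get short names back. A hypothetical usage (the path and parameter names are illustrative):

# Fetch everything stored under one path prefix; keys come back without the "/my-app/prod/" prefix.
config = get_ssm_params("/my-app/prod/")
db_password = config.get("db_password")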
Example #24
 def closeBluetoothSocket(self):
     try:
         self.clientSocket.close()
         self.serverSocket.close()
         LOG.info("Bluetooth sockets successfully closed ...")
     except bluetooth.BluetoothError:
         LOG.error("Failed to close the bluetooth sockets ", exc_info=True)
Example #25
 def acceptBluetoothConnection(self):
     try:
         self.clientSocket, clientInfo = self.serverSocket.accept()
         LOG.info("Accepted bluetooth connection from %s", clientInfo)
     except (bluetooth.BluetoothError, SystemExit, KeyboardInterrupt):
         LOG.error("Failed to accept bluetooth connection ... ",
                   exc_info=True)
Example #26
 def __init__(
     self,
     message: Union[str, TypeError] = "Incomplete audit error",
 ):
     self.message = message
     super().__init__(self.message)
     LOG.error({"error": "GithubApiError", "message": message})
Example #27
def log_org_repo_contributors(message: Dict[str, Any]) -> None:
    """ Audit github organization repository contributors """
    org = os.environ["GITHUB_ORG"]
    repo = message.get("repo", None)
    audit_id = message.get("audit_id")
    if audit_id:
        LOG.info({
            "action": "Audit organization org repo contributors",
            "org": org,
            "repo": repo,
            "audit_id": audit_id,
        })

        if repo:
            repo_name = repo["name"]
            members = github_api.get_github_org_repo_contributors(
                org, repo_name)
            for member in members:
                event = make_audit_event(
                    type="OrganizationRepoContributor",
                    org=org,
                    member=member,
                    repository=repo,
                    audit_id=audit_id,
                )
                LOG.info(event)
        else:
            raise IncompleteAuditError(audit_id=audit_id,
                                       source=message,
                                       message="Repo not specified")
Example #28
def GetByEnterprise(enterprise_id):
    LOG.debug('Lexicon:GetByEnterprise: %s', enterprise_id)
    r = collection.find({
        'enterprise_id': enterprise_id,
    })
    LOG.debug(r)
    return r
Example #29
def process_message(raw_message):
    header = raw_message['header']

    if not validate_header(header):
        return True  # Effectively drop the message

    decoded = TrainMovementsMessage(raw_message['body'])

    if (decoded.event_type == EventType.arrival
            and decoded.status == VariationStatus.late
            and decoded.location.is_public_station
            and decoded.operating_company
            and decoded.operating_company.is_delay_repay_eligible(
                decoded.minutes_late)):

        LOG.info('{} {} arrival at {} ({}) - eligible for '
                 'compensation from {}: {}'.format(
                     decoded.actual_datetime, decoded.early_late_description,
                     decoded.location.name, decoded.location.three_alpha,
                     decoded.operating_company, str(decoded)))

    else:
        LOG.debug('Dropping {} {} {} message'.format(
            decoded.status, decoded.event_type,
            decoded.early_late_description))

    return True
Example #30
File: main.py Project: Gexeg/test
def main():
    args = parser.parse_args()

    try:
        uploader = ApplicantUploader(args.token)
    except AssertionError:
        LOG.error(f"Error durinng uploader initialization")
        sys.exit(1)

    if files := scan_directory(args.base_dir, BASE_FILENAME):
        for applicant in get_applicants_info(files, args.row):
            LOG.debug(f"Start uploading")
            # upload the resume
            resume_info = uploader.upload_file(applicant.file_path)

            # build the JSON payload for the applicant
            body = uploader.collect_parsed_data(resume_info)
            # data taken from the .xlsx file takes priority
            body.update(get_fio(applicant))
            body.update({"money": applicant.salary})

            # upload the applicant
            if response := uploader.upload_applicant(body):
                # assign them to the vacancy
                applicant_id = response.get("id")
                uploader.set_vacancy(applicant, applicant_id)
def process_message(raw_message):
    header = raw_message['header']

    if not validate_header(header):
        return True  # Effectively drop the message

    decoded = TrainMovementsMessage(raw_message['body'])

    if (decoded.event_type == EventType.arrival and
            decoded.status == VariationStatus.late and
            decoded.location.is_public_station and
            decoded.operating_company and
            decoded.operating_company.is_delay_repay_eligible(
                decoded.minutes_late)):

        LOG.info('{} {} arrival at {} ({}) - eligible for '
                 'compensation from {}: {}'.format(
                     decoded.actual_datetime,
                     decoded.early_late_description,
                     decoded.location.name,
                     decoded.location.three_alpha,
                     decoded.operating_company,
                     str(decoded)))

    else:
        LOG.debug('Dropping {} {} {} message'.format(
            decoded.status, decoded.event_type,
            decoded.early_late_description))

    return True
Example #32
    def metric_resource_exists(cls, metric):
        """
        Check the resource exists before defining an alarm
        aws cloudwatch list-metrics returns metrics for resources that
        no longer exists
        """
        region = cls.get_metric_region(metric)
        namespace = metric.Namespace
        resource_exists = True
        try:
            LOG.debug("Getting boto client for %s in %s", namespace, region)
            client = cls.get_client_from_namespace(namespace, region)
            if client:
                queue = cls.get_metric_dimension_value(metric, "QueueName")
                LOG.debug("Get tags for sqs queue: %s", queue)
                if queue:
                    client.get_queue_url(QueueName=queue)
                else:
                    resource_exists = False

        except AttributeError as err:
            LOG.debug(json.dumps(metric, indent=2))
            LOG.debug(str(err))
        except botocore.exceptions.ClientError as err:
            LOG.debug(str(err))
            resource_exists = False
        return resource_exists
def main():
    queue = get_aws_queue(os.environ['AWS_SQS_QUEUE_URL'])

    try:
        handle_queue(queue)
    except KeyboardInterrupt:
        LOG.info("Quitting.")
Example #34
def test_consumer_connection() -> bool:
    try:
        HELPER.request('HEAD', '/job/describe')
        return True
    except HTTPError as her:
        LOG.error(f'Could not connect to consumer: {her}')
        return False
Example #35
def cleanup(self, *args):
    (obj, command, params, actionID, sync) = (args[0], args[1], args[2],
                                              args[3], args[4])
    testid = int(params)
    current_test = get_test(testid)
    test_logger = None
    try:
        if current_test:
            test_logger = LOG.gettestlogger(current_test, "STAT")
            lctx.debug("CLEANUP | " + str(current_test.testid) + " | START")
            test_logger.info("Test cleanup")
            downloadTestLogs(testid)
            LOG.removeLogger(current_test.tobj)
            shutil.rmtree(current_test.resultsdir, ignore_errors=True)
            delete_test(testid)
            lctx.debug("CLEANUP | " + str(current_test.testid) + " | COMPLETE")
            return "SUCCESS"
        else:
            raise Exception("Invalid Test ID")

    except Exception as e:
        lctx.error(e)
        if test_logger:
            test_logger.error(e)
        return "ERROR"
Example #36
 def on_modified(self, event):
     path = event.src_path
     dest_path = path.replace(".less", ".css")
     cmd = 'lessc "%s" "%s"' % (path, dest_path)
     status = os.system(cmd)
     LOG.info(cmd)
     LOG.info("Status : %d" % status)
    def increment_message_counter(self, num_bytes):
        self.sent_message_count += 1
        self.sent_bytes += num_bytes

        if self.sent_message_count % LOG_EVERY_N_MESSAGES == 0:
            LOG.info('Sent {} messages, ~{:.3f} MB'.format(
                self.sent_message_count,
                self.sent_bytes / (1024 * 1024)))
Example #38
 def _run_lola(self, lola_file_name, formula):
     """
     Run LoLA for a certain file and formula
     """
     LOG.info("Running LoLA in temporal file for formula:")
     LOG.info("'{0}'".format(formula))
     command = ["lola", lola_file_name, "--formula={0}".format(formula)]
     (ret, _, stderr) = run(command)
     return check_result(stderr)
Example #39
 def __init__(self, phi_1, phi_2):
     if not isinstance(phi_1, CTLFormula):
         err_message = "Phi provided is not an CTL formula"
         raise CTLException(err_message)
     if not isinstance(phi_2, CTLFormula):
         err_message = "Phi provided is not an CTL formula"
         raise CTLException(err_message)
     self._phi_1 = phi_1
     self._phi_2 = phi_2
     LOG.info("New negated exist until predicate created")
Example #40
def authenticate(context, username, password):

    if not have_authentication:
        return True

    try:
        return authenticateRequest(username, password)
    except Exception as e:
        msg = 'Authentication failed (%s)' % e
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
Example #41
def _check_fop():
    if not checkEnvironment('FOP_HOME'):
        return False

    exe_name = win32 and 'fop.bat' or 'fop'
    full_exe_name = os.path.join(fop_home, exe_name)
    if not os.path.exists(full_exe_name):
        LOG.debug('%s does not exist' % full_exe_name)
        return False

    return True
Example #42
def _check_xinc():
    if not checkEnvironment("XINC_HOME"):
        return False

    exe_name = win32 and "\\bin\\windows\\xinc.exe" or "bin/unix/xinc"
    full_exe_name = os.path.join(xinc_home, exe_name)
    if not os.path.exists(full_exe_name):
        LOG.debug("%s does not exist" % full_exe_name)
        return False

    return True
Example #43
 def model_checking(self, m_0, formula):
     """
     Perform model checking of the petri net for a certain marking and
     formula using lola.
     """
     lola_file_name = self._create_lola_file(m_0)
     result = self._run_lola(lola_file_name, formula.print_lola())
     LOG.info("Model Checking result: \"{0}\"".format(result))
     os.remove(lola_file_name)
     LOG.info("Removing LoLA temporal file")
     return result
Example #44
 def _create_lola_file(self, m_0):
     """
     Create a LoLA file of the model.
     """
     LOG.info("Creating LoLA temporal file")
     lola_file_name = "__tmp_lola_model_.lola"
     file_object = open(lola_file_name, "wb")
     file_object.write(self.export_lola(m_0))
     file_object.close()
     LOG.info("LoLA temporal file created")
     return lola_file_name
Example #45
def _check_xfc():
    if not checkEnvironment("XFC_DIR"):
        return False

    # check only for fo2rtf (we expect that all other fo2XXX
    # converters are also installed properly)
    full_exe_name = os.path.join(xfc_dir, "fo2rtf")
    if not os.path.exists(full_exe_name):
        LOG.debug("%s does not exist" % full_exe_name)
        return False

    return True
Example #46
def convertZIPEmail(context, auth_token, zip_archive, converter_name='pdf-prince', sender=None, recipient=None, subject=None, body=None):

    if not authorizeRequest(auth_token):
        msg = 'Authorization failed'
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)

    try:
        return context.convertZIPEmail(zip_archive, converter_name, sender, recipient, subject, body)
    except Exception as e:
        msg = 'Conversion failed (%s)' % e
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
Example #47
    def _cleanup(self):
        """ Remove old and outdated files from the temporary and
            spool directory.
        """

        if time.time() - self.cleanup_last > self.cleanup_after:
            self._lock.acquire()
            try:
                self.__cleanup()
                self.cleanup_last = time.time()
            except Exception as e:
                LOG.error(e, exc_info=True)
            finally:
                # release the lock acquired above
                self._lock.release()
Example #48
def convertZIP(context, auth_token, zip_archive, converter_name='pdf-prince'):

    if not authorizeRequest(auth_token):
        msg = 'Authorization failed'
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)

    try:
        return context.convertZIP(zip_archive, converter_name)
    except Exception as e:
        msg = 'Conversion failed (%s)' % e
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
Example #49
 def reachability_set(self, m_0):
     """
     Get the reachability set of the model for marking 'm'.
     """
     m_0 = self._fix_marking(m_0)
     msg = "Getting reachability set from '{0}'".format(m_0)
     LOG.info(msg)
     reach_graph = {str(m_0): []}
     open_set = [m_0]
     closed_set = []
     while len(open_set):
         m = open_set.pop(0)
         if m in closed_set:
             continue
         m_key = str(m)
         msg = "Adding new marking to reachability set: '{0}'".format(m)
         LOG.info(msg)
         reach_graph[m_key] = self._get_succesors(m)
         open_set.extend(reach_graph[m_key])
         closed_set.append(m)
     msg = "Reachability set calculated from: '{0}'".format(m_0)
     LOG.info(msg)
     msg = "Reachability set size is: '{0}'".format(len(reach_graph))
     LOG.info(msg)
     return reach_graph
Example #50
 def load_file(self, file_name):
     """
     Load a Petri Net model dumped into a JSON file with name 'file_name'.
     """
     msg = "Loading Petri Net from file '{0}'".format(file_name)
     LOG.info(msg)
     with open(file_name) as in_file:
         in_dict = json.load(in_file)
     self._places = in_dict["P"]
     self._transitions = in_dict["T"]
     self._input = self._read_io_dict("I", in_dict["I"])
     self._output = self._read_io_dict("O", in_dict["O"])
     msg = "Loading completed"
     LOG.info(msg)
Example #51
def delete_vol(connect, volumes):
    """
    Deletes Volumes Passed
    """
    if type(volumes) is not list:
        raise TypeError("Not a list")

    # Currently only printing id
    for ebs in volumes:
        try:
            LOG.info("INFO: {0} would have been deleted" .format(ebs))

        except Exception as err:
            LOG.error("ERROR: {0}".format(err))
Example #52
 def save_file(self, file_name):
     """
     Dump the Petri Net model into a JSON file with name 'file_name'.
     """
     msg = "Dumping Petri Net into file '{0}'".format(file_name)
     LOG.info(msg)
     out_dict = {}
     out_dict["P"] = self._places
     out_dict["T"] = self._transitions
     out_dict["I"] = self._get_io_dict("I")
     out_dict["O"] = self._get_io_dict("O")
     with open(file_name, "w+") as out_file:
         json.dump(out_dict, out_file)
     msg = "Dumping completed"
     LOG.info(msg)
Example #53
def handle_exc(text, obj, exc_info):
    """ Handle an exception. Currently we log the exception through  our own
        logger. 'obj' is currently unused. We might use it later to obtain
        detailed informations.
    """
    
    # Add some additional object info if available.
    # XXX: this should be replaced with a more elegant solution
    
    try:
        text = text + ' (%s)' % obj.absolute_url(1)
    except:
        pass

    LOG.error(text, exc_info=exc_info)
Example #54
 def add_transition(self, transition):
     """
     Function to add transition to T set if not already in it. If
     'transition' is not a string, this function will raise an exception.
     """
     if not isinstance(transition, basestring):
         err_message = "Transition provided is not an string"
         raise PetriNetException(err_message)
     if transition not in self._transitions:
         self._transitions.append(transition)
         for place in self._places:
             self.change_input_flow(place, transition, 0)
             self.change_output_flow(place, transition, 0)
         LOG.info("Transition '{0}' added to T set".format(transition))
     else:
         LOG.info("Transition '{0}' already in T set".format(transition))
Example #55
def checkEnvironment(envname):
    """ Check if the given name of an environment variable exists and
        if it points to an existing directory.
    """

    dirname = os.environ.get(envname, None)
    if dirname is None:
        LOG.debug('Environment variable $%s is unset' % envname)
        return False

    if not os.path.exists(dirname):
        LOG.debug('The directory referenced through the environment '
                  'variable $%s does not exist (%s)' %
                  (envname, dirname))
        return False
    return True
Example #56
 def add_place(self, place):
     """
     Function to add places to P set if not already in it. If 'place' is not
     a string, this function will raise an exception.
     """
     if not isinstance(place, basestring):
         err_message = "Place provided is not an string"
         raise PetriNetException(err_message)
     if place not in self._places:
         self._places.append(place)
         for transition in self._transitions:
             self.change_input_flow(place, transition, 0)
             self.change_output_flow(place, transition, 0)
         LOG.info("Place '{0}' added to P set".format(place))
     else:
         LOG.info("Place '{0}' already in P set".format(place))
Example #57
def run(command):
    cmd_string = " ".join(command)
    LOG.info("Runnin command \"{0}\"".format(cmd_string))
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    ret = process.wait()
    stdout, stderr = process.communicate()
    LOG.info("RC: \"{0}\"".format(ret))
    LOG.info("STDOUT: \"{0}\"".format(stdout))
    LOG.info("STDERR: \"{0}\"".format(stderr))
    return (ret, stdout, stderr)
Example #58
    def _processZIP(self, zip_archive, converter_name):

        LOG.info('Incoming request (%s, %d bytes)' % (converter_name, len(zip_archive)))
        ts = time.time()

        # temp directory handling 
        now = datetime.now().strftime('%Y%m%d%Z%H%M%S')
        ident = '%s-%s' % (now, uuid.uuid4())
        tempdir = os.path.join(self.temp_directory, ident)
        os.makedirs(tempdir)

        # store zip archive first
        zip_temp = os.path.join(tempdir, 'input.zip')
        file(zip_temp, 'wb').write(base64.decodestring(zip_archive))
        ZF = zipfile.ZipFile(zip_temp, 'r')
        for name in ZF.namelist():
            destfile = os.path.join(tempdir, name)
            if not os.path.exists(os.path.dirname(destfile)):
                os.makedirs(os.path.dirname(destfile))
            file(destfile, 'wb').write(ZF.read(name))
        ZF.close()

        # find HTML file
        html_files = glob.glob(os.path.join(tempdir, '*.htm*'))
        if not html_files:
            raise IOError('Archive does not contain any html files')
        if len(html_files) > 1:
            raise RuntimeError('Archive contains more than one html file')
        html_filename = html_files[0]
        # inject BASE tag containing the full local path (required by PrinceXML)
        self._inject_base_tag(html_filename)
        result = self._convert(html_filename, 
                               converter_name=converter_name)
        output_filename = result['output_filename']
        basename, ext = os.path.splitext(os.path.basename(output_filename))

        # Generate result ZIP archive with base64-encoded result
        zip_out = os.path.join(tempdir, '%s.zip' % ident)
        ZF = zipfile.ZipFile(zip_out, 'w')
        ZF.writestr('output%s' % ext, file(output_filename, 'rb').read())
        ZF.writestr('conversion-output.txt', result['output'])
        ZF.close()

        LOG.info('Request end (%3.2lf seconds)' % (time.time() - ts))
        return zip_out, output_filename
def search_es(max_gens, pop_range, ad_mut_stp, mu_lambda):
    pop = init_population(pop_range)
    best = fitness_func(pop[0])
    p = 1.5
    for gen in range(0, max_gens-1):
        children = mutate(pop[0], pop[1], p)
        LOG.debug("children>{0}".format(children))
        fitness = fitness_func(children[0])
        if fitness <= best:
            best = fitness
            pop = children
            p = 1.5
        else:
            p = 1.5 ** (-1/4)
        if mu_lambda:
            pop = init_population(pop_range)
            best = fitness_func(pop[0])
        LOG.rbf("Generation>{0}:new best>{1}".format(gen, best))
    return best
def validate_header(header):
    """
    ```
    "header": {
        "user_id": "",
        "msg_type": "0003",
        "msg_queue_timestamp": "1455883630000",
        "source_dev_id": "",
        "original_data_source": "SMART",
        "source_system_id": "TRUST"
    }
    ```
    """

    if header['msg_type'] != '0003':
        LOG.debug('Dropping unsupported message type `{}`'.format(
            header['msg_type']))
        return False

    return True