Example #1
def FullyQualifiedTestSuites(exe, option_test_suite):
  """Return a fully qualified list

  Args:
    exe: if True, use the executable-based test runner.
    option_test_suite: the test_suite specified as an option.
  """
  # Assume the test suites are in out/Release.
  test_suite_dir = os.path.abspath(os.path.join(constants.CHROME_DIR,
                                                'out', 'Release'))
  if option_test_suite:
    all_test_suites = [option_test_suite]
  else:
    all_test_suites = _TEST_SUITES

  if exe:
    qualified_test_suites = [os.path.join(test_suite_dir, t)
                             for t in all_test_suites]
  else:
    # out/Release/$SUITE_apk/$SUITE-debug.apk
    qualified_test_suites = [os.path.join(test_suite_dir,
                                          t + '_apk',
                                          t + '-debug.apk')
                             for t in all_test_suites]
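  # Verify that each resolved suite actually exists on disk before returning the list.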
  for t, q in zip(all_test_suites, qualified_test_suites):
    if not os.path.exists(q):
      logging.critical('Test suite %s not found in %s.\n'
                       'Supported test suites:\n %s\n'
                       'Ensure it has been built.\n',
                       t, q, _TEST_SUITES)
      return []
  return qualified_test_suites
Example #2
    def change_column_attrs(self, model, mutation, field_name, new_attrs):
        """Returns the SQL for changing one or more column attributes.

        This will generate all the statements needed for changing a set
        of attributes for a column.

        The resulting AlterTableSQLResult contains all the SQL needed
        to apply these attributes.
        """
        field = model._meta.get_field(field_name)
        attrs_sql_result = AlterTableSQLResult(self, model)

        for attr_name, attr_info in new_attrs.iteritems():
            method_name = 'change_column_attr_%s' % attr_name
            evolve_func = getattr(self, method_name)

            try:
                sql_result = evolve_func(model, mutation, field,
                                         attr_info['old_value'],
                                         attr_info['new_value'])
                assert not sql_result or isinstance(sql_result, SQLResult)
            except Exception, e:
                logging.critical(
                    'Error running database evolver function %s: %s',
                    method_name, e,
                    exc_info=1)
                raise

            attrs_sql_result.add(sql_result)
Example #3
    def run(self, addr):
        for protocol in self.__protocols:
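            # Resolve the string binding and port for this protocol, then build the DCE/RPC transport.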
            protodef = SMBEXEC.KNOWN_PROTOCOLS[protocol]
            port = protodef[1]

            logging.info("Trying protocol %s..." % protocol)
            logging.info("Creating service %s..." % self.__serviceName)

            stringbinding = protodef[0] % addr

            rpctransport = transport.DCERPCTransportFactory(stringbinding)
            rpctransport.set_dport(port)

            if hasattr(rpctransport,'preferred_dialect'):
               rpctransport.preferred_dialect(SMB_DIALECT)
            if hasattr(rpctransport, 'set_credentials'):
                # This method exists only for selected protocol sequences.
                rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey)
            rpctransport.set_kerberos(self.__doKerberos)

            self.shell = None
            try:
                if self.__mode == 'SERVER':
                    serverThread = SMBServer()
                    serverThread.start()
                self.shell = RemoteShell(self.__logger, self.__share, rpctransport, self.__mode, self.__serviceName, self.__noOutput)
                self.shell.onecmd(self.__command)
                self.shell.finish()
                if self.__mode == 'SERVER':
                    serverThread.stop()
            except  (Exception, KeyboardInterrupt), e:
                logging.critical(str(e))
                if self.shell is not None:
                    self.shell.finish()
Example #4
    def generate(self, view, errors, error_format):
        """Generate a dictionary that stores all errors along with their
        positions and descriptions. Needed to show these errors on the screen.

        Args:
            view (sublime.View): current view
            errors (list): list of unparsed errors in format @error_format
            error_format (str): either FORMAT_LIBCLANG or FORMAT_BINARY
        """
        log.debug(" generating error regions for view %s", view.id())
        # first clear old regions
        if view.id() in self.err_regions:
            log.debug(" removing old error regions")
            del self.err_regions[view.id()]
        # create an empty region dict for view id
        self.err_regions[view.id()] = {}

        if error_format == FORMAT_LIBCLANG:
            # expect a tu_diagnostics instance
            self.errors_from_tu_diag(view, errors)
        elif error_format == FORMAT_BINARY:
            # expect a list of strings for each line of cmd output
            self.errors_from_clang_output(view, errors)
        else:
            logging.critical(
                " error_format:'%s' should match '%s' or '%s'",
                error_format, FORMAT_LIBCLANG, FORMAT_BINARY)
        log.debug(" %s error regions ready", len(self.err_regions))
Example #5
 def get(self):
     if users.get_current_user() is not None:
         auth_uri = flow.step1_get_authorize_url()
         logging.critical(str(auth_uri))
         self.redirect(str(auth_uri))
     else:
         self.redirect(users.create_login_url("/login"))
Example #6
    def addWorker(self, worker, idleTime = 60, parameters = None):
        """
        Adds a worker object and sets it running. Worker thread will sleep for
        idleTime seconds between runs. Parameters, if present, are passed into
        the worker thread's setup, algorithm and terminate methods
        """
        # Check type of worker
        if not isinstance(worker, BaseWorkerThread):
            msg = "Attempting to add worker that does not inherit from "
            msg += "BaseWorkerThread"
            logging.critical(msg)
            return

        # Prepare the new worker thread
        self.prepareWorker(worker, idleTime)
        workerThread = threading.Thread(target = worker, args = (parameters,))
        msg = "Created worker thread %s" % str(worker)
        logging.info(msg)

        # Increase the active thread count - note this must be done before
        # starting the thread so the callback can decrease back in case of
        # startup failure
        self.lock.acquire()
        self.activeThreadCount += 1
        workerThread.name = "threadmanager-slave%s" % worker.slaveid
        self.slavelist.append(workerThread.name)
        self.lock.release()

        # Actually start the thread
        workerThread.start()
Example #7
  def post(self):


    if 'X-AppEngine-TaskName' not in self.request.headers:
      logging.critical('Detected an attempted XSRF attack. The header '
                       '"X-AppEngine-Taskname" was not set.')
      self.response.set_status(403)
      return


    # TODO In AppScale we do not check for these XSRF attacks.
    # We need some additional auth like we do for taskqueue with a secret hash.
    #
    #in_prod = (
    #    not self.request.environ.get("SERVER_SOFTWARE").startswith("Devel"))
    #if in_prod and self.request.environ.get("REMOTE_ADDR") != "0.1.0.2":
    #  logging.critical('Detected an attempted XSRF attack. This request did '
    #                   'not originate from Task Queue.')
    #  self.response.set_status(403)
    #  return


    headers = ["%s:%s" % (k, v) for k, v in self.request.headers.items()
               if k.lower().startswith("x-appengine-")]
    try:
      run(self.request.body)
    except PermanentTaskFailure, e:

      logging.exception("Permanent failure attempting to execute task")
Example #8
    def loadAndValidate( self, XMLFolder ):
        """
        Load the source XML file and remove the header from the tree.
        Also, extracts some useful elements from the header element.
        """
        if BibleOrgSysGlobals.verbosityLevel > 2: print( _("Loading from {}…").format( XMLFolder ) )
        self.XMLFolder = XMLFolder
        XMLFilepath = os.path.join( XMLFolder, GreekStrongsFileConverter.databaseFilename )
        try: self.XMLTree = ElementTree().parse( XMLFilepath )
        except FileNotFoundError:
            logging.critical( t("GreekStrongsFileConverter could not find database at {}").format( XMLFilepath ) )
            raise FileNotFoundError
        except ParseError as err:
            logging.critical( exp("Loader parse error in xml file {}: {} {}").format( GreekStrongsFileConverter.databaseFilename, sys.exc_info()[0], err ) )
            raise ParseError
        if BibleOrgSysGlobals.debugFlag: assert len( self.XMLTree ) # Fail here if we didn't load anything at all

        if self.XMLTree.tag == GreekStrongsFileConverter.treeTag:
            for segment in self.XMLTree:
                #print( segment.tag )
                if segment.tag == "prologue":
                    pass
                elif segment.tag == "entries":
                    self.validateEntries( segment )
                else: logging.error( "ks24 Unprocessed {!r} element ({}) in entry".format( segment.tag, segment.text ) )
        else: logging.error( "Expected to load {!r} but got {!r}".format( GreekStrongsFileConverter.treeTag, self.XMLTree.tag ) )
        if self.XMLTree.tail is not None and self.XMLTree.tail.strip(): logging.error( "vs42 Unexpected {!r} tail data after {} element".format( self.XMLTree.tail, self.XMLTree.tag ) )
Example #9
 def checkCA():
     #Check CA exists
     keyFile = os.path.join(os.path.dirname(__file__), 'CA.key')
     crtFile = os.path.join(os.path.dirname(__file__), 'CA.crt')
     if not os.path.exists(keyFile):
         if not OpenSSL:
             logging.critical('CA.crt does not exist and OpenSSL is disabled, ABORT!')
             sys.exit(-1)
         key, crt = CertUtil.makeCA()
         CertUtil.writeFile(keyFile, key)
         CertUtil.writeFile(crtFile, crt)
         [os.remove(os.path.join('certs', x)) for x in os.listdir('certs')]
     #Check CA imported
     cmd = {
             'win32'  : r'cd /d "%s" && certmgr.exe -add CA.crt -c -s -r localMachine Root >NUL' % os.path.dirname(__file__),
             #'darwin' : r'sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain CA.crt',
           }.get(sys.platform)
     if cmd and os.system(cmd) != 0:
         logging.warning('GoAgent failed to install the trusted root CA certificate. Please run goagent as administrator/root.')
     if OpenSSL:
         keyFile = os.path.join(os.path.dirname(__file__), 'CA.key')
         crtFile = os.path.join(os.path.dirname(__file__), 'CA.crt')
         cakey = CertUtil.readFile(keyFile)
         cacrt = CertUtil.readFile(crtFile)
         CertUtil.CA = (CertUtil.loadPEM(cakey, 0), CertUtil.loadPEM(cacrt, 2))
Example #10
def main():
    options = _parse_opts()

    loglevel = logging.INFO
    if options.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        format='%(asctime)s [%(levelname)s] %(message)s',
        datefmt='%d/%m/%Y %I:%M:%S %p',
        level=loglevel
    )

    if options.outfmt == 'stix':
        dump_report_to_stix(options)
    elif options.outfmt == 'maec':
        dump_report_to_maec(options)
    elif options.outfmt == 'stix-ol':
        dump_report_to_stix_ol(options)
    elif options.outfmt == 'stix-il':
        dump_report_to_stix_il(options)
    else:
        logging.critical('unhandled output format %s' % options.outfmt)
        sys.exit(1)

    sys.exit(0)
Example #11
    def upload_fastqc_counts(self, alignment_id):

        if not alignment_id:
            logging.critical("Could not upload fastqc_counts without an alignment id given")
            return

        self.get_alignment_counts(alignment_id)

        total = 0
        filtered = 0

        for fastqc_file, fastqc_counts in self.fastqc_counts.items():
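            # Skip the upload entirely if any FastQC report is missing counts; otherwise accumulate totals.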

            if not fastqc_counts:
                log.error("Could not get counts from %s for uploading" % fastqc_file)
                return

            total += fastqc_counts["total"]
            filtered += fastqc_counts["filtered"]

        # FastQC's definition of total differs from ours
        counts = {
            "total": total + filtered,
            "qc": filtered,
            "pf": total
        }

        for count_name, count in counts.items():
            self.upload_count(alignment_id, count_name, count)
Example #12
    def run(self):
        # Here we write a mini config for the server
        smbConfig = ConfigParser.ConfigParser()
        smbConfig.add_section('global')
        smbConfig.set('global','server_name','server_name')
        smbConfig.set('global','server_os','UNIX')
        smbConfig.set('global','server_domain','WORKGROUP')
        smbConfig.set('global','log_file',SMBSERVER_DIR + '/smb.log')
        smbConfig.set('global','credentials_file','')

        # Let's add a dummy share
        smbConfig.add_section(DUMMY_SHARE)
        smbConfig.set(DUMMY_SHARE,'comment','')
        smbConfig.set(DUMMY_SHARE,'read only','no')
        smbConfig.set(DUMMY_SHARE,'share type','0')
        smbConfig.set(DUMMY_SHARE,'path',SMBSERVER_DIR)

        # IPC always needed
        smbConfig.add_section('IPC$')
        smbConfig.set('IPC$','comment','')
        smbConfig.set('IPC$','read only','yes')
        smbConfig.set('IPC$','share type','3')
        smbConfig.set('IPC$','path')

        self.smb = smbserver.SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
        logging.info('Creating tmp directory')
        try:
            os.mkdir(SMBSERVER_DIR)
        except Exception, e:
            logging.critical(str(e))
            pass
Example #13
    def __init__(self, cfg, file_version=None):
        veris_logger.updateLogger(cfg)
        logging.debug("Initializing CSVtoJSON object.")

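        # Resolve the veris schema version: explicit argument first, then the config, then the input file itself.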
        if file_version is None:
            file_version = cfg.get("file_version", None)
        if file_version is None:
            file_version = self.get_file_schema_version(cfg['input'])
        if file_version is None:
            logging.warning("Could not determine veris version of {0}.  Please specify it as an argument to the class initialization, 'CSVtoJSON(cfg, file_version=<file version>)'".format(cfg['input']))
        elif file_version != self.script_version:
            logging.warning("File veris version {0} does not match script veris version {1}.".format(file_version, self.script_version))
        cfg['file_version'] = file_version

        if type(cfg["schemafile"]) == dict:
            self.jschema = cfg["schemafile"]
        else:
            try:
                self.jschema = self.openJSON(cfg["schemafile"])
            except IOError:
                logging.critical("ERROR: Schema file not found.")
                raise
                # exit(1)

        self.sfields = self.parseSchema(self.jschema)
        self.cfg = cfg
Example #14
 def __init__(self, parent=None):
     """Init class."""
     super(Downloader, self).__init__(parent)
     self.setWindowTitle(__doc__)
     if not os.path.isfile(__file__) or not __source__:
         return
     if not os.access(__file__, os.W_OK):
         error_msg = ("Destination file permission denied (not Writable)! "
                      "Try again to Update but as root or administrator.")
         log.critical(error_msg)
         QMessageBox.warning(self, __doc__.title(), error_msg)
         return
     self._time, self._date = time.time(), datetime.now().isoformat()[:-7]
     self._url, self._dst = __source__, __file__
     log.debug("Downloading from {} to {}.".format(self._url, self._dst))
     if not self._url.lower().startswith("https:"):
         log.warning("Insecure download over plain text without SSL.")
     self.template = """<h3>Downloading</h3><hr><table>
     <tr><td><b>From:</b></td>      <td>{}</td>
     <tr><td><b>To:  </b></td>      <td>{}</td> <tr>
     <tr><td><b>Started:</b></td>   <td>{}</td>
     <tr><td><b>Actual:</b></td>    <td>{}</td> <tr>
     <tr><td><b>Elapsed:</b></td>   <td>{}</td>
     <tr><td><b>Remaining:</b></td> <td>{}</td> <tr>
     <tr><td><b>Received:</b></td>  <td>{} MegaBytes</td>
     <tr><td><b>Total:</b></td>     <td>{} MegaBytes</td> <tr>
     <tr><td><b>Speed:</b></td>     <td>{}</td>
     <tr><td><b>Percent:</b></td>     <td>{}%</td></table><hr>"""
     self.manager = QNetworkAccessManager(self)
     self.manager.finished.connect(self.save_downloaded_data)
     self.manager.sslErrors.connect(self.download_failed)
     self.progreso = self.manager.get(QNetworkRequest(QUrl(self._url)))
     self.progreso.downloadProgress.connect(self.update_download_progress)
     self.show()
     self.exec_()
Example #15
    def __init__(self, h5parmFile, readonly = True, complevel = 5, complib='zlib'):
        """
        Keyword arguments:
        h5parmFile -- H5parm filename
        readonly -- if True the table is open in readonly mode (default=True)
        complevel -- compression level from 0 to 9 (default=5) when creating the file
        complib -- library for compression: lzo, zlib, bzip2 (default=zlib)
        """
        if os.path.isfile(h5parmFile):
            if tables.is_pytables_file(h5parmFile) == None:
                logging.critical('Wrong HDF5 format for '+h5parmFile+'.')
                raise Exception('Wrong HDF5 format for '+h5parmFile+'.')
            if readonly:
                logging.debug('Reading from '+h5parmFile+'.')
                self.H = tables.openFile(h5parmFile, 'r')
            else:
                logging.debug('Appending to '+h5parmFile+'.')
                self.H = tables.openFile(h5parmFile, 'r+')
        else:
            if readonly:
                raise Exception('Missing file '+h5parmFile+'.')
            else:
                logging.debug('Creating '+h5parmFile+'.')
                # add a compression filter
                f = tables.Filters(complevel=complevel, complib=complib)
                self.H = tables.openFile(h5parmFile, filters=f, mode='w')

        self.fileName = h5parmFile
Example #16
  def post(self):


    if 'X-AppEngine-TaskName' not in self.request.headers:
      logging.critical('Detected an attempted XSRF attack. The header '
                       '"X-AppEngine-Taskname" was not set.')
      self.response.set_status(403)
      return



    in_prod = (
        not self.request.environ.get("SERVER_SOFTWARE").startswith("Devel"))
    if in_prod and self.request.environ.get("REMOTE_ADDR") != "0.1.0.2":
      logging.critical('Detected an attempted XSRF attack. This request did '
                       'not originate from Task Queue.')
      self.response.set_status(403)
      return


    headers = ["%s:%s" % (k, v) for k, v in self.request.headers.items()
               if k.lower().startswith("x-appengine-")]
    logging.info(", ".join(headers))

    try:
      run(self.request.body)
    except PermanentTaskFailure, e:

      logging.exception("Permanent failure attempting to execute task")
Example #17
def TestLog():
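    # Emit one message at each standard level; with the default root logger configuration only WARNING and above reach the console.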
    logging.debug('debug message')  
    logging.info('info message')  
    logging.warning('warning message')  
    logging.error('error message')  
    logging.critical('critical message')   
    return
Example #18
    def check_scheduled_actions(self):
        now = datetime.datetime.now()

        # Re-schedule random tasks at midnight
        if not hasattr(self, "last_random_schedule"):
            self.last_random_schedule = self.bot.load("last_random_schedule")

        if self.last_random_schedule is None or self.last_random_schedule.day != now.day:
            self.bot.save("last_random_schedule", now)
            self.last_random_schedule = now
            self._clear_random_tasks()
            for plugin_info, fn, function_name in self.bot.random_tasks:
                meta = fn.will_fn_metadata
                self.add_random_tasks(
                    plugin_info["full_module_name"],
                    plugin_info["name"],
                    function_name,
                    meta.start_hour,
                    meta.end_hour,
                    meta.day_of_week,
                    meta.num_times_per_day
                )
        try:
            if not self.bot.load("scheduler_add_lock", False) or not self.bot.load("scheduler_lock", False):
                self.bot.save("scheduler_lock", True)
                self._run_applicable_actions_in_list(now,)
                self._run_applicable_actions_in_list(now, periodic_list=True)
                self.bot.save("scheduler_lock", False)
        except:
            logging.critical("Scheduler run blew up.\n\n%s\nContinuing...\n" % (traceback.format_exc(), ))
Example #19
  def save_meeting(self, meeting):
    """
    Write meeting object to database. This means dereferencing all associated objects as DBrefs
    """
    meeting_stored = self.get_object('meeting', 'originalId', meeting.originalId)
    meeting_dict = meeting.dict()

    # setting body
    meeting_dict['body'] = DBRef(collection='body',id=self.body_uid)
    
    # ensure that there is an originalId
    if 'originalId' not in meeting_dict:
      logging.critical("Fatal error: no originalId available at url %s", meeting_dict['originalUrl'])
    
    # dereference items
    meeting_dict = self.dereference_object(meeting_dict, 'organization')
    meeting_dict = self.dereference_object(meeting_dict, 'agendaItem')
    meeting_dict = self.dereference_object(meeting_dict, 'invitation', 'file')
    meeting_dict = self.dereference_object(meeting_dict, 'resultsProtocol', 'file')
    meeting_dict = self.dereference_object(meeting_dict, 'verbatimProtocol', 'file')
    meeting_dict = self.dereference_object(meeting_dict, 'auxiliaryFile', 'file')
    
    
    # create slug
    # meeting_dict['slug'] = self.slugify(meeting_dict['identifier'])
    
    # save data
    return self.save_object(meeting_dict, meeting_stored, 'meeting')
Example #20
    def from_index(f, index, root=False):
        """Given an index file of the kind built by indexbuilder.py, and an
        offset into that file, return the node that begins at that index.
        """
        f.seek(index)
        logging.debug("Sought to index %d" % (index))
        size = struct.unpack('i', f.read(POINTER_SIZE))[0]
        if size > 100:
            logging.warning("That sure is a big node...")
        remaining_bytes = size - POINTER_SIZE
        if not root:
            char = f.read(1)
            terminal = f.read(1) == 't'
            remaining_bytes -= 2
        else:
            char = None
            # Wouldn't it be cool if the empty string was a word in the dictionary?
            # I wonder what it would mean.
            terminal = False

        children = set([]) # Set of indices of child nodes
        while remaining_bytes > 0:
            try:
                child_pointer = struct.unpack('i', f.read(POINTER_SIZE))
            except struct.error:
                logging.critical("Pointers got messed up. Have %d remaining bytes on char %s at index %d" \
                 % (remaining_bytes, char, index))
                raise
            remaining_bytes -= POINTER_SIZE
            children.add(child_pointer[0])

        assert remaining_bytes == 0, "Something went wrong. Didn't get a clean read."

        return PrefixNode(char, f, terminal, children)
Example #21
    def _run_applicable_actions_in_list(self, now, periodic_list=False):
        times_list = self.bot.get_times_list(periodic_list=periodic_list)

        # Iterate through times_list first, before loading the full schedule_list into memory (big pickled stuff, etc)
        a_task_needs_run = False
        for task_hash, task_time in times_list.items():
            if task_time < now:
                a_task_needs_run = True
                break

        if a_task_needs_run:
            sched_list = self.bot.get_schedule_list(periodic_list=periodic_list)
            for item_hash, item in sched_list.items():
                running_task = False
                try:

                    if item["when"] < now:
                        running_task = True
                        self.run_action(item)
                except:
                    logging.critical(
                        "Error running task %s.  \n\n%s\n"
                        "Trying to delete it and recover...\n" % (item, traceback.format_exc())
                    )

                if running_task:
                    try:
                        self.bot.remove_from_schedule(item["hash"], periodic_list=periodic_list)
                    except:
                        logging.critical(
                            "Unable to remove task. Leaving it in, you'll have to clean it out by hand."
                            "Sorry! \n\n%s\nContinuing...\n" % (traceback.format_exc(),))
Example #22
def GetLogTimestamp(log_line):
  """Returns the timestamp of the given |log_line|."""
  try:
    return datetime.datetime.strptime(log_line[:18], '%m-%d %H:%M:%S.%f')
  except (ValueError, IndexError):
    logging.critical('Error reading timestamp from ' + log_line)
    return None
Example #23
 def post(self):
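     # Return the signed-in user's visibility setting as JSON; anonymous requests only get a critical log entry.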
     if self.user:
         response_info = {'visibility': self.user.display_type}
         self.response.out.write(json.dumps(response_info))
     else:
         logging.critical('Unauthorized visibility check')
     return
Example #24
    def distributions(self, records=None):
        logging.info("Analysing %s ...", self.database)
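        # Bin the records by mutation level, then compute intra-donor distances for each bin.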
        try:
            if records is not None and isinstance(records, pd.DataFrame):
                max_mut = np.max(records['MUT'])
                self.n_samples = records.shape[0]
            else:
                # load from file
                max_mut, self.n_samples = io.get_max_mut(self.database)

            lin = np.linspace(0, max_mut, min(self.n_samples / 15., 12))
            sets = [(0, 0)] + zip(lin[:-1], lin[1:])
            if len(sets) == 1:
                # no correction needs to be applied
                return None
            out_muts = [self.intra_donor_distance(
                records, i, j) for i, j in zip(sets, sets)]
        except StandardError as msg:
            logging.critical(msg)
            out_muts = []

        my_dict = dict()
        for f, m in out_muts:
            my_dict.setdefault(m, []).append(f)
        return my_dict
Example #25
    def mutation_histogram(self, records, mut, filename):
        """Records is a pd.Dataframe."""
        if os.path.exists(filename + '.npz'):
            logging.critical(filename + '.npz exists.')
            return filename
        if records.shape[0] < self.min_seqs:
            return ''

        igs = [IgRecord(x.to_dict()) for _, x in records.iterrows()]
        igsimilarity_learn = copy.deepcopy(self.igsimilarity)
        igsimilarity_learn.correct = self.correction
        igsimilarity_learn.rm_duplicates = True
        if not self.correction:
            igsimilarity_learn.tol = 1000
        else:
            igsimilarity_learn.correct_by = self.correction

        sim_func = igsimilarity_learn.pairwise
        logging.info("Computing %s", filename)
        dnearest = parallel_distance.dnearest_intra_padding(
            igs, sim_func, filt=lambda x: x > 0, func=max)

        if not os.path.exists(filename.split('/')[0]):
            os.makedirs(filename.split('/')[0])
        np.savez(filename, X=dnearest, mut=mut)

        # Plot distance distribution
        title = "Similarities for {:.3f}-{:.3f}%" \
                .format(np.min(records['MUT']), np.max(records['MUT']))
        plot_hist(dnearest, self.bins, title, filename)
        return filename
Example #26
 def pc_heartbeat(self):
     for plc in self.plcs:
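         # A dropped Snap7 connection is logged and a reconnect is attempted before the next heartbeat.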
         try:
             plc.blink_pc_heartbeat()
         except snap7.snap7exceptions.Snap7Exception:
             logging.critical("Connection to PLC: {plc} lost. Trying to re-establish connection.".format(plc=plc))
             plc.connect()
Example #27
 def sync_plcs_time_if_needed(self):
     for plc in self.plcs:
         try:
             plc.sync_time_if_needed()
         except snap7.snap7exceptions.Snap7Exception:
             logging.critical("Connection to PLC: {plc} lost. Trying to re-establish connection.".format(plc=plc))
             plc.connect()
Example #28
def sendEmail(config, subject, body, to=""):
    if config.getboolean('email', 'nomail'):
        logging.info("Not sending email with subject '" + subject + "' but pretending we did.\n" + body)
        return True

    FROM = config.get('email', 'user')
    PASS = config.get('email', 'pass')
    if not to:
        to = config.get('general', 'alertcontact')

    # Prepare actual message
    # Avoid gmail threading
    subject = "[" + config.get('general', 'servername') + "] " + subject + "       " 
    if config.getboolean('email', 'bustgmailthreading'):
        subject += str(random.random())
    message = """\From: %s\nTo: %s\nSubject: %s\n\n%s""" \
        % (FROM, ", ".join(to), subject, body)
    try:
        server = smtplib.SMTP(config.get('email', 'smtpserver'), config.get('email', 'smtpport'))
        server.ehlo()
        server.starttls()
        server.login(FROM, PASS)
        server.sendmail(FROM, to, message)
        server.close()
        return True
    except Exception as e:
        logging.critical("Caught an exception trying to send an email:" + str(e))
        return False
Example #29
def read_config_file(cfg_file, sites, env):
    """ Open and parse the config file, save the words in a list. """
    # Open config file
    logging.info("Reading configuration file: " + cfg_file)
    config = ConfigParser.SafeConfigParser()
    config.read(cfg_file)
    sections = config.sections()
    for section in sections:
        site = Site()
        if section == "General":
            # Save name of directory to download files to
            env.download_dir = config.get(section, "download_dir")
        else:
            # Save url to check
            site.login_url = config.get(section, "login_url")
            # Save url to check
            site.feed_url = config.get(section, "rss_url")
            # Save time interval (in seconds) for checking feeds
            site.time_interval = config.getfloat(section, "interval") * 60.0
            # Save list of words to look for
            keys_str = str(config.get(section, "keys"))
            site.keys = keys_str.split()
            # Save username to site
            site.username = config.get(section, "username")
            # Save password to site
            site.password = config.get(section, "password")
            # Add to array of sites
            sites.append(site)
    # safety check
    if len(sites) < 1:
        logging.critical("Can't read config file")
        exit(-1)
Example #30
  def StartMonitoringLogcat(self, clear=True, timeout=10, logfile=None,
                            filters=[]):
    """Starts monitoring the output of logcat, for use with WaitForLogMatch.

    Args:
      clear: If True, the existing logcat output will be cleared, to avoid
             matching historical output lurking in the log.
      timeout: How long WaitForLogMatch will wait for the given match
      filters: A list of logcat filters to be used.
    """
    if clear:
      self.RunShellCommand('logcat -c')
    args = ['logcat', '-v', 'threadtime']
    if filters:
      args.extend(filters)
    else:
      args.append('*:v')

    # Spawn logcat and synchronize with it.
    for _ in range(4):
      self._logcat = pexpect.spawn('adb', args, timeout=timeout,
                                   logfile=logfile)
      self.RunShellCommand('log startup_sync')
      if self._logcat.expect(['startup_sync', pexpect.EOF,
                              pexpect.TIMEOUT]) == 0:
        break
      self._logcat.close(force=True)
    else:
      logging.critical('Error reading from logcat: ' + str(self._logcat.match))
      sys.exit(1)
Example #31
from twython import Twython, TwythonError
import time
import logging
logging.debug('Debugging Information')
logging.info('Information')
logging.warning('Warning: file %s not found', ' server.conf')
logging.error('Error')
logging.critical('Critical error!')

from auth import (consumer_key, consumer_secret, access_token,
                  access_token_secret)

twitter = Twython(consumer_key, consumer_secret, access_token,
                  access_token_secret)


def uhrzeit():
    try:
        #seconds = time.time()
        #print ("Seconds elapsed since: ", seconds)
        named_tuple = time.localtime()
        time_string = time.strftime("%H:%M:%S -- %H.%m.%Y", named_tuple)
        print time_string
    except TwythonError as e:
        print e


def timebot():
    try:

        print "BierbotTime Start"
Example #32
PATH = os.path.dirname(os.path.abspath(__file__))

def get_evak_time(inifile, trajfile):
    fps, N, traj = parse_file(trajfile)
    etime =  (max( traj[:, 1] ) - min(traj[:, 1])) / float(fps)
    return etime

def call_test(num, suffix):
    logging.info("copy master_ini_%s.xml"%suffix)
    logging.info("copy geometry/geometry_test11_%s.xml"%suffix,)
    copyfile("%s/master_ini_%s.xml"%(PATH, suffix), "%s/master_ini.xml"%PATH)
    copyfile("%s/geometry/geometry_test11_%s.xml"%(PATH, suffix), "%s/geometry.xml"%PATH)
    test = JPSRunTestDriver(num, argv0=sys.argv[0], testdir=sys.path[0], utestdir=utestdir, jpscore=sys.argv[1])
    return test.run_test(testfunction=get_evak_time)

if __name__ == "__main__":
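    # Run the test with two geometry variants and compare their evacuation times against the threshold.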
    threshold = 0.001
    result_a = call_test(111, "a")
    result_b = call_test(112, "b")
    diff = [a - b for a, b in zip(result_a, result_b)]
    success = all(v <= threshold for v in diff)
    if success:
        logging.info("%s exits with SUCCESS" % (sys.argv[0]))
        sys.exit(SUCCESS)
    else:
        logging.critical("%s exits with FAILURE" % (sys.argv[0]))
        logging.debug("result_a {}".format(", ".join(map(str, result_a))))
        logging.debug("result_b {}".format(", ".join(map(str, result_b))))
        logging.debug("diff {} (threshold = {})".format(", ".join(map(str, diff)), threshold))
        sys.exit(FAILURE)
Example #33
    def fetchTemp(self):
        """ Retrieves temperature data from ESATAN output file. """
        timestep = 0
        self.data = {}
        self.extrema = {}
        self.components = []
        self.time = []
        lowerLim, upperLim = self.thresholds
        logging.info(
            "Ignoring values smaller than {} and higher than {}".format(
                lowerLim, upperLim))

        # This check might not be necessary
        if not os.path.isfile(self.filePath):
            logging.critical("Output file could not be found")
            return

        # Open ESATAN file
        logging.info('Reading temperature data from ESATAN logfile {}'.format(
            self.filePath))
        with open(self.filePath) as logFile:
            lines = logFile.readlines()

        # Scan all lines
        for i, line in enumerate(lines):
            # Search for timestamp
            if 'TIMEN' in line:
                time = float(line.split()[2])
                self.time.append(time)

            # Search for temperature data paragraph
            if '+MOVE' in line:
                # This if is necessary because in some files the data following the +MOVE flag is not temperature data
                if lines[i + 3].split()[2] == 'T':
                    for l in lines[i + 6:]:  #skip subheader
                        # Break at end of paragraph
                        if not l.strip():
                            break
                        else:
                            lWords = l.split()
                            comp = lWords[1]
                            try:
                                temp = float(lWords[2])
                            except Exception:
                                logging.error(
                                    "Temperature value seems to be faulty: {}".
                                    format(lWords[2]))
                                break

                            ## If this temperature doesn't pass a filter, set it to NaN
                            #if upperLim != None and temp > upperLim:
                            #    logging.info("Filtered too high value {}".format(temp))
                            #    break
                            #    #temp = np.NaN
                            #if lowerLim != None and temp < lowerLim:
                            #    logging.info("Filtered too low value {}".format(temp))
                            #    break
                            #    #temp = np.NaN
                            #if temp in self.ignoreValues:
                            #    logging.info("Filtered ignored value")
                            #    break
                            #    #temp = np.NaN

                            # Create new keys if necessary
                            if comp not in self.data.keys():
                                self.components.append(comp)
                                self.data[comp] = {}
                            if comp not in self.extrema.keys():
                                self.extrema[comp] = {}
                                self.extrema[comp]['glob_max'] = (0.0, -9999)
                                self.extrema[comp]['glob_min'] = (0.0, 9999)
                            if time not in self.data[comp].keys():
                                self.data[comp][time] = {}
                                self.data[comp][time]['Tmax'] = -9999
                                self.data[comp][time]['Tmin'] = 9999

                            # See if this temperature is a global extremum
                            if temp > self.extrema[comp]['glob_max'][1]:
                                self.extrema[comp]['glob_max'] = (time, temp)
                            elif temp < self.extrema[comp]['glob_min'][1]:
                                self.extrema[comp]['glob_min'] = (time, temp)

                            # Determine extrema at current time
                            if temp > self.data[comp][time]['Tmax']:
                                self.data[comp][time]['Tmax'] = temp
                            elif temp < self.data[comp][time]['Tmin']:
                                self.data[comp][time]['Tmin'] = temp

        # Check if for all times in array:times there are data points.
        # If filters are activated, that is sometimes not the case.
        # If there are no data points for a certain time, delete it
        print "Cleansing time array"
        print "Time array:\n", self.time
        for comp in self.data.keys():
            print "Data keys:\n", self.data[comp].keys()
            for time in list(self.time):
                if time not in self.data[comp].keys():
                    print "Removing time", time
                    self.time.remove(time)
        print self.time
Example #34
import os
logging.info('Importing numpy')
import numpy as np

# Try to find UI file in temp folder created by exe. Works if UI file was included in the exe by tweaking the pyinstaller spec file
if hasattr(sys, '_MEIPASS'):
    ui_path = os.path.join(sys._MEIPASS, "evaltan.ui")
elif hasattr(sys, '_MEIPASS2'):
    ui_path = os.path.join(sys._MEIPASS2, "evatan.ui")
else:
    ui_path = "evatan.ui"

try:
    Ui_MainWindow, QMainWindow = loadUiType(ui_path)
except Exception:
    logging.critical(
        'GUI file could not be loaded from path {}!'.format(ui_path))
    exit()


class ApplicationWindow(QMainWindow, Ui_MainWindow):
    def __init__(self, ):
        logging.info('Initiating main window')
        super(ApplicationWindow, self).__init__()

        self.fileLoaded = False

        # Create application window
        logging.info("Setting up UI")
        self.setupUi(self)
        self.setWindowTitle('ESATAN Data Evaluation')
Example #35
def get_native_optimizer():
    if os.environ.get('EMCC_FAST_COMPILER') == '0':
        logging.critical(
            'Non-fastcomp compiler is no longer available, please use fastcomp or an older version of emscripten'
        )
        sys.exit(1)

    # Allow users to override the location of the optimizer executable by setting an environment variable EMSCRIPTEN_NATIVE_OPTIMIZER=/path/to/optimizer(.exe)
    if os.environ.get('EMSCRIPTEN_NATIVE_OPTIMIZER') and len(
            os.environ.get('EMSCRIPTEN_NATIVE_OPTIMIZER')) > 0:
        logging.debug('env forcing native optimizer at ' +
                      os.environ.get('EMSCRIPTEN_NATIVE_OPTIMIZER'))
        return os.environ.get('EMSCRIPTEN_NATIVE_OPTIMIZER')
    # Also, allow specifying the location of the optimizer in .emscripten configuration file under EMSCRIPTEN_NATIVE_OPTIMIZER='/path/to/optimizer'
    if hasattr(shared, 'EMSCRIPTEN_NATIVE_OPTIMIZER') and len(
            shared.EMSCRIPTEN_NATIVE_OPTIMIZER) > 0:
        logging.debug('config forcing native optimizer at ' +
                      shared.EMSCRIPTEN_NATIVE_OPTIMIZER)
        return shared.EMSCRIPTEN_NATIVE_OPTIMIZER

    FAIL_MARKER = shared.Cache.get_path('optimizer.building_failed')
    if os.path.exists(FAIL_MARKER):
        shared.logging.debug(
            'seeing that optimizer could not be built (run emcc --clear-cache or erase "optimizer.building_failed" in cache dir to retry)'
        )
        return None

    def get_optimizer(name, args, handle_build_errors=None):
        class NativeOptimizerCreationException(Exception):
            pass

        outs = []
        errs = []
        try:

            def create_optimizer_cmake():
                shared.logging.debug('building native optimizer via CMake: ' +
                                     name)
                output = shared.Cache.get_path(name)
                shared.try_delete(output)

                if NATIVE_OPTIMIZER == '1':
                    cmake_build_type = 'RelWithDebInfo'
                elif NATIVE_OPTIMIZER == '2':
                    cmake_build_type = 'Release'
                elif NATIVE_OPTIMIZER == 'g':
                    cmake_build_type = 'Debug'

                build_path = shared.Cache.get_path('optimizer_build_' +
                                                   cmake_build_type)
                shared.try_delete(os.path.join(build_path, 'CMakeCache.txt'))

                log_output = None if DEBUG else subprocess.PIPE
                if not os.path.exists(build_path):
                    os.mkdir(build_path)

                if WINDOWS:
                    # Poor man's check for whether or not we should attempt 64 bit build
                    if os.environ.get('ProgramFiles(x86)'):
                        cmake_generators = [
                            'Visual Studio 12 Win64', 'Visual Studio 12',
                            'Visual Studio 11 Win64', 'Visual Studio 11',
                            'MinGW Makefiles', 'Unix Makefiles'
                        ]
                    else:
                        cmake_generators = [
                            'Visual Studio 12', 'Visual Studio 11',
                            'MinGW Makefiles', 'Unix Makefiles'
                        ]
                else:
                    cmake_generators = ['Unix Makefiles']

                for cmake_generator in cmake_generators:
                    proc = subprocess.Popen([
                        'cmake', '-G', cmake_generator,
                        '-DCMAKE_BUILD_TYPE=' + cmake_build_type,
                        shared.path_from_root('tools', 'optimizer')
                    ],
                                            cwd=build_path,
                                            stdin=log_output,
                                            stdout=log_output,
                                            stderr=log_output)
                    proc.communicate()
                    make_env = os.environ.copy()
                    if proc.returncode == 0:
                        if 'Visual Studio' in cmake_generator:
                            ret = find_msbuild(
                                os.path.join(build_path,
                                             'asmjs_optimizer.sln'), make_env)
                            make = [
                                ret[0], '/t:Build',
                                '/p:Configuration=' + cmake_build_type,
                                '/nologo', '/verbosity:minimal',
                                'asmjs_optimizer.sln'
                            ]
                            make_env = ret[1]
                        elif 'MinGW' in cmake_generator:
                            make = ['mingw32-make']
                        else:
                            make = ['make']

                        proc = subprocess.Popen(make,
                                                cwd=build_path,
                                                stdin=log_output,
                                                stdout=log_output,
                                                stderr=log_output,
                                                env=make_env)
                        proc.communicate()
                        if proc.returncode == 0:
                            if WINDOWS and 'Visual Studio' in cmake_generator:
                                shutil.copyfile(
                                    os.path.join(build_path, cmake_build_type,
                                                 'optimizer.exe'), output)
                            else:
                                shutil.copyfile(
                                    os.path.join(build_path, 'optimizer'),
                                    output)
                            return output
                        else:
                            shared.try_delete(
                                os.path.join(build_path, 'CMakeCache.txt'))
                            # Proceed to next iteration of the loop to try next possible CMake generator.

                raise NativeOptimizerCreationException()

            def create_optimizer():
                shared.logging.debug('building native optimizer: ' + name)
                output = shared.Cache.get_path(name)
                shared.try_delete(output)
                for compiler in [
                        shared.CLANG, 'g++', 'clang++'
                ]:  # try our clang first, otherwise hope for a system compiler in the path
                    shared.logging.debug('  using ' + compiler)
                    try:
                        out, err = subprocess.Popen(
                            [
                                compiler,
                                shared.path_from_root('tools', 'optimizer',
                                                      'parser.cpp'),
                                shared.path_from_root('tools', 'optimizer',
                                                      'simple_ast.cpp'),
                                shared.path_from_root('tools', 'optimizer',
                                                      'optimizer.cpp'),
                                shared.path_from_root('tools', 'optimizer',
                                                      'optimizer-shared.cpp'),
                                shared.path_from_root('tools', 'optimizer',
                                                      'optimizer-main.cpp'),
                                '-O3', '-std=c++11', '-fno-exceptions',
                                '-fno-rtti', '-o', output
                            ] + args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE).communicate()
                        outs.append(out)
                        errs.append(err)
                    except OSError:
                        if compiler == shared.CLANG:
                            raise  # otherwise, OSError is likely due to g++ or clang++ not being in the path
                    if os.path.exists(output): return output
                raise NativeOptimizerCreationException()

            use_cmake_to_configure = WINDOWS  # Currently only Windows uses CMake to drive the optimizer build, but set this to True to use on other platforms as well.
            if use_cmake_to_configure:
                return shared.Cache.get(name,
                                        create_optimizer_cmake,
                                        extension='exe')
            else:
                return shared.Cache.get(name,
                                        create_optimizer,
                                        extension='exe')
        except NativeOptimizerCreationException, e:
            shared.logging.debug('failed to build native optimizer')
            handle_build_errors(outs, errs)
            open(FAIL_MARKER, 'w').write(':(')
            return None
Example #36
def run_on_js(filename,
              passes,
              js_engine,
              source_map=False,
              extra_info=None,
              just_split=False,
              just_concat=False):
    with ToolchainProfiler.profile_block('js_optimizer.split_markers'):
        if type(passes) == str:
            passes = [passes]

        js = open(filename).read()
        if os.linesep != '\n':
            js = js.replace(os.linesep,
                            '\n')  # we assume \n in the splitting code

        # Find suffix
        suffix_marker = '// EMSCRIPTEN_GENERATED_FUNCTIONS'
        suffix_start = js.find(suffix_marker)
        suffix = ''
        if suffix_start >= 0:
            suffix_end = js.find('\n', suffix_start)
            suffix = js[suffix_start:suffix_end] + '\n'
            # if there is metadata, we will run only on the generated functions. If there isn't, we will run on everything.

        # Find markers
        start_funcs = js.find(start_funcs_marker)
        end_funcs = js.rfind(end_funcs_marker)

        if start_funcs < 0 or end_funcs < start_funcs or not suffix:
            logging.critical(
                'Invalid input file. Did not contain appropriate markers. (start_funcs: %s, end_funcs: %s, suffix_start: %s'
                % (start_funcs, end_funcs, suffix_start))
            sys.exit(1)

        minify_globals = 'minifyNames' in passes and 'asm' in passes
        if minify_globals:
            passes = map(lambda p: p
                         if p != 'minifyNames' else 'minifyLocals', passes)
            start_asm = js.find(start_asm_marker)
            end_asm = js.rfind(end_asm_marker)
            assert (start_asm >= 0) == (end_asm >= 0)

        closure = 'closure' in passes
        if closure:
            passes = filter(lambda p: p != 'closure',
                            passes)  # we will do it manually

        cleanup = 'cleanup' in passes
        if cleanup:
            passes = filter(lambda p: p != 'cleanup',
                            passes)  # we will do it manually

        split_memory = 'splitMemory' in passes

    if not minify_globals:
        with ToolchainProfiler.profile_block('js_optimizer.no_minify_globals'):
            pre = js[:start_funcs + len(start_funcs_marker)]
            post = js[end_funcs + len(end_funcs_marker):]
            js = js[start_funcs + len(start_funcs_marker):end_funcs]
            if 'asm' not in passes:  # can have Module[..] and inlining prevention code, push those to post

                class Finals:
                    buf = []

                def process(line):
                    if len(line) > 0 and (line.startswith(
                        ('Module[', 'if (globalScope)'))
                                          or line.endswith('["X"]=1;')):
                        Finals.buf.append(line)
                        return False
                    return True

                js = '\n'.join(filter(process, js.split('\n')))
                post = '\n'.join(Finals.buf) + '\n' + post
            post = end_funcs_marker + post
    else:
        with ToolchainProfiler.profile_block('js_optimizer.minify_globals'):
            # We need to split out the asm shell as well, for minification
            pre = js[:start_asm + len(start_asm_marker)]
            post = js[end_asm:]
            asm_shell = js[start_asm + len(start_asm_marker):start_funcs +
                           len(start_funcs_marker)] + '''
EMSCRIPTEN_FUNCS();
''' + js[end_funcs + len(end_funcs_marker):end_asm + len(end_asm_marker)]
            js = js[start_funcs + len(start_funcs_marker):end_funcs]

            # we assume there is a maximum of one new name per line
            minifier = Minifier(js, js_engine)

            def check_symbol_mapping(p):
                if p.startswith('symbolMap='):
                    minifier.symbols_file = p.split('=')[1]
                    return False
                if p == 'profilingFuncs':
                    minifier.profiling_funcs = True
                    return False
                return True

            passes = filter(check_symbol_mapping, passes)
            asm_shell_pre, asm_shell_post = minifier.minify_shell(
                asm_shell, 'minifyWhitespace' in passes,
                source_map).split('EMSCRIPTEN_FUNCS();')
            asm_shell_post = asm_shell_post.replace('});', '})')
            pre += asm_shell_pre + '\n' + start_funcs_marker
            post = end_funcs_marker + asm_shell_post + post

            minify_info = minifier.serialize()

            if extra_info:
                for key, value in extra_info.iteritems():
                    assert key not in minify_info or value == minify_info[
                        key], [key, value, minify_info[key]]
                    minify_info[key] = value

            #if DEBUG: print >> sys.stderr, 'minify info:', minify_info

    with ToolchainProfiler.profile_block(
            'js_optimizer.remove_suffix_and_split'):
        # remove suffix if no longer needed
        if suffix and 'last' in passes:
            suffix_start = post.find(suffix_marker)
            suffix_end = post.find('\n', suffix_start)
            post = post[:suffix_start] + post[suffix_end:]

        total_size = len(js)
        funcs = split_funcs(js, just_split)
        js = None

    with ToolchainProfiler.profile_block('js_optimizer.split_to_chunks'):
        # if we are making source maps, we want our debug numbering to start from the
        # top of the file, so avoid breaking the JS into chunks
        cores = 1 if source_map else int(
            os.environ.get('EMCC_CORES') or multiprocessing.cpu_count())

        if not just_split:
            intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
            chunk_size = min(
                MAX_CHUNK_SIZE,
                max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
            chunks = shared.chunkify(funcs, chunk_size)
        else:
            # keep same chunks as before
            chunks = map(lambda f: f[1], funcs)

        chunks = filter(lambda chunk: len(chunk) > 0, chunks)
        if DEBUG and len(chunks) > 0:
            print >> sys.stderr, 'chunkification: num funcs:', len(
                funcs), 'actual num chunks:', len(
                    chunks), 'chunk size range:', max(map(len,
                                                          chunks)), '-', min(
                                                              map(len, chunks))
        funcs = None

        if len(chunks) > 0:
            serialized_extra_info = suffix_marker + '\n'
            if minify_globals:
                serialized_extra_info += '// EXTRA_INFO:' + json.dumps(
                    minify_info)
            elif extra_info:
                serialized_extra_info += '// EXTRA_INFO:' + json.dumps(
                    extra_info)
            with ToolchainProfiler.profile_block('js_optimizer.write_chunks'):

                def write_chunk(chunk, i):
                    temp_file = temp_files.get('.jsfunc_%d.js' % i).name
                    f = open(temp_file, 'w')
                    f.write(chunk)
                    f.write(serialized_extra_info)
                    f.close()
                    return temp_file

                filenames = [
                    write_chunk(chunks[i], i) for i in range(len(chunks))
                ]
        else:
            filenames = []

    with ToolchainProfiler.profile_block('run_optimizer'):
        if len(filenames) > 0:
            if not use_native(passes,
                              source_map) or not get_native_optimizer():
                commands = map(
                    lambda filename: js_engine +
                    [JS_OPTIMIZER, filename, 'noPrintMetadata'] +
                    (['--debug'] if source_map else []) + passes, filenames)
            else:
                # use the native optimizer
                shared.logging.debug('js optimizer using native')
                assert not source_map  # XXX need to use js optimizer
                commands = map(
                    lambda filename: [get_native_optimizer(), filename] +
                    passes, filenames)
            #print [' '.join(command) for command in commands]

            cores = min(cores, len(filenames))
            if len(chunks) > 1 and cores >= 2:
                # We can parallelize
                if DEBUG:
                    print >> sys.stderr, 'splitting up js optimization into %d chunks, using %d cores  (total: %.2f MB)' % (
                        len(chunks), cores, total_size / (1024 * 1024.))
                with ToolchainProfiler.profile_block('optimizer_pool'):
                    pool = shared.Building.get_multiprocessing_pool()
                    filenames = pool.map(run_on_chunk, commands, chunksize=1)
            else:
                # We can't parallelize, but still break into chunks to avoid uglify/node memory issues
                if len(chunks) > 1 and DEBUG:
                    print >> sys.stderr, 'splitting up js optimization into %d chunks' % (
                        len(chunks))
                filenames = [run_on_chunk(command) for command in commands]
        else:
            filenames = []

        for filename in filenames:
            temp_files.note(filename)

    with ToolchainProfiler.profile_block('split_closure_cleanup'):
        if closure or cleanup or split_memory:
            # run on the shell code, everything but what we js-optimize
            start_asm = '// EMSCRIPTEN_START_ASM\n'
            end_asm = '// EMSCRIPTEN_END_ASM\n'
            cl_sep = 'wakaUnknownBefore(); var asm=wakaUnknownAfter(global,env,buffer)\n'

            with temp_files.get_file('.cl.js') as cle:
                c = open(cle, 'w')
                pre_1, pre_2 = pre.split(start_asm)
                post_1, post_2 = post.split(end_asm)
                c.write(pre_1)
                c.write(cl_sep)
                c.write(post_2)
                c.close()
                cld = cle
                if split_memory:
                    if DEBUG:
                        print >> sys.stderr, 'running splitMemory on shell code'
                    cld = run_on_chunk(js_engine +
                                       [JS_OPTIMIZER, cld, 'splitMemoryShell'])
                    f = open(cld, 'a')
                    f.write(suffix_marker)
                    f.close()
                if closure:
                    if DEBUG:
                        print >> sys.stderr, 'running closure on shell code'
                    cld = shared.Building.closure_compiler(
                        cld, pretty='minifyWhitespace' not in passes)
                    temp_files.note(cld)
                elif cleanup:
                    if DEBUG:
                        print >> sys.stderr, 'running cleanup on shell code'
                    next = cld + '.cl.js'
                    temp_files.note(next)
                    proc = subprocess.Popen(
                        js_engine +
                        [JS_OPTIMIZER, cld, 'noPrintMetadata', 'JSDCE'] +
                        (['minifyWhitespace']
                         if 'minifyWhitespace' in passes else []),
                        stdout=open(next, 'w'))
                    proc.communicate()
                    assert proc.returncode == 0
                    cld = next
                coutput = open(cld).read()

            coutput = coutput.replace('wakaUnknownBefore();', start_asm)
            after = 'wakaUnknownAfter'
            start = coutput.find(after)
            end = coutput.find(')', start)
            pre = coutput[:start] + '(function(global,env,buffer) {\n' + pre_2[
                pre_2.find('{') + 1:]
            post = post_1 + end_asm + coutput[end + 1:]

    with ToolchainProfiler.profile_block('write_pre'):
        filename += '.jo.js'
        f = open(filename, 'w')
        f.write(pre)
        pre = None

    with ToolchainProfiler.profile_block('sort_or_concat'):
        if not just_concat:
            # sort functions by size, to make diffing easier and to improve aot times
            funcses = []
            for out_file in filenames:
                funcses.append(split_funcs(open(out_file).read(), False))
            funcs = [item for sublist in funcses for item in sublist]
            funcses = None

            def sorter(x, y):
                diff = len(y[1]) - len(x[1])
                if diff != 0: return diff
                if x[0] < y[0]: return 1
                elif x[0] > y[0]: return -1
                return 0

            if not os.environ.get('EMCC_NO_OPT_SORT'):
                funcs.sort(sorter)

            if 'last' in passes and len(funcs) > 0:
                count = funcs[0][1].count('\n')
                if count > 3000:
                    print >> sys.stderr, 'info: Output contains some very large functions (%s lines in %s), consider building source files with -Os or -Oz, and/or trying OUTLINING_LIMIT to break them up (see settings.js; note that the parameter there affects AST nodes, while we measure lines here, so the two may not match up)' % (
                        count, funcs[0][0])

            for func in funcs:
                f.write(func[1])
            funcs = None
        else:
            # just concat the outputs
            for out_file in filenames:
                f.write(open(out_file).read())

    with ToolchainProfiler.profile_block('write_post'):
        f.write('\n')
        f.write(post)
        # No need to write suffix: if there was one, it is inside post which exists when suffix is there
        f.write('\n')
        f.close()

    return filename
        print "State", state, "from client @", client_address, "name: '", name, "'"
        GPIO.setmode(GPIO.BCM)  ## Use BCM pin numbering
        GPIO.setup(int(sys.argv[3]), GPIO.OUT)  ## Setup GPIO Pin to OUTPUT
        GPIO.output(int(sys.argv[3]), state)  ## State is true/false
        return True


if __name__ == "__main__":
    # Startup the fauxmo server
    fauxmo.DEBUG = True
    p = fauxmo.poller()
    u = fauxmo.upnp_broadcast_responder()
    u.init_socket()
    p.add(u)

    # Register the device callback as a fauxmo handler
    d = device_handler()
    for trig, port in d.TRIGGERS.items():
        fauxmo.fauxmo(trig, u, p, None, port, d)

    # Loop and poll for incoming Echo requests
    logging.debug("Entering fauxmo polling loop")
    while True:
        try:
            # Allow time for a ctrl-c to stop the process
            p.poll(100)
            time.sleep(0.1)
        except Exception, e:
            logging.critical("Critical exception: " + str(e))
            break
Example #38
0
 def critical(msg, *args, **kwargs):
     if not EnodebdLogger._LOGGER.propagate:
         logging.critical(msg, *args, **kwargs)
     EnodebdLogger._LOGGER.critical(msg, *args, **kwargs)
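
The wrapper above mirrors critical() records to the root logger whenever the named logger has propagation disabled, so handlers attached to the root still see the message. A minimal self-contained sketch of that pattern (the class and logger name here are stand-ins, not the original EnodebdLogger):

import logging

class MirroringLogger:
    # Stand-in for a module-level logger whose records should also reach the
    # root handlers even when propagation is switched off.
    _LOGGER = logging.getLogger('mirrored')

    @staticmethod
    def critical(msg, *args, **kwargs):
        if not MirroringLogger._LOGGER.propagate:
            logging.critical(msg, *args, **kwargs)  # copy to the root logger
        MirroringLogger._LOGGER.critical(msg, *args, **kwargs)

logging.basicConfig(level=logging.INFO)
MirroringLogger._LOGGER.propagate = False
MirroringLogger.critical('connection to the device lost')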
Example #39
0
import logging

logging.basicConfig(filename='logs/logfile.log',
                    filemode='a',
                    level=logging.DEBUG,
                    format=f'%(levelname)s → %(asctime)s → %(message)s',
                    datefmt="%Y/%m/%d %H:%M:%S")

logging.info('Info')
logging.debug('Debug')
logging.warning('Warning')
logging.error('Error')
logging.critical('Critical')
logging.warning('WARNING')
Example #40
0
def dzlog(msg, lvl='info', force=False, reset=False, arcmsg=False):

    lfmat = '%(asctime)-15s: %(message)s '
    callerframerecord = inspect.stack()[1]
    frame = callerframerecord[0]
    info = inspect.getframeinfo(frame)
    msg_log = msg + ": " + info.function + ": " + str(info.lineno)

    ##........................................................................##
    if arcmsg:
        arcpy.AddMessage(msg)

    ##........................................................................##
    if reset:

        while len(logging.root.handlers) > 0:
            logging.root.removeHandler(logging.root.handlers[-1])

    ##........................................................................##
    if len(logging.root.handlers) == 0 or reset:

        logging.basicConfig(filename=g_pn + os.sep + 'toolbox.log',
                            format=lfmat,
                            level=g_logging)

    ##........................................................................##
    if force:

        if lvl == 'debug':
            level = logging.DEBUG

        elif lvl == 'info':
            level = logging.INFO

        elif lvl == 'warning':
            level = logging.WARNING

        elif lvl == 'error':
            level = logging.ERROR

        elif lvl == 'critical':
            level = logging.CRITICAL

        if not logging.getLogger().isEnabledFor(level):

            while len(logging.root.handlers) > 0:
                logging.root.removeHandler(logging.root.handlers[-1])

            logging.basicConfig(filename=g_pn + os.sep + 'toolbox.log',
                                format=lfmat,
                                level=level)

    ##........................................................................##
    if lvl == 'debug':
        logging.debug(msg_log)

    elif lvl == 'info':
        logging.info(msg_log)

    elif lvl == 'warning':
        logging.warning(msg_log)

    elif lvl == 'error':
        logging.error(msg_log)

    elif lvl == 'critical':
        logging.critical(msg_log)
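
The reset/force branches above depend on logging.basicConfig() being a no-op once the root logger already has handlers, so the existing handlers must be removed before the level or file can change. A short sketch of that reset-then-reconfigure idiom, with a plain file name standing in for the g_pn-based path:

import logging

def reconfigure_root(filename, level):
    # basicConfig() does nothing if the root logger already has handlers,
    # so drop them first and then configure again with the new settings.
    while logging.root.handlers:
        logging.root.removeHandler(logging.root.handlers[-1])
    logging.basicConfig(filename=filename,
                        format='%(asctime)-15s: %(message)s ',
                        level=level)

reconfigure_root('toolbox.log', logging.INFO)
logging.info('visible at INFO')
reconfigure_root('toolbox.log', logging.CRITICAL)
logging.critical('still visible after raising the level')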
Example #41
0
    def _ParseTestOutput(self, p):
        """Process the test output.

    Args:
      p: An instance of pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
        results = base_test_result.TestRunResults()

        # Test case statuses.
        re_run = re.compile('\[ RUN      \] ?(.*)\r\n')
        re_fail = re.compile('\[  FAILED  \] ?(.*)\r\n')
        re_ok = re.compile('\[       OK \] ?(.*?) .*\r\n')

        # Test run statuses.
        re_passed = re.compile('\[  PASSED  \] ?(.*)\r\n')
        re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n')
        # Signal handlers are installed before starting tests
        # to output the CRASHED marker when a crash happens.
        re_crash = re.compile('\[ CRASHED      \](.*)\r\n')

        log = ''
        try:
            while True:
                full_test_name = None
                found = p.expect([re_run, re_passed, re_runner_fail],
                                 timeout=self._timeout)
                if found == 1:  # re_passed
                    break
                elif found == 2:  # re_runner_fail
                    break
                else:  # re_run
                    full_test_name = p.match.group(1).replace('\r', '')
                    found = p.expect([re_ok, re_fail, re_crash],
                                     timeout=self._timeout)
                    log = p.before.replace('\r', '')
                    if found == 0:  # re_ok
                        if full_test_name == p.match.group(1).replace(
                                '\r', ''):
                            results.AddResult(
                                base_test_result.BaseTestResult(
                                    full_test_name,
                                    base_test_result.ResultType.PASS,
                                    log=log))
                    elif found == 2:  # re_crash
                        results.AddResult(
                            base_test_result.BaseTestResult(
                                full_test_name,
                                base_test_result.ResultType.CRASH,
                                log=log))
                        break
                    else:  # re_fail
                        results.AddResult(
                            base_test_result.BaseTestResult(
                                full_test_name,
                                base_test_result.ResultType.FAIL,
                                log=log))
        except pexpect.EOF:
            logging.error('Test terminated - EOF')
            # We're here because either the device went offline, or the test harness
            # crashed without outputting the CRASHED marker (crbug.com/175538).
            if not self.adb.IsOnline():
                raise android_commands.errors.DeviceUnresponsiveError(
                    'Device %s went offline.' % self.device)
            if full_test_name:
                results.AddResult(
                    base_test_result.BaseTestResult(
                        full_test_name,
                        base_test_result.ResultType.CRASH,
                        log=p.before.replace('\r', '')))
        except pexpect.TIMEOUT:
            logging.error('Test terminated after %d second timeout.',
                          self._timeout)
            if full_test_name:
                results.AddResult(
                    base_test_result.BaseTestResult(
                        full_test_name,
                        base_test_result.ResultType.TIMEOUT,
                        log=p.before.replace('\r', '')))
        finally:
            p.close()

        ret_code = self.test_package.GetGTestReturnCode(self.adb)
        if ret_code:
            logging.critical(
                'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
                ret_code, p.before, p.after)

        return results
Example #42
0
import logging

# Simple configuration of the log output destination and level via the call below
logging.basicConfig(filename='logger.log', level=logging.DEBUG)

logging.debug('debug message')
logging.info('info message')
logging.warn('warn message')
logging.error('error message')
logging.critical('critical message')
Example #43
0
def throttler(once=False, sleep_time=600):
    """
    Main loop to check rse transfer limits.
    """

    logging.info('Throttler starting')

    executable = 'conveyor-throttler'
    hostname = socket.getfqdn()
    pid = os.getpid()
    hb_thread = threading.current_thread()
    heartbeat.sanity_check(executable=executable, hostname=hostname)
    # Make an initial heartbeat so that all throttlers have the correct worker number on the next try
    heart_beat = heartbeat.live(executable, hostname, pid, hb_thread)
    prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'],
                                         heart_beat['nr_threads'])
    logging.info(prepend_str + 'Throttler started - timeout (%s)' %
                 (sleep_time))

    current_time = time.time()
    graceful_stop.wait(10)

    while not graceful_stop.is_set():

        try:
            heart_beat = heartbeat.live(executable,
                                        hostname,
                                        pid,
                                        hb_thread,
                                        older_than=3600)
            prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'],
                                                 heart_beat['nr_threads'])
            if heart_beat['assign_thread'] != 0:
                logging.info(
                    prepend_str +
                    'Throttler thread id is not 0, will sleep. Only thread 0 will work'
                )
                if once:
                    break
                if time.time() < current_time + sleep_time:
                    graceful_stop.wait(
                        int((current_time + sleep_time) - time.time()))
                current_time = time.time()
                continue

            logging.info(prepend_str + "Throttler - schedule requests")
            __schedule_requests()

            if once:
                break
            if time.time() < current_time + sleep_time:
                graceful_stop.wait(
                    int((current_time + sleep_time) - time.time()))
            current_time = time.time()
        except Exception:
            logging.critical(prepend_str + 'Throttler crashed %s' %
                             (traceback.format_exc()))

        if once:
            break

    logging.info(prepend_str + 'Throttler - graceful stop requested')

    heartbeat.die(executable, hostname, pid, hb_thread)

    logging.info(prepend_str + 'Throttler - graceful stop done')
Example #44
0
from datetime import date

#logging.basicConfig(filename="checker.log", level=logging.INFO)
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(filename="checker.log",
                    format=FORMAT,
                    level=logging.INFO,
                    datefmt='%Y-%m-%d %H:%M:%S')

# GET Target Version - from file "checker.conf".
try:
    result = open("checker.conf", 'r')
    target_version = result.readline().strip()
except Exception as e:
    #print("EXCEPTION READING: version.conf. EXCEPTION IS: %s" % (e))
    logging.critical("EXCEPTION READING: checker.conf")
    sys.exit(1)

# GET APP DNS NAME
state = 0
with open('terraform.tfstate', 'r') as searchfile:
    for line in searchfile:
        if re.search(r'instance_state', line, re.M | re.I):
            state = "running"

if state == "running":
    result = subprocess.run(['terraform', 'output', 'aws_instance_public_dns'],
                            stdout=subprocess.PIPE)
    name = result.stdout.decode('utf-8').strip()
else:
    logging.critical("EXCEPTION: terraform.tfstate file shows not running")
Example #45
0
 def __import_algo(self, algo: str):
     try:
         return __import__(algo)
     except ImportError:
         logging.critical("Could not import algorithm " + algo)
         exit(-1)
Example #46
0
with extension_loader.DlopenGuard():
    try:
        from caffe2.python.caffe2_pybind11_state_gpu import *  # noqa
        if num_cuda_devices():  # noqa
            has_gpu_support = True
        else:
            has_gpu_support = False
    except ImportError as e:
        logging.warning('This caffe2 python run does not have GPU support. '
                        'Will run in CPU only mode.')
        logging.warning('Debug message: {0}'.format(str(e)))
        has_gpu_support = False
        try:
            from caffe2.python.caffe2_pybind11_state import *  # noqa
        except ImportError as e:
            logging.critical('Cannot load caffe2.python. Error: {0}'.format(
                str(e)))
            sys.exit(1)

# libcaffe2_python contains a global Workspace that we need to properly delete
# when exiting. Otherwise, cudart will cause segfaults sometimes.
atexit.register(on_module_exit)  # noqa


# Add functionalities for the TensorCPU interface.
def _TensorCPU_shape(self):
    return tuple(self._shape)


def _TensorCPU_reshape(self, shape):
    return self._reshape(list(shape))
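
The caffe2 import above follows a common shape: try the accelerated extension first, fall back to the plain build with a warning, and treat failure of both as fatal via logging.critical plus sys.exit. A small sketch of the same shape using stand-in modules (ujson as the optional import, the stdlib json as the fallback), not the caffe2 bindings themselves:

import logging
import sys

try:
    import ujson as json_impl      # optional accelerated parser
    fast_json = True
except ImportError as err:
    logging.warning('ujson not available, falling back to json: %s', err)
    fast_json = False
    try:
        import json as json_impl  # stdlib fallback
    except ImportError as err:
        logging.critical('Cannot load any json module: %s', err)
        sys.exit(1)

print('json backend in use:', json_impl.__name__)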
Example #47
0
def log_exception():
    return logging.critical(
        ('Exception while Running ' if __name__ ==
         '__main__' else 'Exception while Importing/using ') +
        str(Path(__file__).resolve()) +
        str(": {} ({})".format(whoami(), whosparent())))
Example #48
0
def main(argv):
  # ANDROID_SDK_ROOT needs to be set to the location of the SDK used to launch
  # the emulator to find the system images upon launch.
  emulator_sdk = constants.ANDROID_SDK_ROOT
  os.environ['ANDROID_SDK_ROOT'] = emulator_sdk

  arg_parser = argparse.ArgumentParser(description='AVD script.')
  sub_parsers = arg_parser.add_subparsers(title='subparser', dest='command')
  sub_parsers.add_parser(
      'kill', help='Shutdown all existing emulators')
  sub_parsers.add_parser(
      'delete', help='Deleting all the avd files')
  run_parser = sub_parsers.add_parser('run', help='Run emulators')
  run_parser.add_argument('--name', help='Optionally, name of existing AVD to '
                          'launch. If not specified, AVDs will be created')
  run_parser.add_argument('-n', '--num', dest='emulator_count',
                          help='Number of emulators to launch (default is 1).',
                          type=int, default='1')
  run_parser.add_argument('--abi', default='x86',
                          help='Platform of emulators to launch (x86 default)')
  run_parser.add_argument('--api-level', dest='api_level',
                          help='API level for the image',
                          type=int, default=constants.ANDROID_SDK_VERSION)
  run_parser.add_argument('--sdcard-size', dest='sdcard_size',
                          default=emulator.DEFAULT_SDCARD_SIZE,
                          help='Set sdcard size of the emulators'
                          ' e.g. --sdcard-size=512M')
  run_parser.add_argument('--partition-size', dest='partition_size',
                          default=emulator.DEFAULT_STORAGE_SIZE,
                          help='Default internal storage size'
                          ' e.g. --partition-size=1024M')
  run_parser.add_argument('--launch-without-kill', action='store_false',
                          dest='kill_and_launch', default=True,
                          help='Kill all emulators at launch')
  run_parser.add_argument('--enable-kvm', action='store_true',
                          dest='enable_kvm', default=False,
                          help='Enable kvm for faster x86 emulator run')

  arguments = arg_parser.parse_args(argv[1:])

  logging.root.setLevel(logging.INFO)

  devil_chromium.Initialize()

  if arguments.command == 'kill':
    logging.info('Killing all existing emulators and exiting the program')
    emulator.KillAllEmulators()
    return
  if arguments.command == 'delete':
    emulator.DeleteAllTempAVDs()
    return

  # Check if the SDK exists in ANDROID_SDK_ROOT
  if not install_emulator_deps.CheckSDK():
    raise Exception('Emulator SDK not installed in %s'
                     % constants.ANDROID_SDK_ROOT)

  # Check if KVM is enabled for x86 AVD
  if arguments.abi == 'x86':
    if not install_emulator_deps.CheckKVM():
      logging.warning('KVM is not installed or enabled')
      arguments.enable_kvm = False

  # Check if the targeted system image exists
  if not install_emulator_deps.CheckSystemImage(arguments.abi,
                                                arguments.api_level):
    logging.critical('ERROR: System image for %s AVD not installed. Run '
                     'install_emulator_deps.py', arguments.abi)
    return 1

  # If AVD is specified, check that the SDK has the required target. If not,
  # check that the SDK has the desired target for the temporary AVDs.
  api_level = arguments.api_level
  if arguments.name:
    android = os.path.join(constants.ANDROID_SDK_ROOT, 'tools',
                           'android')
    avds_output = cmd_helper.GetCmdOutput([android, 'list', 'avd'])
    names = re.findall(r'Name: (\w+)', avds_output)
    api_levels = re.findall(r'API level (\d+)', avds_output)
    try:
      avd_index = names.index(arguments.name)
    except ValueError:
      logging.critical('ERROR: Specified AVD %s does not exist.',
                       arguments.name)
      return 1
    api_level = int(api_levels[avd_index])

  if not install_emulator_deps.CheckSDKPlatform(api_level):
    logging.critical('ERROR: Emulator SDK missing required target for API %d. '
                     'Run install_emulator_deps.py.', api_level)
    return 1

  if arguments.name:
    emulator.LaunchEmulator(
        arguments.name,
        arguments.abi,
        enable_kvm=arguments.enable_kvm,
        kill_and_launch=arguments.kill_and_launch,
        sdcard_size=arguments.sdcard_size,
        storage_size=arguments.partition_size
    )
  else:
    emulator.LaunchTempEmulators(
        arguments.emulator_count,
        arguments.abi,
        arguments.api_level,
        enable_kvm=arguments.enable_kvm,
        kill_and_launch=arguments.kill_and_launch,
        sdcard_size=arguments.sdcard_size,
        storage_size=arguments.partition_size,
        wait_for_boot=True
    )
Example #49
0
def data2bids(args):

    # prepare parameter and path
    orig_dir = pjoin(args.projectdir, 'data', 'bold', 'orig')
    dicom_dir = pjoin(args.projectdir, 'data', 'bold', 'dicom')
    nifti_dir = pjoin(args.projectdir, 'data', 'bold', 'nifti')
    info_dir = pjoin(args.projectdir, 'data', 'bold', 'info')

    # prepare logging
    log_config(pjoin(nifti_dir, 'data2bids.log'))

    # step 2 Information Reorganization
    print(bcolors.BOLD_NODE + "[Node] Re-organizing..." + bcolors.ENDC)
    # filter at first
    # traverse the scaninfo
    # generate session:input - determine
    # generate session:task
    # generate task:feature
    scaninfo_raw = pd.read_excel(pjoin(info_dir, args.scaninfo))
    # pandas:https://www.cnblogs.com/ech2o/p/11831488.html

    if args.subject or args.session:
        scaninfo = scaninfo_raw
        if args.subject:
            logging.info('DATA2BIDS: Selected subject is {}'.format(args.subject))
            scaninfo = scaninfo[scaninfo['sub'].isin(args.subject)]
            scaninfo.reset_index(drop=True)
        if args.session:
            logging.info('DATA2BIDS: selected session is {}'.format(args.session))
            scaninfo = scaninfo[scaninfo['ses'].isin(args.session)]
            scaninfo.reset_index(drop=True)
        if args.quality_filter != 'all':
            logging.info('DATA2BIDS: quality filter is {}'.format(args.quality_filter))
            logging.info('DATA2BIDS: filtered scaninfo is in {}'.format(pjoin(nifti_dir, 'scaninfo_filtered.xlsx')))
            scaninfo = scaninfo[scaninfo['quality'] == args.quality_filter]
            scaninfo.reset_index(drop=True)
            scaninfo.to_excel(pjoin(nifti_dir, 'scaninfo_filtered.xlsx'))
    else:
        if args.quality_filter != 'all':
            logging.info('DATA2BIDS: quality filter is {}'.format(args.quality_filter))
            logging.info('DATA2BIDS: filtered scaninfo is stored in {}'.format(pjoin(nifti_dir, 'scaninfo_filtered.xlsx')))
            scaninfo = scaninfo_raw[scaninfo_raw['quality'] == args.quality_filter]
            scaninfo.reset_index(drop=True)
            scaninfo.to_excel(pjoin(nifti_dir, 'scaninfo_filtered.xlsx'))
        else:
            logging.info('DATA2BIDS: process all parts in {}'.format(args.scaninfo))
            scaninfo = scaninfo_raw

    # determine input of each session -- {sub-*/ses-* : *.tar.gz}
    session_input = session_input_dict(scaninfo)
    print("[news] Find {:d} session(s) waiting for processing..".format(len(session_input)))
    for key, value in session_input.items():
        print(bcolors.BOLD + "    {:s} ".format(key) + bcolors.ENDC + "from: {:s}".format(value))
    logging.info('DATA2BIDS: session-input mapping is stored in {}'.format(pjoin(nifti_dir, 'session-input.json')))


    # determine session-contained tasks --
    session_task = session_task_dict(scaninfo)
    print("[news] Tasks in each sub-session collected")
    heu_session_task = {}
    for key, value in session_task.items():
        print(bcolors.BOLD + "    {:s} ".format(key) + bcolors.ENDC + "contains: {}".format(value))
        s_key = (key.split('-')[-1]).strip(string.digits)
        if s_key not in heu_session_task.keys():
            heu_session_task[s_key] = value
        else:
            if set(value) - set(heu_session_task[s_key]):
                heu_session_task[s_key].extend(list(set(value) - set(heu_session_task[s_key])))
    print("[news] Found {} kinds of session:".format(len(heu_session_task)))
    for key, value in heu_session_task.items():
        print(bcolors.BOLD + "    {:s} ".format(key) + bcolors.ENDC + "contains: {}".format(value))
    logging.info('DATA2BIDS: session-task mapping is stored in {}'.format(pjoin(nifti_dir, 'session-task.json')))

    # determine task feature -- task : [protocolname dim]
    task_feature = task_feature_dict(scaninfo)
    print("[news] Task feature information collected..")
    for key, value in task_feature.items():
        print(bcolors.BOLD + "    {:s} : ".format(key) + bcolors.ENDC + "protocolname = " + \
              bcolors.BOLD + "{0[0]},".format(value) + bcolors.ENDC + " dim = " + \
              bcolors.BOLD + "{0[1]} ".format(value) + bcolors.ENDC)
    logging.info('DATA2BIDS: task-feature mapping is stored in {}'.format(pjoin(nifti_dir, 'task-feature.json')))

    # step 3 Unpack
    if not args.skip_unpack:
        print(bcolors.BOLD_NODE + "[Node] Unpacking..." + bcolors.ENDC)

        for _value in tqdm([__ for __ in session_input.values()]):
            # unpack
            if not glob.glob(pjoin(dicom_dir, _value.replace('.tar.gz', ''))):
                cmd = "tar -xzvf {:s} -C {:s}".format(pjoin(orig_dir, _value), dicom_dir)
                logging.info('Unpack command: {:s}'.format(cmd))
                print("[news] Running command: {:s}".format(cmd))
                if not args.preview:
                    runcmd(cmd)

    # step 4 Heuristic.py generation
    print(bcolors.BOLD_NODE + "[Node] Heuristic.py Generating..." + bcolors.ENDC)
    # task_feature & heu_session_task will be used
    for key, value in heu_session_task.items():
        check_path(pjoin(nifti_dir, 'code', key))
        file = pjoin(nifti_dir, 'code', key, 'heuristic.py')

        heu_creation = heucreation(file)
        heu_creation.create_heuristic(value, task_feature)
    print("[news] Heuristic.py completion!")

    # step 5 heudiconv
    print(bcolors.BOLD_NODE + "[Node] BIDS converting..." + bcolors.ENDC)
    # session_input will be used
    for _key, _value in tqdm(session_input.items()):
        dicom_files = pjoin(dicom_dir, _value).replace('.tar.gz', '/*.IMA')
        subID, sesID = _key.split('/')[0].replace('sub-', ''), _key.split('/')[1].replace('ses-', '')

        if not args.skip_feature_validation:
            # feature validation
            if args.overwrite:
                cmd = "heudiconv --files {:s} -o {:s} -f convertall -s {:s} -ss {:s} -c none --overwrite" \
                    .format(dicom_files, nifti_dir, subID, sesID)
            else:
                cmd = "heudiconv --files {:s} -o {:s} -f convertall -s {:s} -ss {:s} -c none" \
                    .format(dicom_files, nifti_dir, subID, sesID)
            print("[news] inspecting task feature in dicominfo.tsv")
            logging.info('Heudiconv command: {:s}'.format(cmd))
            print("[news] command:" + bcolors.BOLD + " {}".format(cmd) + bcolors.ENDC)
            if not args.preview:
                runcmd(cmd)
                dicominfo = pd.read_csv("{:s}/.heudiconv/{:s}/info/dicominfo_ses-{:s}.tsv" \
                                        .format(nifti_dir, subID, sesID), sep='\t')
                dicominfo_scan_feature = list(
                    set([(dicominfo.iloc[_run, :]['protocol_name'], dicominfo.iloc[_run, :]['dim4']) \
                             if dicominfo.iloc[_run, :]['dim4'] != 1 else (
                        dicominfo.iloc[_run, :]['protocol_name'], dicominfo.iloc[_run, :]['dim3']) \
                         for _run in range(len(dicominfo))]))

                _check = []
                for _task in session_task[_key]:
                    _feature = (task_feature[_task][0], task_feature[_task][1])
                    if 'anat' in _task:
                        if not any([_feature[0] == __[0] for __ in dicominfo_scan_feature]):
                            _check.append(any([_feature[0] == __[0] for __ in dicominfo_scan_feature]))
                            logging.critical("'{:s}' protocol name mismtach! Found no {:s} in {:s}/.heudiconv/{:s}/info/dicominfo_ses-{:s}.tsv" \
                                             .format(_task, _feature[0], nifti_dir, subID, sesID))
                            print(bcolors.FAIL + \
                                  "[ERROR] '{:s}' protocol name mismtach! Found no {:s} in {:s}/.heudiconv/{:s}/info/dicominfo_ses-{:s}.tsv" \
                                  .format(_task, _feature[0], nifti_dir, subID, sesID) + bcolors.ENDC)
                    else:
                        if not _feature in dicominfo_scan_feature:
                            _check.append(_feature in dicominfo_scan_feature)
                            logging.critical('"'+_task+'" protocol name mismtach! Found no '+str(_feature)+' in '+nifti_dir+'/.heudiconv/'+subID+'/info/dicominfo_ses-'+sesID+'.tsv')
                            print('[ERROR] "'+_task+'" protocol name mismtach! Found no '+str(_feature)+' in '+nifti_dir+'/.heudiconv/'+subID+'/info/dicominfo_ses-'+sesID+'.tsv')
                if not all(_check):
                    logging.critical('Feature validation failure!')
                    raise AssertionError(
                        '[ERROR] Feature validation failure! Please read [ERROR] message above or log for more details!')
                print(bcolors.BOLD + "[news] Feature validation seuccess!" + bcolors.ENDC)
                del _task, _feature

        heuristicpy = pjoin(nifti_dir, 'code', sesID.strip(string.digits), 'heuristic.py')
        if args.overwrite:
            cmd = "heudiconv --files {:s} -o {:s} -f {:s} -s {:s} -ss {:s} -c dcm2niix -b --overwrite" \
                .format(dicom_files, nifti_dir, heuristicpy, subID, sesID)
        else:
            cmd = "heudiconv --files {:s} -o {:s} -f {:s} -s {:s} -ss {:s} -c dcm2niix -b" \
                .format(dicom_files, nifti_dir, heuristicpy, subID, sesID)
        print("[news] Processing sub-{:s}/ses-{:s}".format(subID, sesID))
        logging.info("Heudiconv command (overwrite): {:s}".format(cmd))
        print("command: " + bcolors.BOLD + "{:s}".format(cmd) + bcolors.ENDC)
        if not args.preview:
            runcmd(cmd, timeout=3600)

    # fill fmap json files
    print(bcolors.BOLD_NODE + "[Node] .json Filling up..." + bcolors.ENDC)

    if args.subject:
        subjects = ["{:02d}".format(int(_)) for _ in args.subject]
    else:
        subjects = [_.replace("sub-", "") for _ in os.listdir(nifti_dir) if "sub-" in _]

    sessions = {name: [] for name in (subjects)}  # {sub : [session]}
    jsonfiles = {name: {sesname: [] for sesname in sessions[name]} for name in
                 (subjects)}  # {sub:{session:[files]}}
    intendedfornii = {name: {sesname: [] for sesname in sessions[name]} for name in
                      (subjects)}  # {sub:{session:[files]}}

    # collect .json files waiting to fill & .nii.gz filenames
    if not args.preview:
        print("[news] collect .json files waiting to fill & .nii.gz filenames")
        for subname in sessions.keys():
            # get all the sessions under a subject
            subpth = pjoin(nifti_dir, 'sub-%s' % (subname))
            sessions[subname] = os.listdir(subpth)
            # collect jsonfiles & values
            for fold in sessions[subname]:
                # path preparation
                sesspth = pjoin(nifti_dir, subpth, fold)
                fmappth = pjoin(sesspth, 'fmap')
                funcpth = pjoin(sesspth, 'func')
                # if fmap exists then collect
                if os.path.exists(fmappth):
                    jsonfiles[subname][fold] = [file for file in os.listdir(fmappth) if '.json' in file]
                    # the file path must be the relative path to sub- folder
                    intendedfornii[subname][fold] = ['%s/func/%s' % (fold, file) for file in os.listdir(funcpth) if
                                                     '.nii.gz' in file]

    # write key:value for each json
        print("[news] write key:value for each json")
        for sub, ses_fold in jsonfiles.items():
            for ses, files in ses_fold.items():
                for file in files:
                    # file path
                    file_path = os.path.join(nifti_dir, 'sub-%s/%s' % (sub, ses), 'fmap', file)
                    
                    # change mode
                    chmod_cmd = ' '.join(['chmod', '755', file_path])
                    runcmd(chmod_cmd, timeout=3600)

                    # load in & add IntendedFor
                    with open(file_path, 'r') as datafile:
                        data = json.load(datafile)
                    data['IntendedFor'] = intendedfornii[sub][ses]

                    # save out
                    with open(file_path, 'w') as datafile:
                        json.dump(data, datafile)
            print('[news] fill up json for %s ... done!' % sub)

    print('Log is saved in {}.'.format(pjoin(nifti_dir, 'data2bids.log')))
Example #50
0
def _run_cmake_build(cmake_build_dir, args):
    """
    Run the Cmake build

    :param cmake_build_dir: Base directory for Cmake build
    :param args: Command-line arguments
    """

    cmake_cmd = ["cmake", "../", "-GUnix Makefiles"]

    if args.suite == "IPC":
        cmake_cmd.append("-DTARGET=" + PSA_API_TARGETS.get(args.mcu)[2])
        cmake_cmd.append("-DPLATFORM_PSA_ISOLATION_LEVEL=1")
        cmake_cmd.append("-DSP_HEAP_MEM_SUPP=0")
    else:
        cmake_cmd.append("-DTARGET=" + PSA_API_TARGETS.get(args.mcu)[1])

    cmake_cmd.append("-DCPU_ARCH=" + PSA_API_TARGETS.get(args.mcu)[0])

    cmake_cmd.append("-DSUITE=" + args.suite)

    if args.verbose:
        cmake_cmd.append("-DVERBOSE=" + str(args.verbose))

    if args.include:
        cmake_cmd.append("-DPSA_INCLUDE_PATHS=" + args.include)
    else:
        # Take in defaults
        suite_include = join(
            TF_M_BUILD_DIR, "trusted-firmware-m", "interface", "include"
        )

        if args.suite == "CRYPTO":
            crypto_include = join(
                TF_M_BUILD_DIR, "psa-arch-tests", "mbed-crypto", "include"
            )
            cmake_cmd.append("-DPSA_INCLUDE_PATHS=" + crypto_include)
        elif args.suite == "IPC":
            manifest_include = join(
                TF_M_BUILD_DIR,
                "psa-arch-tests",
                "api-tests",
                "platform",
                "manifests",
            )
            cmake_cmd.append(
                "-DPSA_INCLUDE_PATHS=" + manifest_include + ";" + suite_include
            )
        else:
            # Applicable for INITIAL_ATTESTATION, INTERNAL_TRUSTED_STORAGE
            # PROTECTED_STORAGE and STORAGE suites.
            cmake_cmd.append("-DPSA_INCLUDE_PATHS=" + suite_include)

    if args.range:
        cmake_cmd.append("-DSUITE_TEST_RANGE=" + args.range)

    retcode = run_cmd_output_realtime(cmake_cmd, cmake_build_dir)
    if retcode:
        msg = "Cmake configure failed for target %s using toolchain %s" % (
            args.mcu,
            args.toolchain,
        )
        logging.critical(msg)
        sys.exit(1)

    cmake_cmd = ["cmake", "--build", "."]

    retcode = run_cmd_output_realtime(cmake_cmd, cmake_build_dir)
    if retcode:
        msg = "Cmake build failed for target %s using toolchain %s" % (
            args.mcu,
            args.toolchain,
        )
        logging.critical(msg)
        sys.exit(1)
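
run_cmd_output_realtime() is defined elsewhere in the build tooling; as an assumption about its behaviour, a minimal stand-in that streams the command output as it is produced and returns the exit code could look like this:

import subprocess

def run_cmd_output_realtime(cmd, cwd):
    # Stream combined stdout/stderr line by line and return the exit code
    # (0 means success, matching the retcode checks above).
    proc = subprocess.Popen(cmd, cwd=cwd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)
    for line in proc.stdout:
        print(line, end='')
    return proc.wait()

# e.g. retcode = run_cmd_output_realtime(["cmake", "--build", "."], cmake_build_dir)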
Example #51
0
def main(args=None):
    args = parser.parse_args(args)
    config.read(args.config)

    try:
        logging.basicConfig(
            format="%(levelname)s:%(message)s", level=config["logging"]["log-level"]
        )
    except ValueError:
        logging.warning(
            f"Incorrect log-level specified: {config['logging']['log-level']}. "
            "Falling back to INFO."
        )
        logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)

    try:
        realm = Realm[config["api"]["realm"]]
    except KeyError:
        logging.critical(
            f"Configured realm \"{config['api']['realm']}\" is unknown. "
            f"Choose one of: {', '.join(Realm.__members__.keys())}"
        )
        sys.exit(1)

    if not len(config["accounts"]):
        logging.warning(
            "There are no configured accounts, nothing to do. "
            'Check the "accounts" section in the config file.'
        )

    try:
        account_data = account_info(
            realm,
            config["api"]["application-id"],
            list(config["accounts"].keys()),
        ).values()
    except ValueError as e:
        logging.critical(e)
        sys.exit(1)

    flat_account_data = [
        timestamps_to_datetime(flatten(data, strip=True), keys=TIME_FIELDS)
        for data in account_data
    ]

    rows = [
        {column.name: data[column.name] for column in statistics.columns}
        for data in flat_account_data
    ]

    try:
        with sa.create_engine(config["db"]["url"]).connect() as conn:
            changed = False
            for row in rows:
                logging.info(
                    f"Attempting insert {row['nickname']} @ {row['updated_at']}"
                )
                try:
                    conn.execute(
                        statistics.insert()
                        .values(row)
                        .compile(dialect=postgresql.dialect())
                    )
                    changed = True
                    logging.info("Insert successful")
                except IntegrityError as e:
                    if not isinstance(e.orig, psycopg2.errors.UniqueViolation):
                        raise e from e
                    logging.info(f"Skipping, record exists")

            if config["plots"] and changed:
                logging.info("Change detected, updating plots")
                df = pd.read_sql(
                    "SELECT * from statistics ORDER BY updated_at",
                    conn,
                    index_col=["account_id", "updated_at"],
                )
                plt.style.use("Solarize_Light2")
                for path, interval_str in config["plots"].items():
                    if interval_str:
                        figure = create_plot(
                            df[
                                df.index.get_level_values(1)
                                > (
                                    pd.Timestamp.now(tz="UTC")
                                    - pd.Timedelta(interval_str)
                                )
                            ]
                        )
                    else:
                        figure = create_plot(df)
                    logging.info(f"Saving {path}")
                    figure.savefig(path)

    except sa.exc.OperationalError as e:
        logging.critical(f"Invalid database URL: {config['db']['url']} ({e})")
        sys.exit(1)
    except sa.exc.NoSuchModuleError:
        logging.critical(f"Invalid protocol in database URL: {config['db']['url']}")
        sys.exit(1)
Example #52
0
######################################################
# In-File Config, prework
######################################################
#Read Config
config = ConfigParser()
configpath = os.path.join(os.path.dirname(__file__), '..', 'config')
try:
    config.read(
        os.path.join(os.path.dirname(__file__), '..', 'config',
                     'config_main.ini'))
except Exception:
    try:
        shutil.copyfile(os.path.join(configpath, 'config_main.ini.back'),
                        os.path.join(configpath, 'config_main.ini'))
    except Exception:
        logging.critical("""config_main.ini not found, 
            backup couldn't be restored, aborting""")
    else:
        logging.warn("""copied config_main.ini.bak to config_main.ini 
            because python couldn't find a valid config""")
else:
    logging.debug("read config_main.ini successfully")
# format color dictionary
colors_raw = config._sections['colors']
colors = {}
for color in colors_raw:
    colors[color] = chr(0x1b) + colors_raw[color]


######################################################
# Functions, Methods (Code)
######################################################
Example #53
0
def main():
    argparser = argparse.ArgumentParser(description='Print APK size metrics.')
    argparser.add_argument(
        '--min-pak-resource-size',
        type=int,
        default=20 * 1024,
        help='Minimum byte size of displayed pak resources.')
    argparser.add_argument('--chromium-output-directory',
                           dest='out_dir',
                           help='Location of the build artifacts.')
    argparser.add_argument('--chartjson',
                           action='store_true',
                           help='Sets output mode to chartjson.')
    argparser.add_argument('--output-dir',
                           default='.',
                           help='Directory to save chartjson to.')
    argparser.add_argument(
        '--dump-static-initializers',
        action='store_true',
        dest='dump_sis',
        help='Run dump-static-initializers.py to get the list '
        'of static initializers (slow).')
    argparser.add_argument('--loadable_module',
                           action='append',
                           help='Use for libraries added via loadable_modules')
    argparser.add_argument(
        '--estimate-patch-size',
        action='store_true',
        help='Include patch size estimates. Useful for perf '
        'builders where a reference APK is available but adds '
        '~3 mins to run time.')
    argparser.add_argument(
        '--reference-apk-builder',
        default=apk_downloader.DEFAULT_BUILDER,
        help='Builder name to use for reference APK for patch '
        'size estimates.')
    argparser.add_argument('--reference-apk-bucket',
                           default=apk_downloader.DEFAULT_BUCKET,
                           help='Storage bucket holding reference APKs.')
    argparser.add_argument('apk', help='APK file path.')
    args = argparser.parse_args()

    chartjson = _BASE_CHART.copy() if args.chartjson else None
    out_dir, tool_prefix = _ConfigOutDirAndToolsPrefix(args.out_dir)
    if args.dump_sis and not out_dir:
        argparser.error(
            '--dump-static-initializers requires --chromium-output-directory')

    # Do not add any new metrics without also documenting them in:
    # //docs/speed/binary_size/metrics.md.

    PrintApkAnalysis(args.apk, tool_prefix, out_dir, chartjson=chartjson)
    _PrintDexAnalysis(args.apk, chartjson=chartjson)

    ignored_libs = args.loadable_module if args.loadable_module else []

    si_count = AnalyzeStaticInitializers(args.apk, tool_prefix, args.dump_sis,
                                         out_dir, ignored_libs)
    perf_tests_results_helper.ReportPerfResult(chartjson,
                                               'StaticInitializersCount',
                                               'count', si_count, 'count')

    if args.estimate_patch_size:
        _PrintPatchSizeEstimate(args.apk,
                                args.reference_apk_builder,
                                args.reference_apk_bucket,
                                chartjson=chartjson)
    if chartjson:
        results_path = os.path.join(args.output_dir, 'results-chart.json')
        logging.critical('Dumping json to %s', results_path)
        with open(results_path, 'w') as json_file:
            json.dump(chartjson, json_file)
Example #54
0
    def main(self, cfg=None):
        if cfg == None:
            cfg = self.cfg
        else:
            self.__init__(cfg)

        try:
            # Added to file read to catch multiple columns with same name which causes second column to overwrite first. - GDB
            file_handle = open(cfg["input"], 'rU')
            csv_reader = csv.reader(file_handle)
            l = csv_reader.next()
            if len(l) > len(set(l)):
                logging.error(l)
                raise KeyError(
                    "Input file has multiple columns of the same name.  Please create unique columns and rerun."
                )
                # exit(1)
            else:
                file_handle.seek(0)
                infile = csv.DictReader(file_handle)
            # infile = csv.DictReader(open(args.filename,'rU'))  # Old File Read - gdb
        except IOError:
            logging.critical("ERROR: Input file not found.")
            raise
            # exit(1)

        infile.fieldnames = [
            f.decode('unicode_escape').encode('ascii', 'ignore')
            for f in infile.fieldnames
        ]  # remove unicode - gdb 170130

        for f in infile.fieldnames:
            if f not in self.sfields:
                if f != "repeat":
                    logging.warning(
                        "column will not be used: %s. May be inaccurate for 'plus' columns.",
                        f)
        if 'plus.analyst' not in infile.fieldnames:
            logging.warning(
                "the optional plus.analyst field is not found in the source document"
            )
        if 'source_id' not in infile.fieldnames:
            logging.warning(
                "the optional source_id field is not found in the source document"
            )

        row = 0
        for incident in infile:
            row += 1
            # have to look for white-space only and remove it
            try:
                incident = {x: incident[x].strip() for x in incident}
            except AttributeError as e:
                logging.error(
                    "Error removing white space on row {1}.".format(row))
                raise e

            if 'incident_id' in incident:
                iid = incident['incident_id']
            else:
                iid = "srcrow_" + str(row)
            logging.debug("Starting incident {0} on row {1}.".format(iid, row))
            # logging.warning("This includes the row number")
            repeat = 1
            logging.info("-----> parsing incident %s", iid)
            if incident.has_key('repeat'):
                if incident['repeat'].lower(
                ) == "ignore" or incident['repeat'] == "0":
                    logging.info(
                        "Skipping row %s because 'repeat' is either 'ignore' or '0'.",
                        iid)
                    continue
                repeat = self.isnum(incident['repeat'])
                if not repeat:
                    repeat = 1
            if incident.has_key('security_incident'):
                if incident['security_incident'].lower() == "no":
                    logging.info(
                        "Skipping row %s because security_incident is 'no'.",
                        iid)
                    continue
            outjson = self.convertCSV(incident)

            while repeat > 0:
                if 'plus' not in outjson:
                    outjson['plus'] = {}
                outjson['plus']['master_id'] = str(uuid.uuid4()).upper()
                yield iid, outjson
                # outjson['incident_id'] = str(uuid.uuid4()).upper()     ### HERE
                # outjson['plus']['master_id'] = outjson['incident_id']  ###
                repeat -= 1
                if repeat > 0:
                    logging.info("Repeating %s more times on %s", repeat, iid)
Example #55
0
    def __call__(self, doc):
        """
        Overlays entity annotations over tokens in a Doc object. Requires that tokens in the Doc have the custom
        'gold_annotation_file' and 'file_name' extension.
        :param doc: a spaCy Doc object.
        :return: the same Doc object, but it now has 'gold_label' annotations.
        """

        file_name = doc._.file_name
        logging.debug(f"{file_name}: Called GoldAnnotator Component")

        failed_overlay_count = 0
        failed_identifying_span_count = 0

        # check if gold annotation file path has been set.
        if not hasattr(doc._, 'gold_annotation_file'):
            logging.warning(
                f"doc._.gold_annotation_file not defined for {file_name}; "
                f"it will not be possible to fit a model with this Doc")
            return doc

        gold_annotations = Annotations(doc._.gold_annotation_file)

        for ent in gold_annotations:
            if ent.start > ent.end:
                logging.critical(
                    f"{file_name}: Broken annotation - start is greater than end: {ent}"
                )
                continue

            span = doc.char_span(ent.start, ent.end)

            if span is None:
                failed_overlay_count += 1
                failed_identifying_span_count += 1

            fixed_span = self.find_span(ent.start, ent.end, doc)
            if fixed_span is not None:
                if span is None:
                    logging.warning(
                        f"{file_name}: Fixed {ent} into: {fixed_span.text}")
                    failed_identifying_span_count -= 1

                for token in fixed_span:
                    if ent.tag in self.labels or not self.labels:
                        token._.set('gold_label', ent.tag)

            else:
                # Annotation was not able to be fixed, it will be ignored - this is bad in evaluation.
                logging.warning(
                    f"{file_name}: Could not fix annotation: {ent}")

        logging.warning(
            f"{file_name}: Number of failed annotation overlays with current tokenizer: {failed_overlay_count}"
        )

        if failed_overlay_count > .3 * len(gold_annotations):
            logging.critical(
                f"{file_name}: More than 30% of annotations failed to overlay")

        return doc
Example #56
0
    def dump(self, remoteName, remoteHost):
        """Dumps the list of endpoints registered with the mapper
        listening at addr. remoteName is a valid host name or IP
        address in string format.
        """

        logging.info('Retrieving endpoint list from %s' % remoteName)

        entries = []

        stringbinding = self.KNOWN_PROTOCOLS[
            self.__port]['bindstr'] % remoteName
        logging.debug('StringBinding %s' % stringbinding)
        rpctransport = transport.DCERPCTransportFactory(stringbinding)
        rpctransport.set_dport(self.__port)

        if self.KNOWN_PROTOCOLS[self.__port]['set_host']:
            rpctransport.setRemoteHost(remoteHost)

        if hasattr(rpctransport, 'set_credentials'):
            # This method exists only for selected protocol sequences.
            rpctransport.set_credentials(self.__username, self.__password,
                                         self.__domain, self.__lmhash,
                                         self.__nthash)

        try:
            entries = self.__fetchList(rpctransport)
        except Exception as e:
            logging.critical('Protocol failed: %s' % e)

        # Display results.

        endpoints = {}
        # Let's group the UUIDs
        for entry in entries:
            binding = epm.PrintStringBinding(entry['tower']['Floors'],
                                             rpctransport.getRemoteHost())
            tmpUUID = str(entry['tower']['Floors'][0])
            if (tmpUUID in endpoints) is not True:
                endpoints[tmpUUID] = {}
                endpoints[tmpUUID]['Bindings'] = list()
            if uuid.uuidtup_to_bin(
                    uuid.string_to_uuidtup(tmpUUID))[:18] in epm.KNOWN_UUIDS:
                endpoints[tmpUUID]['EXE'] = epm.KNOWN_UUIDS[
                    uuid.uuidtup_to_bin(uuid.string_to_uuidtup(tmpUUID))[:18]]
            else:
                endpoints[tmpUUID]['EXE'] = 'N/A'
            endpoints[tmpUUID]['annotation'] = entry['annotation'][:-1].decode(
                'utf-8')
            endpoints[tmpUUID]['Bindings'].append(binding)

            if tmpUUID[:36] in epm.KNOWN_PROTOCOLS:
                endpoints[tmpUUID]['Protocol'] = epm.KNOWN_PROTOCOLS[
                    tmpUUID[:36]]
            else:
                endpoints[tmpUUID]['Protocol'] = "N/A"
            #print "Transfer Syntax: %s" % entry['Tower']['Floors'][1]

        for endpoint in list(endpoints.keys()):
            print("Protocol: %s " % endpoints[endpoint]['Protocol'])
            print("Provider: %s " % endpoints[endpoint]['EXE'])
            print("UUID    : %s %s" %
                  (endpoint, endpoints[endpoint]['annotation']))
            print("Bindings: ")
            for binding in endpoints[endpoint]['Bindings']:
                print("          %s" % binding)
            print("")

        if entries:
            num = len(entries)
            if 1 == num:
                logging.info('Received one endpoint.')
            else:
                logging.info('Received %d endpoints.' % num)
        else:
            logging.info('No endpoints found.')
Example #57
0
        tgt_index_fpath = "{}.{}.ann".format(args.tgt, args.emb)
        tgt_index.load(tgt_index_fpath)
        logging.info("Loaded {}".format(tgt_index_fpath))

    args.src_index = src_index
    args.tgt_index = tgt_index
    args.w2v = w2v

    # Some checks of the size
    try:
        if src_index is not None:
            assert src_index.get_n_items() > 0
        if tgt_index is not None:
            assert tgt_index.get_n_items() > 0
    except AssertionError:
        logging.critical("Error loading Annoy indices. SRC size: {} TGT size: {}".format(
                           src_index.get_n_items(), tgt_index.get_n_items()))

    logging.warning("Starting alignment of {} source and {} target documents...".format(
        src_index.get_n_items() if src_index is not None else "LAZY", tgt_index.get_n_items() if tgt_index is not None else "LAZY"))
    logging.warning("Will use the {} metric for refinement.".format(args.refine))
    start_time = time.time()
    batch_time = time.time()

    args_dict = vars(args).copy()
    for key in ["emb", "level", "annoy_metric", "vec_size", "lazy_source", "lazy_target"]:
        del args_dict[key]

    aligner = None
    try:
        assert args.level in SUPPORTED_ALIGN_LEVELS
    except AssertionError as e:
Example #58
0
def do_log_example():
    logging.debug('Debug Info')
    logging.info('Info')
    logging.warning('Warning')
    logging.error('Error')
    logging.critical('Critical Error')
Example #59
0
import os, time
import subprocess, select
import nexmo

# Initialization
log = logging.getLogger('sshalert')
log.setLevel(logging.INFO)

try:
    source_phone_number = os.getenv("SOURCE_PHONE_NUMBER")
    target_phone_number = os.getenv("TARGET_PHONE_NUMBER")
    nexmo_key = os.getenv("NEXMO_KEY")
    nexmo_secret = os.getenv("NEXMO_SECRET")
except:
    logging.critical(
        "ERROR: Have you exported all required environment variables? (TARGET_PHONE_NUMBER, NEXMO_KEY, NEXMO_SECRET)"
    )
    exit(1)

# Initialize the nexmo client
nexmo_client = nexmo.Client(key=nexmo_key, secret=nexmo_secret)


def poll_logfile(filename):
    """
    Polls a logfile for sudo commands or ssh logins.
    """
    f = subprocess.Popen(["tail", "-F", "-n", "0", filename],
                         encoding="utf8",
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
Example #60
0
 def blackHole(self, Ip):
     logging.critical("{} Has Been Block By IpTables!".format(Ip))
     # 屏蔽IP
     os.system("iptables -I INPUT -s {} -j DROP".format(Ip))
     return False
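
The blackHole method above builds the iptables command with string formatting and os.system. A subprocess-based variant (a sketch of an alternative, not the original code) passes the address as a separate argument so it is never interpreted by a shell:

import logging
import subprocess

def black_hole(ip):
    logging.critical("%s has been blocked by iptables", ip)
    # Argument list instead of a formatted shell string.
    subprocess.run(["iptables", "-I", "INPUT", "-s", ip, "-j", "DROP"], check=False)
    return False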