Code example #1
    def _get_lib_depends(self, lib_dict, lib_list):
        for lib_name in listify(lib_list):
            lib_keys = listify(self._get_matching_lib_keys(lib_name))
            if len(lib_keys) == 1:
                # Matched internal library
                lib_key = lib_keys[0]
                # Search for the found library dependencies
                libs, cpp_paths, lib_paths = self._libs[lib_key][
                    'lib_obj'].GetLib()
                lib_dict['CPPPATH'].extend(cpp_paths)
                lib_dict['LIBS'].extend(libs)
                lib_dict['LIBPATH'].extend(lib_paths)

                # Get the dependencies
                self._get_lib_depends(lib_dict,
                                      self._libs[lib_key]['lib_deps'])
            elif len(lib_keys) > 1:
                # Matched multiple internal libraries - probably bad!
                msg = "Library identifier \'%s\' matched %d libraries (%s). Please use a fully qualified identifier instead!" % (
                    lib_name, len(lib_keys), ', '.join(lib_keys))
                Exit(msg)
            else:  # empty lib_keys
                msg = "Library identifier \'%s\' didn\'t match any library. Is it a typo?" % (
                    lib_name)
                Exit(msg)
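Every example on this page funnels flexible arguments through a listify helper. Each project ships its own variant, so the following is only a minimal sketch of the common behaviour; the treatment of None, strings, and other iterables here is an assumption, not any particular project's contract.

def listify(obj):
    """Hypothetical sketch: coerce obj into a plain list."""
    if obj is None:
        return []                  # no value -> empty list
    if isinstance(obj, (list, tuple, set)):
        return list(obj)           # already a collection -> copy into a list
    if isinstance(obj, str):
        return [obj]               # strings count as scalars, not iterables
    try:
        return list(obj)           # any other iterable
    except TypeError:
        return [obj]               # plain scalar -> single-element list

# Behaviour under this sketch:
#   listify(None)       -> []
#   listify('lib_a')    -> ['lib_a']
#   listify(('a', 'b')) -> ['a', 'b']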
Code example #2
    def _build_alias_executable(self, alias, prog_name, module_name,
                                prog_version, sources, link_libs, *args,
                                **kwargs):
        lib_nodes = []
        # Store the kwarg flags
        cpp_paths = listify(kwargs.pop('CPPPATH', list()))
        ext_libs = listify(kwargs.pop('LIBS', list()))
        lib_paths = listify(kwargs.pop('LIBPATH', list()))

        # Get the dependencies for the executable
        libs_dict = self._shared_libs.GetLibraries(link_libs)

        # Extend the stored flags with the lib dependencies
        cpp_paths.extend(libs_dict['CPPPATH'])
        ext_libs.extend(libs_dict['LIBS'])
        lib_paths.extend(libs_dict['LIBPATH'])

        # Copy back the dependencies to the kwargs
        kwargs['CPPPATH'] = cpp_paths
        kwargs['LIBS'] = ext_libs
        kwargs['LIBPATH'] = lib_paths

        exec_path = self.env['BUILD_DIR'] + '/'
        obj_nodes = (self._build_default_objects(module_name, sources))
        prog_nodes = self.env.Program(exec_path + prog_name, obj_nodes,
                                      **kwargs)
        # Create an alias target for the lib
        #if self._target in BUILD_TARGETS:
        self.createAlias(alias, prog_nodes)
        self.createAlias(module_name, prog_nodes)
Code example #3
    def update_task(self, task, properties=None, event_name=None,
                    event_type=EVENT_TYPE_INFO, message=None, details=None):
        """
            Updates the specified properties of the specified MBSTask object
        """
        task_doc = task.to_document()
        q = {
            "_id": task.id
        }

        u = {}
        # construct $set operator
        if properties:
            properties = listify(properties)
            u["$set"] = {}
            for prop in properties:
                u["$set"][prop] = task_doc.get(prop)


        # construct the $push

        if event_name or message:
            log_entry = task.log_event(name=event_name, event_type=event_type,
                                       message=message, details=details)
            u["$push"] = {"logs": log_entry.to_document()}


        self.update(spec=q, document=u)
Code example #4
    def _build_alias_lib(self, alias, lib_name, module_name, module_version,
                         sources, headers, libs, *args, **kwargs):
        # TODO: Append the module version to the key; modify the query to be able to search for a specific version or the latest version (don't-care version)
        # Store resulting library node in shared dictionary
        obj_targets = self._build_default_objects(module_name, sources)
        # Create a lib node.
        lib_node = self.env.Library(target=lib_name,
                                    source=obj_targets,
                                    *args,
                                    **kwargs)
        # Create unique library key from module and library name
        key = LibraryList.CreateLibraryKey(module_name, lib_name)
        # Store the library key in shared libs list
        self._shared_libs.add(key, InternalLibrary(lib_name, lib_node), libs)

        # Create an alias target for the lib
        self.createAlias(alias, lib_node)
        # Make sure headers are list
        headers = listify(headers)
        if len(headers) > 0:
            # Remove redundant elements
            headers = remove_redundant(headers)
            # Get the install directory for the active variant
            inc_path = self.env['INC_DIR'] + '/'
            # Install every header file to the shared <INC_DIR> keep the folder hierarchy for the header files
            inc_node = [
                self.env.Install(
                    self.createNode([os.path.split(inc_path + h)[0]]), h)
                for h in headers
            ]
            # Create alias targets for the header nodes
            self.createAlias(alias, inc_node)
            self.createAlias('install', inc_node)
Code example #5
    def update_task(self, task, properties=None, event_name=None,
                    event_type=EventType.INFO, message=None, details=None,
                    error_code=None,
                    **update_kwargs):
        """
            Updates the specified properties of the specified MBSTask object
        """
        task_doc = task.to_document()
        q = {
            "_id": task.id
        }

        u = {}

        # log the event as needed
        if event_name or message:
            log_entry = task.log_event(name=event_name, event_type=event_type, message=message, details=details,
                                       error_code=error_code)
            # push if "logs" property is not included
            if not (properties and "logs" in properties):
                u["$push"] = {"logs": log_entry.to_document()}

        # construct $set operator
        if properties:
            properties = listify(properties)
            u["$set"] = {}
            for prop in properties:
                u["$set"][prop] = task_doc.get(prop)

        self.update(spec=q, document=u, **update_kwargs)
Code example #6
 def _build_default_objects(self, module, sources):
     # Make sure headers are list
     sources = listify(sources)
     # Remove redundant elements
     sources = remove_redundant(sources)
     obj_targets = []
     if len(sources) > 0:
         obj_path = self.env['OBJ_PATH'] + '/' + module + '/'
         # TODO: Object files should maybe be placed into $OBJ_DIR/module/path/src.o. Now they are just placed at $OBJ_DIR/module/src.o
         for item in sources:
             # '#' sign here is mandatory, otherwise the include path is wrong
             # TODO: Why this is not working without the '#' sign?
             # Create a node from the include directory
             inc_dir = [self.createNode(['#' + module])
                        ] + [self.createNode([self.env['CPPPATH']])]
             # Get the base filename from the hive source
             base_item = os.path.splitext(os.path.basename(item))[
                 0]  # strip relative path part and extension
             # TODO: Cannot create nodes from items. Why?
             obj_node = self.env.Object(target=obj_path + base_item,
                                        source=item,
                                        CPPPATH=inc_dir)
             # Append the object node to targets
             obj_targets.append(obj_node)
             # Create an alias target from object node
             self.createAlias(module + '/' + item, obj_node)
     return obj_targets
Code example #7
    def update_task(self,
                    task,
                    properties=None,
                    event_name=None,
                    event_type=EVENT_TYPE_INFO,
                    message=None,
                    details=None):
        """
            Updates the specified properties of the specified MBSTask object
        """
        task_doc = task.to_document()
        q = {"_id": task.id}

        u = {}
        # construct $set operator
        if properties:
            properties = listify(properties)
            u["$set"] = {}
            for prop in properties:
                u["$set"][prop] = task_doc.get(prop)

        # construct the $push

        if event_name or message:
            log_entry = task.log_event(name=event_name,
                                       event_type=event_type,
                                       message=message,
                                       details=details)
            u["$push"] = {"logs": log_entry.to_document()}

        self.update(spec=q, document=u)
Code example #8
 def __init__(self,
              name,
              libs=list(),
              include_paths=list(),
              lib_paths=list()):
     """Initialize external library instance.
      @param name           Symbolic name of library (or library-group)
     @param libs           Identifiers of libraries to link with
                            (if not specified, `name` is used)
     @param include_paths  Additional include search paths
     @param lib_paths      Additional library search paths
     """
     super(ExternalLibrary, self).__init__(name)
     self._libs = listify(libs) if len(libs) else [name]
     self._cpp_paths = listify(include_paths)
     self._lib_paths = listify(lib_paths)
Code example #9
def scan_modules(modules_list, max_depth, file_skip_list):
    _CACHED_MODULES = listify(modules_list)

    def modules():
        """Generate modules to build.
        Each module is a directory with a SConscript file.
        """
        if not _CACHED_MODULES:
            # Build the cache
            def build_dir_skipper(dirpath):
                """Return True if `dirpath` is the build base dir."""
                return os.path.normpath('out') == os.path.normpath(dirpath)

            def hidden_dir_skipper(dirpath):
                """Return True if `dirpath` last dir component begins with '.'"""
                last_dir = os.path.basename(dirpath)
                return last_dir.startswith('.')

            for module_path in module_dirs_generator(
                    max_depth=max_depth,
                    followlinks=False,
                    dir_skip_list=[build_dir_skipper, hidden_dir_skipper],
                    file_skip_list=file_skip_list):
                _CACHED_MODULES.append(module_path)
        # Yield modules from cache
        for module in _CACHED_MODULES:
            yield module

    return modules
Code example #10
File: sample_mori.py Project: dtmori/HWW
 def __init__(self, name, title, files, stype, cfg, trees,
              cuts = DEFAULT_TRUE, weights = '1.0', scale = 1.0, lumi = 1.0,
              channel = '', roottree = 'HWWTree'):
     assert(type(name) is str)
     assert(type(title) is str)
     assert(type(cuts) is str)
     assert(type(weights) is str)
     assert(type(scale) is float)
     self.__root = roottree # << the root tree (all other trees will be friends)
     self.__trees = None # << list of TChain's controlled by this sample 
     self.__tree_names = utils.uniqify(utils.listify(trees) + [self.__root])
     self.__name = name # << name of the sample 
     self.__title = title # << title (used in legends, &c.)
     self.__cuts = cuts.replace(' ','') # << cuts that define sample selection
     self.__cuts_base = self.__cuts
     self.__applied_cuts = None
     self.__weights = weights.replace(' ','') # << event weights 
     self.__weights_base = self.__weights
     self.__scale = scale # << product of scale factors
     self.__counters = {} # << counters maintained by this sample
     self.__histograms = {} # << histograms produced by this sample
     self.__internals = [] # << other internal objects to keep 
     self.__locked = False # << whether the sample is mutable (ie., can produce histograms)
     self.__selections = {} # << named selections of this sample
     self.__uid = utils.uuid()
     samplebase.__init__(self, files, lumi, stype, cfg, channel, title)
Code example #11
File: sample_mori.py Project: dtmori/HWW
 def configure(self):
     ''' Load trees contained in this sample, set root tree (all other trees are friends) '''
     self.set_style()
     if len(self.__tree_names) == 0:
         self.__locked = True
         return
     trees = { }
     for tname in utils.listify(self.__tree_names): # << load the TChain's using all files in sample
         nfiles = 0
         t = ROOT.TChain(tname, tname)
         ROOT.SetOwnership(t, False)
         for fpattern in self.get_files():
             for fname in glob(os.sep.join([self.get_cfg().base,fpattern])):
                 n = t.Add(fname.replace('//','/'))
                 if n <= 0:
                     raise IOError('[%s] does not exist or does not contain [%s]'%(fname,tname))
                 nfiles += n
         if nfiles != 0:
             trees[tname] = t
         else:
             self.log.info('no files matching patterns [%s] exist, sample not defined'%(str(self.get_files())) )
             self.__locked = True
             return
     trees[self.__root].LoadTree(0)
     self.__trees = trees
     if self.__root not in self.__trees.keys():
         self.log.warning('it seems that [%s] is not in list of input trees'%(self.__root))
         self.__root = self.__tree_names[0]
     for k, v in trees.items():
         if k == self.__root:
             continue
         self.log.verbose('setting tree [%s] as friend to root tree [%s]'%(k, self.__root))
         trees[self.__root].AddFriend(v)
     return 
Code example #12
    def send_notification(self, subject, message, recipient=None):

        try:

            logger.info("Sending notification email...")
            msg = MIMEText(message.encode('utf-8'), 'plain', 'UTF-8')

            to_address = listify(recipient or self._to_address)
            msg['From'] = self.from_address
            msg['To'] = ",".join(to_address)

            if subject:
                msg['Subject'] = subject


            smtp = smtplib.SMTP(self.smtp_host)
            if (self.smtp_username is not None or
                self.smtp_password is not None):
                smtp.login(self.smtp_username, self.smtp_password)
            smtp.sendmail(self.from_address, to_address, msg.as_string())
            smtp.quit()
            logger.info("Email sent successfully!")
        except Exception, e:
            logger.error("Error while sending email:\n%s" %
                         traceback.format_exc())
Code example #13
    def _resolve_sources(self, sources):
        """
        Add a file, glob pattern or another bundle to this bundle's
        contents.
        """
        sources = listify(sources)

        contents = []

        for source in sources:
            # Glob pattern, so expand into files
            if isinstance(source, str) and '*' in source:
                contents.extend(self._resolve_sources(glob(join(self.static_folder, source))))
            # Nested bundles are cool
            elif type(source) is Bundle:
                contents.append(source)
            # Files are fine; check the static_folder
            elif isfile(realpath(join(self.static_folder, source))):
                contents.append(realpath(join(self.static_folder, source)))
            # Check for direct path
            elif isfile(realpath(source)):
                contents.append(realpath(source))
            else:
                raise Exception('Cannot find file \'%s\'' % source)

        return contents
Code example #14
File: Browser.py Project: chenhao1215/crawler
    def links(self, page, exclude = False, require = None):
        exclude, require = map(lambda m: make_chain(map(matcher, listify(m)),
                                       merge = lambda returns: reduce(lambda a, b: bool(a) | bool(b), returns, False)),
                               [exclude, require])

        return BeautifulSoup(page).findAll(
            name = 'a',
            href = lambda value: value != None and (require(value) and not exclude(value)))
Code example #15
    def links(self, page, exclude=False, require=None):
        exclude, require = map(
            lambda m: make_chain(map(matcher, listify(m)),
                                 merge=lambda returns: reduce(
                                     lambda a, b: bool(a) | bool(b), returns,
                                     False)), [exclude, require])

        return BeautifulSoup(page).findAll(
            name='a',
            href=lambda value: value != None and
            (require(value) and not exclude(value)))
Code example #16
File: sample_mori.py Project: dtmori/HWW
 def __init__(self, files, lumi, stype, cfg, chan, title):
     self.log = msg.msglog(title, 'debug')
     if stype not in self.alltypes:
         self.log.error('invalid sample mode: [%s] not one of %s'%(stype, str(self.alltypes)))
         raise RuntimeError
     self.__files = utils.listify(files) # << list of files
     self.__stype = stype # << type of sample: bkg, sig, or dat
     self.__cfg = cfg # << configuration tag object
     self.__lumi = lumi # << luminosity of the sample
     self.__channel = chan # << channel identifier
     self.configure()
Code example #17
File: sample_mori.py Project: dtmori/HWW
 def __init__(self, samples = [], cfg = None, name = 'samplelist', fromfile = None):
     self.__name = name
     self.__cfg = cfg
     self.__samples = {}
     self.__file = None
     self.log = msg.msglog(name, 'debug')
     if fromfile != None:
         self.load(fromfile)
     else:
         for item in utils.listify(samples):
             self.__samples[item.get_name()] = item
     pass
Code example #18
def combine_scheds(pcts, scheds):
    assert sum(pcts) == 1.
    pcts = tensor([0] + listify(pcts))
    assert torch.all(pcts >= 0)
    pcts = torch.cumsum(pcts, 0)

    def _inner(pos):
        idx = (pos >= pcts).nonzero().max()
        actual_pos = (pos - pcts[idx]) / (pcts[idx + 1] - pcts[idx])
        return scheds[idx](actual_pos)

    return _inner
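For illustration, a hypothetical use of combine_scheds above. sched_lin is an assumed linear-interpolation helper and not part of the original snippet, which itself relies on import torch and from torch import tensor at module level.

def sched_lin(start, end):
    # Assumed helper: linearly interpolate from start to end as pos goes 0 -> 1.
    return lambda pos: start + pos * (end - start)

# Two phases of equal length: ramp 0.1 -> 0.5, then decay 0.5 -> 0.01.
sched = combine_scheds([0.5, 0.5], [sched_lin(0.1, 0.5), sched_lin(0.5, 0.01)])
print(sched(0.25))   # halfway through the first phase  -> tensor(0.3000)
print(sched(0.75))   # halfway through the second phase -> tensor(0.2550)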
Code example #19
File: models.py Project: alexksikes/mlt-play
 def get_document_text(self, ids):
     text = ''
     for doc in self.get_document_by_ids(ids):
         for f in self.more_like_this_params['fields']:
             if f not in doc:
                 continue
             doc[f] = utils.listify(doc[f])
             # keep intact for tagger analyzer
             if f.endswith('.terms') or f.endswith('.raw'):
                 text += ' ' + ' '.join('"%s"' % term for term in doc[f])
             else:
                 text += ' ' + ' '.join(doc[f])
     return text.strip()
Code example #20
File: bsets.py Project: alexksikes/SimSearch
    def get_detailed_scores(self, item_ids, query_item_ids=None, max_terms=20):
        """Returns detailed statistics about the matched items.

        This will assume the same items previously queried unless otherwise
        specified by 'query_item_ids'.
        """
        item_ids = utils.listify(item_ids)

        logger.info('Computing detailed scores ...')
        scores = self._compute_detailed_scores(item_ids, query_item_ids, max_terms)
        
        self._update_time_taken()
        return scores
Code example #21
File: models.py Project: humanlongevity/tredparse
 def sparsify(self, P, epsilon=SMALL_VALUE):
     """
     Returns the sparsified distribution, anything smaller than epsilon is
     considered as zero and NOT recorded.
     """
     Z = {}
     total = sum(v for v in P.values())
     for k, v in P.items():
          if v < epsilon:
             continue
         k = [x / self.period for x in listify(k)]
         k = ",".join(str(x) for x in k)
         Z[k] = v / total
     return Z
Code example #22
File: repository.py Project: enzolamzo/mongoctl
def config_lookup_cluster_by_server(server, lookup_type=LOOKUP_TYPE_ANY):
    clusters = get_configured_clusters()
    lookup_type = listify(lookup_type)

    for t in lookup_type:
        result = None
        if t == LOOKUP_TYPE_MEMBER:
            result = filter(lambda c: c.has_member_server(server), clusters.values())
        elif t == LOOKUP_TYPE_CONFIG_SVR:
            result = filter(lambda c: cluster_has_config_server(c, server), clusters.values())
        elif t == LOOKUP_TYPE_SHARDS:
            result = filter(lambda c: cluster_has_shard(c, server), clusters.values())
        if result:
            return result[0]
Code example #23
File: bsets.py Project: wsgan001/SimSearch
    def get_detailed_scores(self, item_ids, query_item_ids=None, max_terms=20):
        """Returns detailed statistics about the matched items.

        This will assume the same items previously queried unless otherwise
        specified by 'query_item_ids'.
        """
        item_ids = utils.listify(item_ids)

        logger.info('Computing detailed scores ...')
        scores = self._compute_detailed_scores(item_ids, query_item_ids,
                                               max_terms)

        self._update_time_taken()
        return scores
Code example #24
File: bsets.py Project: wsgan001/SimSearch
    def query(self, item_ids, max_results=100):
        """Queries the given computed against the given item ids.
        """
        item_ids = utils.listify(item_ids)
        if not self.is_valid_query(item_ids):
            return self.empty_results

        logger.info('Computing the query vector ...')
        self._make_query_vector()
        logger.info('Computing log scores ...')
        self._compute_scores()
        logger.info('Get the top %s log scores ...', max_results)
        self._order_indexes_by_scores(max_results)

        return self.results
Code example #25
File: bsets.py Project: alexksikes/SimSearch
    def query(self, item_ids, max_results=100):
        """Queries the given computed against the given item ids.
        """
        item_ids = utils.listify(item_ids)
        if not self.is_valid_query(item_ids):
            return self.empty_results

        logger.info('Computing the query vector ...')
        self._make_query_vector()
        logger.info('Computing log scores ...')
        self._compute_scores()
        logger.info('Get the top %s log scores ...', max_results)
        self._order_indexes_by_scores(max_results)

        return self.results
Code example #26
    def __init__(self,
                 wrapper,
                 data,
                 loss_func,
                 lr=1e-2,
                 cbs=None,
                 cb_funcs=None):
        self.wrapper, self.data, self.loss_func, self.lr = wrapper, data, loss_func, lr
        self.print, self.logger, self.opt = False, print, None

        self.model = self.wrapper.model
        self.cbs = []
        self.add_cb(TrainEvalCallback())
        self.add_cbs(cbs)
        self.add_cbs(cbf() for cbf in listify(cb_funcs))
Code example #27
File: repository.py Project: enzolamzo/mongoctl
def db_lookup_cluster_by_server(server, lookup_type=LOOKUP_TYPE_ANY):
    cluster_collection = get_mongoctl_cluster_db_collection()
    lookup_type = listify(lookup_type)
    type_query = []
    for t in lookup_type:
        prop_query = {"%s.server.$id" % t: server.id}
        type_query.append(prop_query)

    query = {"$or": type_query}

    cluster_doc = cluster_collection.find_one(query)

    if cluster_doc is not None:
        return new_cluster(cluster_doc)
    else:
        return None
Code example #28
File: repository.py Project: zenglzh/mongoctl
def db_lookup_cluster_by_server(server, lookup_type=LOOKUP_TYPE_ANY):
    cluster_collection = get_mongoctl_cluster_db_collection()
    lookup_type = listify(lookup_type)
    type_query = []
    for t in lookup_type:
        prop_query = {"%s.server.$id" % t: server.id}
        type_query.append(prop_query)

    query = {"$or": type_query}

    cluster_doc = cluster_collection.find_one(query)

    if cluster_doc is not None:
        return new_cluster(cluster_doc)
    else:
        return None
Code example #29
    def update_task(self,
                    task,
                    properties=None,
                    event_name=None,
                    event_type=EventType.INFO,
                    message=None,
                    details=None,
                    error_code=None,
                    **update_kwargs):
        """
            Updates the specified properties of the specified MBSTask object
        """
        task_doc = task.to_document()
        q = {"_id": task.id}

        u = {}

        # log the event as needed
        if event_name or message:
            log_entry = task.log_event(name=event_name,
                                       event_type=event_type,
                                       message=message,
                                       details=details,
                                       error_code=error_code)
            # push if "logs" property is not included
            if not (properties and "logs" in properties):
                u["$push"] = {"logs": log_entry.to_document()}

        # construct $set operator
        if properties:
            properties = listify(properties)
            u["$set"] = {}
            for prop in properties:
                u["$set"][prop] = task_doc.get(prop)

        if not u or ("$set" not in u and "$push" not in u):
            import mbs
            import notification.handler
            mbs.get_mbs().notifications.send_event_notification(
                "BAD UPDATE",
                "BAD UPDATE for task %s: %s , %s" %
                (task.id, u, traceback.format_exc()),
                priority=notification.handler.NotificationPriority.CRITICAL)
            raise Exception("BAD UPDATE!!!!!")

        self.update(spec=q, document=u, **update_kwargs)
Code example #30
File: repository.py Project: zenglzh/mongoctl
def config_lookup_cluster_by_server(server, lookup_type=LOOKUP_TYPE_ANY):
    clusters = get_configured_clusters()
    lookup_type = listify(lookup_type)

    for t in lookup_type:
        result = None
        if t == LOOKUP_TYPE_MEMBER:
            result = filter(lambda c: c.has_member_server(server),
                            clusters.values())
        elif t == LOOKUP_TYPE_CONFIG_SVR:
            result = filter(lambda c: cluster_has_config_server(c, server),
                            clusters.values())
        elif t == LOOKUP_TYPE_SHARDS:
            result = filter(lambda c: cluster_has_shard(c, server),
                            clusters.values())
        if result:
            return result[0]
Code example #31
File: learner.py Project: Abhimanyu08/image_caption
 def __init__(self,
              model,
              data,
              loss_func,
              opt_func=sgd_opt,
              lr=1e-2,
              splitter=param_getter,
              cbs=None,
              cb_funcs=None,
              path=None):
     self.model, self.data, self.loss_func, self.opt_func, self.lr, self.splitter = model, data, loss_func, opt_func, lr, splitter
     self.in_train, self.logger, self.opt = False, print, None
     self.path = Path(ifNone(path, self.data.path))
     self.cbs = []
     self.add_cb(TrainEvalCallback())
     self.add_cbs(cbs)
     self.add_cbs(cbf() for cbf in listify(cb_funcs))
Code example #32
    def _urlize_paths(self, paths):
        """
        Return a list of URLs for the given paths.
        """
        paths = listify(paths)
        static_folder = realpath(self.static_folder)

        url_paths = []
        for path in paths:
            # Ensure this is a full, real path (ie. not relative)
            path = realpath(path)
            # Strip off path to the static folder from the beginning
            if path.startswith(static_folder):
                path = path[len(static_folder):]
            # Prefix with the static folder URL path
            path = self.static_url_path + path
            url_paths.append(path)

        return url_paths
Code example #33
    def upload_backup(self, backup, file_name, target, destination_path=None):
        targets = listify(target)
        workspace = self.get_task_workspace_dir(backup)
        file_path = os.path.join(workspace, file_name)
        metadata = {
            "Content-Type": "application/x-compressed"
        }
        uploaders = multi_target_upload_file(targets, file_path, destination_path=destination_path, metadata=metadata)

        errored_uploaders = filter(lambda uploader: uploader.error is not None,
                                   uploaders)

        if errored_uploaders:
            raise errored_uploaders[0].error

        target_references = map(lambda uploader: uploader.target_reference, uploaders)

        if isinstance(target, list):
            return target_references
        else:
            return target_references[0]
Code example #34
    def update_task(self, task, properties=None, event_name=None,
                    event_type=EventType.INFO, message=None, details=None,
                    error_code=None,
                    **update_kwargs):
        """
            Updates the specified properties of the specified MBSTask object
        """
        task_doc = task.to_document()
        q = {
            "_id": task.id
        }

        u = {}

        # log the event as needed
        if event_name or message:
            log_entry = task.log_event(name=event_name, event_type=event_type, message=message, details=details,
                                       error_code=error_code)
            # push if "logs" property is not included
            if not (properties and "logs" in properties):
                u["$push"] = {"logs": log_entry.to_document()}

        # construct $set operator
        if properties:
            properties = listify(properties)
            u["$set"] = {}
            for prop in properties:
                u["$set"][prop] = task_doc.get(prop)

        if not u or ("$set" not in u and "$push" not in u):
            import mbs
            import notification.handler
            mbs.get_mbs().notifications.send_event_notification(
                "BAD UPDATE", "BAD UPDATE for task %s: %s , %s" % (task.id, u, traceback.format_exc()),
                priority=notification.handler.NotificationPriority.CRITICAL)
            raise Exception("BAD UPDATE!!!!!")

        self.update(spec=q, document=u, **update_kwargs)
Code example #35
    def upload_backup(self, backup, file_name, target, destination_path=None):
        targets = listify(target)
        workspace = self.get_task_workspace_dir(backup)
        file_path = os.path.join(workspace, file_name)
        metadata = {"Content-Type": "application/x-compressed"}
        uploaders = multi_target_upload_file(targets,
                                             file_path,
                                             destination_path=destination_path,
                                             metadata=metadata)

        errored_uploaders = filter(lambda uploader: uploader.error is not None,
                                   uploaders)

        if errored_uploaders:
            raise errored_uploaders[0].error

        target_references = map(lambda uploader: uploader.target_reference,
                                uploaders)

        if isinstance(target, list):
            return target_references
        else:
            return target_references[0]
Code example #36
File: sched_algs.py Project: alexandrasandulescu/tss
def hlfet(dag, no_nodes):
    scheduling = {}
    # initialize empty task list for each processor
    for node in range(0, no_nodes):
        scheduling[node] = []

    L = listify(dag)
    L.sort(key=lambda task: task.static_level)

    while len(L) > 0:
        task = L.pop()
        min_node = 0
        min_val = processor_finish(scheduling[0])
        for node in scheduling:
            t = processor_finish(scheduling[node])
            m = max(t, task.est)
            if m < min_val:
                min_val = m
                min_node = node
        scheduling[min_node].append(task)
        task.start = min_val

    return scheduling
Code example #37
    def send_notification(self, subject, message, recipient=None):

        try:

            logger.info("Sending notification email...")
            msg = MIMEText(message.encode('utf-8'), 'plain', 'UTF-8')

            to_address = listify(recipient or self._to_address)
            msg['From'] = self.from_address
            msg['To'] = ",".join(to_address)

            if subject:
                msg['Subject'] = subject

            smtp = smtplib.SMTP(self.smtp_host)
            if (self.smtp_username is not None
                    or self.smtp_password is not None):
                smtp.login(self.smtp_username, self.smtp_password)
            smtp.sendmail(self.from_address, to_address, msg.as_string())
            smtp.quit()
            logger.info("Email sent successfully!")
        except Exception, e:
            logger.error("Error while sending email:\n%s" %
                         traceback.format_exc())
Code example #38
File: compile.py Project: fish2000/halogen
def test(MAXIMUM_GENERATORS=255):
    
    """ Run the inline tests for the halogen.compile module """
    
    import tempfile
    from contextlib import ExitStack
    from pprint import pprint
    
    if __package__ is None or __package__ == '':
        import api # type: ignore
        from utils import terminal_width
    else:
        from . import api
        from .utils import terminal_width
    
    directory = Directory(pth="/Users/fish/Dropbox/halogen/tests/generators")
    destination = Directory(pth=os.path.join(tempfile.gettempdir(), "yodogg"))
    zip_destination = os.path.realpath("/tmp")
    
    with TemporaryDirectory(prefix='yo-dogg-') as td:
        
        if not td.exists:
            print("X> TemporaryDirectory DOES NOT EXIST:")
            print(f"X> {td}")
        
        # We use a contextlib.ExitStack instance to separate out the construction
        # of the halogen.compile.Generators instance (q.v. immediately below) and
        # the call to __enter__ (q.v. right after that) so as to trap any and all
        # exceptions that may be thrown individually in either the constructor call
        # -- e.g. Generators.__init__ -- or Generators.__enter__ …
        
        stack = ExitStack()
        
        gens = Generators(CONF, directory=directory,
                                destination=td,
                                intermediate=td.subdirectory(".intermediate"),
                                maximum=MAXIMUM_GENERATORS,
                                verbose=DEFAULT_VERBOSITY,
                                use_cdb=True)
        
        # Preserve compilation artifacts:
        # td.do_not_destroy()
        
        try:
            # Calls Generators.__enter__(self=gens):
            stack.enter_context(gens)
        except CompilerError as exc:
            print_exception(exc)
            # gens.precompile() and gens.compile()
        except CompileDatabaseError as exc:
            print_exception(exc)
            # gens.precompile() and gens.compile()
        except LinkerError as exc:
            print_exception(exc)
            # if gens.compiled and gens.do_static:
            #     gens.arch()
        except ArchiverError as exc:
            print_exception(exc)
            # if gens.compiled and gens.do_shared:
            #     gens.link()
            # if gens.linked and gens.do_preload:
            #     gens.preload_all()
        except GeneratorLoaderError as exc:
            print_exception(exc)
        except GenerationError as exc:
            print_exception(exc)
        else:
            with stack: # Exiting this scope calls Generators.__exit__(self=gens):
                
                precompiled = gens.precompiled and "YES" or "no"
                compiled = gens.compiled and "YES" or "no"
                postcompiled = gens.postcompiled and "YES" or "no"
                linked = gens.linked and "YES" or "no"
                archived = gens.archived and "YES" or "no"
                preloaded = gens.preloaded and "YES" or "no"
                
                print("")
                print(f"IS IT PRECOMPILED? -- {precompiled}")
                print(f"IS IT COMPILED? -- {compiled}")
                print(f"IS IT POSTCOMPILED? -- {postcompiled}")
                print(f"IS IT LINKED? -- {linked}")
                print(f"IS IT ARCHIVED? -- {archived}")
                print(f"IS IT PRELOADED? -- {preloaded}")
                print("")
                
                print(f"LIBRARY: {gens.library}")
                if gens.linked and os.path.exists(gens.library):
                    print("LIBRARY FILE EXISTS")
                
                print(f"ARCHIVE: {gens.archive}")
                if gens.archived and os.path.exists(gens.archive):
                    print("ARCHIVE FILE EXISTS")
                
                print(f"REGISTERED GENERATORS: {api.registered_generators()}")
                
                # loaded_generators = gens.loaded_generators()
                
                if DEFAULT_VERBOSITY:
                    if gens.loaded_count > 0:
                        print(f"... SUCCESSFULLY LOADED GENERATORS FROM LIBRARY {gens.library}")
                        print(f"... THERE ARE {gens.loaded_count} GENERATORS LOADED FROM THAT LIBRARY, DOGG")
                    else:
                        print(f"... NO GENERATORS COULD BE LOADED FROM LIBRARY {gens.library}")
                
                # Run generators:
                generated = gens.run(emit='expanded')
                
                print('')
                pprint(generated, indent=4,
                                  width=terminal_width)
                print('')
                
                # Copy the library and archive files to $TMP/yodogg:
                if destination.exists:
                    if DEFAULT_VERBOSITY:
                        print(f"Removing destination: {destination} …")
                    rm_rf(destination)
                
                if DEFAULT_VERBOSITY:
                    print(f"Copying from {td} to {destination} …")
                td.copy_all(destination)
                
                with TemporaryName(suffix="zip", parent=zip_destination) as tz:
                    if DEFAULT_VERBOSITY:
                        print(f"Zip-archiving destination contents to zipfile: {tz} …")
                    destination.zip_archive(tz)
                
                if gens.intermediate.exists:
                    if CDBJsonFile.in_directory(gens.intermediate):
                        if DEFAULT_VERBOSITY:
                            print("")
                            print(f"Found compilation DB file “{CDBJsonFile.filename}” in intermediate: {gens.intermediate}:")
                            with CDBJsonFile(directory=gens.intermediate) as cdb:
                                pprint(cdb.entries, indent=4,
                                                    width=terminal_width)
                    if DEFAULT_VERBOSITY:
                        print("")
                        print(f"Listing files at intermediate: {gens.intermediate} …")
                    intermediate_list = OCDList(gens.intermediate.subpath(listentry) \
                                                for listentry in gens.intermediate.ls())
                    pprint(listify(*intermediate_list), indent=4,
                                                        width=terminal_width)
                else:
                    print("X> Intermediate directory DOES NOT EXIST")
                    print(f"X> {gens.intermediate}")
                
                if destination.exists:
                    if DEFAULT_VERBOSITY:
                        print("")
                        print(f"Listing files at destination: {destination} …")
                    destination_list = OCDList(destination.subpath(listentry) \
                                               for listentry in destination.ls())
                    pprint(listify(*destination_list), indent=4,
                                                       width=terminal_width)
                    if DEFAULT_VERBOSITY:
                        print(f"Removing destination: {destination} …")
                    rm_rf(destination)
                else:
                    print("X> Destination directory DOES NOT EXIST")
                    print(f"X> {destination}")
Code example #39
File: sample_mori.py Project: dtmori/HWW
 def set_files(self, files):
     self.__files = utils.listify(files)
     self.configure()
Code example #40
def compose(x, funcs, *args, order_key='_order', **kwargs):
    key = lambda o: getattr(o, order_key, 0)
    for f in sorted(listify(funcs), key=key): x = f(x, **kwargs)
    return x
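A hypothetical call to the compose helper above, assuming a listify along the lines of the sketch near the top of the page. The optional _order attribute controls application order; functions without it default to 0.

double = lambda x: x * 2
inc = lambda x: x + 1
inc._order = 1                      # applied after double (default _order is 0)
print(compose(3, [double, inc]))    # double(3) = 6, then inc(6) -> 7
print(compose(3, inc))              # a bare callable also works thanks to listify -> 4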
Code example #41
 def obj(self, items, idx, procs):
     isint = isinstance(idx, int) or (isinstance(idx,torch.LongTensor) and not idx.ndim)
     item = items[idx]
     for proc in reversed(listify(procs)):
         item = proc.deproc1(item) if isint else proc.deprocess(item)
     return item
Code example #42
File: learner.py Project: Abhimanyu08/image_caption
 def remove_cbs(self, cbs):
     for cb in listify(cbs):
         self.cbs.remove(cb)
Code example #43
File: learner.py Project: Abhimanyu08/image_caption
 def add_cbs(self, cbs):
     for cb in listify(cbs):
         self.add_cb(cb)
Code example #44
 def __init__(self, items): self.items = listify(items)
 def __getitem__(self, idx):
Code example #45
def process_sz(sz):
    sz = listify(sz)
    return tuple(sz if len(sz) == 2 else [sz[0], sz[0]])
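For example, process_sz normalizes a single size or a pair into a 2-tuple (again assuming a listify along the lines of the earlier sketch):

print(process_sz(128))        # -> (128, 128)
print(process_sz([128, 96]))  # -> (128, 96)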
Code example #46
 def __init__(self, metrics, in_train):
     self.metrics, self.in_train = listify(metrics), in_train
Code example #47
 def add(self, key, lib_obj, lib_deps=list()):
     assert key not in self._libs
     lib_deps = {'lib_obj': lib_obj, 'lib_deps': listify(lib_deps)}
     self._libs[key] = lib_deps
Code example #48
 def __init__(self, param, sched_funcs):
     self.param = param
     self.sf = listify(sched_funcs)
Code example #49
File: script.py Project: evilbungle/motley
def main():

    # Arseparse
    parser = argparse.ArgumentParser(description='Manage some builds')
    parser.add_argument('action', metavar='action', nargs='?', action="store")
    parser.add_argument('environments', metavar='environment', nargs='*', action="store",default=[])
    parser.add_argument('-c', dest='config_path', action='store',help='Specify a config file')
    args = parser.parse_args()

    # Dig out the config path
    if args.config_path is None:
        # TODO: Search environ too
        args.config_path = "motley.cfg"

    # Read the raw config
    config = ConfigParser()
    config.read(args.config_path)

    base_path = config.get('motley','directory')
    
    # Build the list of jobs to run
    jobs = []
    for section in config.sections():

        if section.startswith('environment:'):
            
            # If no arguments were specified we want all environments
            if not args.environments or section[12:] in args.environments:
                options = expand_section(config,section)
                options['environment'] = section[12:]
                
                # Inject the path
                if 'path' not in options:
                    options['path'] = os.path.join(base_path,options['environment'])
                
                # Iterate through the actions looking for a matching action name
                for action_name in listify(options.get('actions','')):
                    
                    # Expand each action (this duplicates a lot unfortunately)
                    action = expand_section(config,'action:%s' % action_name)
                    key = action.get('name',action_name)

                    
                    # Add a job when we find the first action whose name matches
                    if key == args.action:
                        
                        job = {'options':options,
                               'actions':[]}
                               
                        for sn in expand_dependencies(config,action_name):
                            command = expand_section(config,'action:%s' % sn)['command']
                            job['actions'].append(command)
                        
                        jobs.append(job)
                        break
                
    
    batch = Batch(jobs)
    batch.start()
    for result in batch:
        if result is None:
            print "Something bad has happened"
        else:
            code,stdout,stderr = result
            if int(code) != 0:
                print "============================================"
                print "A job failed"
                print stdout
                print stderr
            else:
                pass
Code example #50
def setify(o): return o if isinstance(o,set) else set(listify(o))

def _get_files(p, fs, extensions=None):