Example #1
File: parser.py Project: minqf/repoman
 def __init__(self, config, stores):
     """
     :param config: configuration for the parser
     :param stores: instances of the available stores
     """
     self.config = config
     self.stores = stores
     self.filters = config.getarray('filters')
     self.filters = dict([
         (cname,
          cls(stores=listvalues(stores),
              config=config.get_section(section='filter.' +
                                        cls.CONFIG_SECTION, )))
         for cname, cls in iteritems(filters.FILTERS)
         if cname in self.filters or 'all' in self.filters
     ])
     self.sources = config.getarray('sources')
     self.sources = dict([
         (cname,
          cls(stores=listvalues(stores),
              config=config.get_section(section='source.' +
                                        cls.CONFIG_SECTION, )))
         for cname, cls in iteritems(sources.SOURCES)
         if cname in self.sources or 'all' in self.sources
     ])
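Note: the snippets on this page all exercise Python 2/3 compatibility helpers such as listvalues, iteritems, viewitems and viewkeys. Assuming they are imported from future.utils (or an equivalent shim, which is how these projects typically obtain them), they behave roughly as in this minimal sketch:

# Rough sketch of the compatibility helpers used throughout these examples.
# Assumes future.utils-style semantics; individual projects may ship their own shims.
def listvalues(d):
    # Concrete list of the dict's values (Python 2 d.values() behaviour).
    return list(d.values())

def iteritems(d):
    # Iterator over (key, value) pairs without building an intermediate list.
    return iter(d.items())

def viewitems(d):
    # Dynamic view of (key, value) pairs.
    return d.items()

def viewkeys(d):
    # Dynamic view of the keys.
    return d.keys()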
Example #2
    def updatePileupLocation(self):
        dataItems = self.backend.getActivePileupData()

        # fullResync incorrect with multiple dbs's - fix!!!
        dataLocations = DataLocationMapper.__call__(self, dataItems)
        self.logger.info("Found %d unique pileup data to update location",
                         len(dataItems))

        # Given that there might be multiple data items to be updated
        # handle it like a dict such that element lookup becomes easier
        modified = {}
        for dataMapping in listvalues(dataLocations):
            for data, locations in viewitems(dataMapping):
                elements = self.backend.getElementsForPileupData(data)
                for element in elements:
                    if element.get('NoPileupUpdate', False):
                        continue
                    if element.id in modified:
                        element = modified[element.id]
                    for pData in element['PileupData']:
                        if pData == data:
                            if sorted(locations) != sorted(
                                    element['PileupData'][pData]):
                                self.logger.info("%s, setting location to: %s",
                                                 data, locations)
                                element['PileupData'][pData] = locations
                                modified[element.id] = element
                                break
        self.logger.info("Updating %d elements for Pileup location update",
                         len(modified))
        self.backend.saveElements(*listvalues(modified))

        return len(modified)
Example #3
def applySiiQcValidation(siiContainer, specfile):
    """Iterates over all Sii entries of a specfile in siiContainer and validates
    if they surpass a user defined quality threshold. The parameters for
    validation are defined in ``siiContainer.info[specfile]``:

        - ``qcAttr``, ``qcCutoff`` and ``qcLargerBetter``

    In addition to passing this validation, a ``Sii`` also has to be at the first
    list position in the ``siiContainer.container``. If both criteria are met
    the attribute ``Sii.isValid`` is set to ``True``.

    :param siiContainer: instance of :class:`maspy.core.SiiContainer`
    :param specfile: unambiguous identifier of a ms-run file. Is also used as
        a reference to other MasPy file containers.
    """
    attr = siiContainer.info[specfile]['qcAttr']
    cutOff = siiContainer.info[specfile]['qcCutoff']
    if siiContainer.info[specfile]['qcLargerBetter']:
        evaluator = lambda sii: getattr(sii, attr) >= cutOff and sii.rank == 1
    else:
        evaluator = lambda sii: getattr(sii, attr) <= cutOff and sii.rank == 1

    for itemList in listvalues(siiContainer.container[specfile]):
        #Set the .isValid attribute of all Sii to False
        for sii in itemList:
            sii.isValid = False

        #Validate the first Sii
        sii = itemList[0]
        if evaluator(sii):
            sii.isValid = True
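applySiiQcValidation only does something useful if the qc settings are already present in siiContainer.info[specfile]. A hypothetical configuration sketch (the three keys come from the docstring above; the specfile name, attribute and cutoff values are purely illustrative):

siiContainer.info['run01'].update({
    'qcAttr': 'qValue',        # Sii attribute compared against the cutoff
    'qcCutoff': 0.01,          # quality threshold
    'qcLargerBetter': False,   # for q-values, smaller is better
})
applySiiQcValidation(siiContainer, 'run01')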
Example #4
File: skim.py Project: yjx4131/activitysim
    def __init__(self, skim_dict):

        self.offset_mapper = skim_dict.offset_mapper
        self.skim_dict = skim_dict

        # - key1_blocks dict maps key1 to block number
        # DISTWALK: 0,
        # DRV_COM_WLK_BOARDS: 0, ...
        key1_block_offsets = skim_dict.skim_info['key1_block_offsets']
        self.key1_blocks = {k: v[0] for k, v in iteritems(key1_block_offsets)}

        # - skim_dim3 dict maps key1 to dict of key2 absolute offsets into block
        # DRV_COM_WLK_BOARDS: {'MD': 4, 'AM': 3, 'PM': 5}, ...
        block_offsets = skim_dict.skim_info['block_offsets']
        skim_dim3 = OrderedDict()
        for skim_key in block_offsets:

            if not isinstance(skim_key, tuple):
                continue

            key1, key2 = skim_key
            block, offset = block_offsets[skim_key]

            assert block == self.key1_blocks[key1]

            skim_dim3.setdefault(key1, OrderedDict())[key2] = offset

        self.skim_dim3 = skim_dim3

        logger.info("SkimStack.__init__ loaded %s keys with %s total skims" %
                    (len(self.skim_dim3),
                     sum([len(d) for d in listvalues(self.skim_dim3)])))

        self.usage = set()
Example #5
File: cmdset.py Project: 325975/evennia
    def make_unique(self, caller):
        """
        Remove duplicate command-keys (unsafe)

        Args:
            caller (object): Commands on this object will
                get preference in the duplicate removal.

        Notes:
            This is an unsafe command meant to clean out a cmdset of
            doublet commands after it has been created. It is useful
            for commands inheriting cmdsets from the cmdhandler where
            obj-based cmdsets always are added double. Doublets will
            be weeded out with preference to commands defined on
            caller, otherwise just by first-come-first-served.

        """
        unique = {}
        for cmd in self.commands:
            if cmd.key in unique:
                ocmd = unique[cmd.key]
                if (hasattr(cmd, 'obj') and cmd.obj == caller) and not \
                        (hasattr(ocmd, 'obj') and ocmd.obj == caller):
                    unique[cmd.key] = cmd
            else:
                unique[cmd.key] = cmd
        self.commands = listvalues(unique)
Example #6
def load_graph_toro(filename):
    from collections import OrderedDict
    vertices = OrderedDict()
    edges = []

    with open(filename) as f:
        for linenum, line in enumerate(f):
            d = line.split()

            i = lambda idx: int(d[idx])
            f = lambda idx: float(d[idx])

            if d[0] == 'VERTEX2':
                id_ = i(1)
                xyt = np.array([f(2), f(3), f(4)])
                vertices[id_] = VertexXYT(xyt)

            elif d[0] == 'EDGE2':
                ndx_out, ndx_in = i(1), i(2)
                v_out, v_in = vertices[ndx_out], vertices[ndx_in]

                xyt = np.array([f(3), f(4), f(5)])
                P = np.array([[f(6), f(7), f(10)], [f(7), f(8),
                                                    f(11)],
                              [f(10), f(11), f(9)]])
                g = MultiVariateGaussian(xyt, P)
                edges.append(XYTConstraint(v_out, v_in, g))

            else:
                msg = "Unknown edge or vertex type %s in line %d" % (d[0],
                                                                     linenum)
                raise Exception(msg)

    return Graph(listvalues(vertices), edges)
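load_graph_toro parses TORO-style 2D pose-graph files. A toy input, inferred from the field indices above ('toy.graph' and the numeric values are illustrative; the six trailing EDGE2 values fill the 3x3 matrix in the f(6)..f(11) order used by the parser):

sample = (
    "VERTEX2 0 0.0 0.0 0.0\n"      # id, x, y, theta
    "VERTEX2 1 1.0 0.0 0.1\n"
    "EDGE2 0 1 1.0 0.0 0.1 1.0 0.0 1.0 1.0 0.0 0.0\n"  # out id, in id, dx, dy, dtheta, 6 matrix entries
)
with open('toy.graph', 'w') as fh:
    fh.write(sample)
graph = load_graph_toro('toy.graph')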
Example #7
 def _pre_run(self):
     assert (self.LPU_obj)
     assert (all(
         [var in self.memory_manager.variables for var in self.variables]))
     self.get_inds_func = {}
     for var, d in self.variables.items():
         v_dict = self.memory_manager.variables[var]
         if not d['uids']:
             uids = list(v_dict['uids'])
             inds = listvalues(v_dict['uids'])
             o = np.argsort(inds)
             d['uids'] = [uids[i] for i in o]
             self.src_inds[var] = garray.to_gpu(np.arange(len(d['uids'])))
         else:
             uids = []
             inds = []
             for uid in d['uids']:
                 try:
                     inds.append(v_dict['uids'][uid])
                     uids.append(uid)
                 except KeyError:
                     # uid not tracked for this variable; skip it
                     pass
             inds = np.array(inds, np.int32)
             o = np.argsort(inds)
             self.src_inds[var] = garray.to_gpu(inds[o])
             d['uids'] = [uids[i] for i in o]
         self._d_output[var] = garray.empty(len(d['uids']),
                                            v_dict['buffer'].dtype)
         d['output'] = np.zeros(len(d['uids']), v_dict['buffer'].dtype)
         self.get_inds_func[var] = get_inds_kernel(self.src_inds[var].dtype,
                                                   v_dict['buffer'].dtype)
     self.pre_run()
Example #8
File: graph.py Project: svbar/OSCE-Exploit
    def find_node(self, attribute, value):
        """
        Find and return the node with the specified attribute / value pair.

        @type  attribute: str
        @param attribute: Attribute name we are looking for
        @type  value:     mixed
        @param value:     Value of attribute we are looking for

        @rtype:  Mixed
        @return: Node, if attribute / value pair is matched. None otherwise.
        """

        # if the attribute to search for is the id, simply return the node from the internal hash.
        if attribute == "id" and value in self.nodes:
            return self.nodes[value]

        # step through all the nodes looking for the given attribute/value pair.
        else:
            for node in listvalues(self.nodes):
                if hasattr(node, attribute):
                    if getattr(node, attribute) == value:
                        return node

        return None
Example #9
File: utils.py Project: CoAxLab/radd
def pandaify_results(gort, ssrt, tb=.68, clmap=None, bootstrap=False, bootinfo={'nsubjects':25, 'ntrials':1000, 'groups':['ssd']}, ssd=np.array([[.2, .25, .3, .35, .4]])):

    nlevels = gort.shape[0]
    if nlevels==1:
        clmap = {'flat': ['flat']}
    conds = list(clmap)
    cond_levels = list(product(*[list(levels) for levels in listvalues(clmap)]))
    dfColumns=['ttype', 'ssd', 'response', 'acc', 'rt', 'ssrt', 'trial']
    dfColumns = conds + dfColumns

    # bootinfo['groups'] = bootinfo['groups'] + conds
    dfList = []
    for i in range(nlevels):
        df = create_sim_dataframe(gort[i], ssrt[i], tb=tb, ssd=ssd)
        for ii, c in enumerate(conds):
            df[c] = cond_levels[i][ii]
        dfList.append(df[dfColumns])

    resultsdf = pd.concat(dfList)
    resultsdf.reset_index(drop=True, inplace=True)

    if bootstrap:
        resultsdf = bootstrap_data(resultsdf, nsubjects=bootinfo['nsubjects'], n=bootinfo['ntrials'], groups=bootinfo['groups'])

    return resultsdf
Example #10
File: task_graph.py Project: nesl/Heliot
def _gen_update_info(result_mapping, Gt, Gd):
    node_info = {}
    for task, device in iteritems(result_mapping):
        if Gt.node[task][GtInfo.DEVICE]:
            assert device == Gt.node[task][GtInfo.DEVICE]
        device_type = Gd.node[device][GdInfo.DEVICE_TYPE]
        latency_info = Gt.node[task][GtInfo.LATENCY_INFO]
        exec_cmd = deepcopy(Gt.node[task][GtInfo.EXEC_CMD])
        compute_latency = 0 if not latency_info else (listvalues(
            latency_info[device_type])[0])
        node_info[task] = {
            GtInfo.CUR_DEVICE: device,
            GtInfo.CUR_LATENCY: compute_latency,
            GtInfo.EXEC_CMD: exec_cmd
        }
    edge_info = {}
    for edge in Gt.edges():
        t1, t2 = edge
        d1 = node_info[t1][GtInfo.CUR_DEVICE]
        d2 = node_info[t2][GtInfo.CUR_DEVICE]
        transmission_latency = Gd[d1][d2][GdInfo.LATENCY]
        edge_info[LinkHelper.get_edge(t1, t2)] = {
            GtInfo.CUR_LATENCY: transmission_latency,
        }
    return node_info, edge_info
Example #11
 def _parseDataBySchema(self, dataRoot, fileSchema, outGlbData):
   """ first sort the schema Root by location """
   locFieldDict = sortSchemaByLocation(fileSchema)
   # for each data entry, parse data by location
   floatKey = getKeys(dataRoot, float)
   for ien in floatKey:
     if float(ien) <=0:
       continue
     dataEntry = dataRoot[ien]
     outDataEntry = FileManDataEntry(fileSchema.getFileNo(), ien)
     dataKeys = [x for x in dataEntry]
     sortedKey = sorted(dataKeys, key=functools.cmp_to_key(sortDataEntryFloatFirst))
     for locKey in sortedKey:
       if locKey == '0' and fileSchema.getFileNo() == '1':
         self._parseFileDetail(dataEntry[locKey], ien)
       if locKey in locFieldDict:
         fieldDict = locFieldDict[locKey] # a dict of {pos: field}
         curDataRoot = dataEntry[locKey]
         if len(fieldDict) == 1:
           fieldAttr = listvalues(fieldDict)[0]
           if fieldAttr.isSubFilePointerType(): # Multiple
             self._parseSubFileField(curDataRoot, fieldAttr, outDataEntry)
           else:
             self._parseSingleDataValueField(curDataRoot, fieldAttr,
                                             outDataEntry)
         else:
           self._parseDataValueField(curDataRoot, fieldDict, outDataEntry)
     outGlbData.addFileManDataEntry(ien, outDataEntry)
     if fileSchema.getFileNo() == self._curFileNo:
       self._addFileKeyIndex(self._curFileNo, ien, outDataEntry.name)
Example #12
File: skim.py Project: UDST/activitysim
    def __init__(self, skim_dict):

        self.offset_mapper = skim_dict.offset_mapper
        self.skim_dict = skim_dict

        # - key1_blocks dict maps key1 to block number
        # DISTWALK: 0,
        # DRV_COM_WLK_BOARDS: 0, ...
        key1_block_offsets = skim_dict.skim_info['key1_block_offsets']
        self.key1_blocks = {k: v[0] for k, v in iteritems(key1_block_offsets)}

        # - skim_dim3 dict maps key1 to dict of key2 absolute offsets into block
        # DRV_COM_WLK_BOARDS: {'MD': 4, 'AM': 3, 'PM': 5}, ...
        block_offsets = skim_dict.skim_info['block_offsets']
        skim_dim3 = OrderedDict()
        for skim_key in block_offsets:

            if not isinstance(skim_key, tuple):
                continue

            key1, key2 = skim_key
            block, offset = block_offsets[skim_key]

            assert block == self.key1_blocks[key1]

            skim_dim3.setdefault(key1, OrderedDict())[key2] = offset

        self.skim_dim3 = skim_dim3

        logger.info("SkimStack.__init__ loaded %s keys with %s total skims"
                    % (len(self.skim_dim3),
                       sum([len(d) for d in listvalues(self.skim_dim3)])))

        self.usage = set()
Example #13
 def cap_cache(self):
     if self._total > self.cap:
         for ent in listvalues(self._entries):
             if self._total < self.cap or len(
                     self._entries) < self.min_entries:
                 break
             self._remove(ent, True)
Example #14
File: reader.py Project: hollenstein/maspy
def applySiiQcValidation(siiContainer, specfile):
    """Iterates over all Sii entries of a specfile in siiContainer and validates
    if they surpass a user defined quality threshold. The parameters for
    validation are defined in ``siiContainer.info[specfile]``:

        - ``qcAttr``, ``qcCutoff`` and ``qcLargerBetter``

    In addition to passing this validation, a ``Sii`` also has to be at the first
    list position in the ``siiContainer.container``. If both criteria are met
    the attribute ``Sii.isValid`` is set to ``True``.

    :param siiContainer: instance of :class:`maspy.core.SiiContainer`
    :param specfile: unambiguous identifier of a ms-run file. Is also used as
        a reference to other MasPy file containers.
    """
    attr = siiContainer.info[specfile]['qcAttr']
    cutOff = siiContainer.info[specfile]['qcCutoff']
    if siiContainer.info[specfile]['qcLargerBetter']:
        evaluator = lambda sii: getattr(sii, attr) >= cutOff and sii.rank == 1
    else:
        evaluator = lambda sii: getattr(sii, attr) <= cutOff and sii.rank == 1

    for itemList in listvalues(siiContainer.container[specfile]):
        #Set the .isValid attribute of all Sii to False
        for sii in itemList:
            sii.isValid = False

        #Validate the first Sii
        sii = itemList[0]
        if evaluator(sii):
            sii.isValid = True
Example #15
File: graph.py Project: svbar/OSCE-Exploit
    def update_node_id(self, current_id, new_id):
        """
        Simply updating the id attribute of a node will sever the edges to / from the given node. This routine will
        correctly update the edges as well.

        @type  current_id: long
        @param current_id: Current ID of node whose ID we want to update
        @type  new_id:     long
        @param new_id:     New ID to update to.
        """

        if current_id not in self.nodes:
            return

        # update the node.
        node = self.nodes[current_id]
        del self.nodes[current_id]
        node.id = new_id
        self.nodes[node.id] = node

        # update the edges.
        for edge in [
                edge for edge in listvalues(self.edges)
                if current_id in (edge.src, edge.dst)
        ]:
            del self.edges[edge.id]

            if edge.src == current_id:
                edge.src = new_id
            if edge.dst == current_id:
                edge.dst = new_id

            edge.id = (edge.src << 32) + edge.dst

            self.edges[edge.id] = edge
Example #16
def calcDistMatchArr(matchArr, tKey, mKey):
    """Calculate the euclidean distance of all array positions in "matchArr".

    :param matchArr: a dictionary of ``numpy.arrays`` containing at least two
        entries that are treated as cartesian coordinates.
    :param tKey: #TODO: docstring
    :param mKey: #TODO: docstring

    :returns: #TODO: docstring

            {'eucDist': numpy.array([eucDistance, eucDistance, ...]),
             'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
             }
    """
    #Calculate a sorted list of all euclidean feature distances
    matchArrSize = listvalues(matchArr)[0].size

    distInfo = {'posPairs': list(), 'eucDist': list()}
    _matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0,
                             1)

    for pos1 in range(matchArrSize - 1):
        for pos2 in range(pos1 + 1, matchArrSize):
            distInfo['posPairs'].append((pos1, pos2))
    distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
    distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)

    distSort = numpy.argsort(distInfo['eucDist'])
    for key in list(viewkeys(distInfo)):
        distInfo[key] = distInfo[key][distSort]

    return distInfo
Example #17
File: ilp_solver.py Project: nesl/Heliot
def _get_path_length(path, Gt, Gd, result_mapping):
    log.info('path: {}'.format(path))
    path_vars = []
    # first node: src
    ti = path[0]
    di = Gt.node[ti][GtInfo.DEVICE]
    # device_mapping start from path[1] to path[N-1]
    for j in range(1, len(path) - 1):
        tj = path[j]
        dj = result_mapping[tj]
        # get transmission latency from di -> dj
        Ld_di_dj = Gd[di][dj][GdInfo.LATENCY]
        path_vars.append(Ld_di_dj)
        log.debug('Ld_di_dj (move {} -> {}) = {}'.format(di, dj, Ld_di_dj))
        # get computation latency for task tj at dj
        dj_type = Gd.node[dj][GdInfo.DEVICE_TYPE]
        # get latency of the default build flavor
        Lt_tj_dj = listvalues(Gt.node[tj][GtInfo.LATENCY_INFO][dj_type])[0]
        path_vars.append(Lt_tj_dj)
        log.debug('Lt_tj_dj (do {} at {}) = {}'.format(tj, dj, Lt_tj_dj))
        ti = tj
        di = dj
    # last node: dst
    tj = path[len(path) - 1]
    dj = Gt.node[tj][GtInfo.DEVICE]
    Ld_di_dj = Gd[di][dj][GdInfo.LATENCY]
    path_vars.append(Ld_di_dj)
    log.debug('Ld_di_dj (move {} -> {}) = {}'.format(di, dj, Ld_di_dj))
    path_length = sum(path_vars)
    log.info('\tlength: {}, {}'.format(path_length, path_vars))
    return path_length
Example #18
 def __init__(self, path_response, context_name, context_id,
              req_sort_order_type):
     # common.debug('VideoListSorted data: {}', path_response)
     self.perpetual_range_selector = path_response.get(
         '_perpetual_range_selector')
     self.data = path_response
     self.context_name = context_name
     has_data = bool(
         (context_id and path_response.get(context_name)
          and path_response[context_name].get(context_id))
         or (not context_id and path_response.get(context_name)))
     self.data_lists = {}
     self.videos = OrderedDict()
     self.artitem = None
     self.contained_titles = None
     self.videoids = None
     if has_data:
         self.data_lists = path_response[context_name][context_id][req_sort_order_type] \
             if context_id else path_response[context_name][req_sort_order_type]
         self.videos = OrderedDict(resolve_refs(self.data_lists, self.data))
         if self.videos:
             # self.artitem = next(itervalues(self.videos))
             self.artitem = listvalues(self.videos)[0]
             self.contained_titles = _get_titles(self.videos)
             try:
                 self.videoids = _get_videoids(self.videos)
             except KeyError:
                 self.videoids = None
Example #19
File: graph.py Project: svbar/OSCE-Exploit
    def find_edge(self, attribute, value):
        """
        Find and return the edge with the specified attribute / value pair.

        @type  attribute: str
        @param attribute: Attribute name we are looking for
        @type  value:     Mixed
        @param value:     Value of attribute we are looking for

        @rtype:  Mixed
        @return: Edge, if attribute / value pair is matched. None otherwise.
        """

        # if the attribute to search for is the id, simply return the edge from the internal hash.
        if attribute == "id" and value in self.edges:
            return self.edges[value]

        # step through all the edges looking for the given attribute/value pair.
        else:
            # TODO: Verify that this actually works? Was broken when I got here ;-P
            for node_edge in listvalues(self.edges):
                if hasattr(node_edge, attribute):
                    if getattr(node_edge, attribute) == value:
                        return node_edge

        return None
Example #20
def loadConfigurationFile(filename):
    """
    _loadConfigurationFile_

    Load a Configuration File

    """

    cfgBaseName = os.path.basename(filename).replace(".py", "")
    cfgDirName = os.path.dirname(filename)
    if not cfgDirName:
        modPath = imp.find_module(cfgBaseName)
    else:
        modPath = imp.find_module(cfgBaseName, [cfgDirName])
    try:
        modRef = imp.load_module(cfgBaseName, modPath[0], modPath[1],
                                 modPath[2])
    except Exception as ex:
        msg = "Unable to load Configuration File:\n"
        msg += "%s\n" % filename
        msg += "Due to error:\n"
        msg += str(ex)
        msg += str(traceback.format_exc())
        raise RuntimeError(msg)

    for attr in listvalues(modRef.__dict__):
        if isinstance(attr, Configuration):
            return attr

    # //
    # //  couldn't find a Configuration instance
    # //
    msg = "Unable to find a Configuration object instance in file:\n"
    msg += "%s\n" % filename
    raise RuntimeError(msg)
Example #21
File: CORE.py Project: CoAxLab/radd
    def set_conditions(self, depends_on=None, bwfactors=None):
        data = self.data.copy()
        self.depends_on = depends_on
        self.conds = np.unique(np.hstack(listvalues(self.depends_on))).tolist()
        self.nconds = len(self.conds)
        if 'flat' in self.conds:
            self.is_flat = True
            data['flat'] = 'flat'
            self.data = data.copy()
        else:
            self.is_flat = False
        clevels = [np.sort(data[c].unique()) for c in self.conds]
        clevels = [np.array([str(lvl) for lvl in levels]) for levels in clevels]
        self.clmap = {c: lvls for c, lvls in zip(self.conds, clevels)}
        self.cond_matrix = np.array([lvls.size for lvls in clevels])
        self.nlevels = np.cumprod(self.cond_matrix)[-1]
        self.groups = np.hstack([['idx'], self.conds]).tolist()

        self.__format_pcmap__()
        if hasattr(self, 'ssdDF') and not self.is_nested:
            self.__set_ssd_info__()

        if hasattr(self, 'fitparams') and not self.is_nested:
            self.generate_model_id()
            self.set_fitparams(nlevels=self.nlevels, clmap=self.clmap, model_id=self.model_id)
Example #22
def set_skim_wrapper_targets(df, skims):
    """
    Add the dataframe to the SkimDictWrapper object so that it can be dereferenced
    using the parameters of the skims object.

    Parameters
    ----------
    df : pandas.DataFrame
        Table to which to add skim data as new columns.
        `df` is modified in-place.
    skims : SkimDictWrapper or SkimStackWrapper object, or a list or dict of skims
        The skims object is used to contain multiple matrices of
        origin-destination impedances.  Make sure to also add it to the
        locals_d below in order to access it in expressions.  The *only* job
        of this method in regards to skims is to call set_df with the
        dataframe that comes back from interacting choosers with
        alternatives.  See the skims module for more documentation on how
        the skims object is intended to be used.
    """

    if isinstance(skims, list):
        for skim in skims:
            assert isinstance(skim, SkimDictWrapper) or isinstance(skim, SkimStackWrapper)
            skim.set_df(df)
    elif isinstance(skims, dict):
        # if it is a dict, then check for known types, ignore anything we don't recognize as a skim
        # (this allows putting skim column names in same dict as skims for use in locals_dicts)
        for skim in listvalues(skims):
            if isinstance(skim, SkimDictWrapper) or isinstance(skim, SkimStackWrapper):
                skim.set_df(df)
    else:
        assert isinstance(skims, SkimDictWrapper) or isinstance(skims, SkimStackWrapper)
        skims.set_df(df)
Example #23
def calcDistMatchArr(matchArr, tKey, mKey):
    """Calculate the euclidean distance of all array positions in "matchArr".

    :param matchArr: a dictionary of ``numpy.arrays`` containing at least two
        entries that are treated as cartesian coordinates.
    :param tKey: #TODO: docstring
    :param mKey: #TODO: docstring

    :returns: #TODO: docstring

            {'eucDist': numpy.array([eucDistance, eucDistance, ...]),
             'posPairs': numpy.array([[pos1, pos2], [pos1, pos2], ...])
             }
    """
    #Calculate a sorted list of all euclidean feature distances
    matchArrSize = listvalues(matchArr)[0].size

    distInfo = {'posPairs': list(), 'eucDist': list()}
    _matrix = numpy.swapaxes(numpy.array([matchArr[tKey], matchArr[mKey]]), 0, 1)

    for pos1 in range(matchArrSize-1):
        for pos2 in range(pos1+1, matchArrSize):
            distInfo['posPairs'].append((pos1, pos2))
    distInfo['posPairs'] = numpy.array(distInfo['posPairs'])
    distInfo['eucDist'] = scipy.spatial.distance.pdist(_matrix)

    distSort = numpy.argsort(distInfo['eucDist'])
    for key in list(viewkeys(distInfo)):
        distInfo[key] = distInfo[key][distSort]

    return distInfo
Example #24
 def __init__(self, path_response, list_id=None):
     # common.debug('VideoList data: {}', path_response)
     self.perpetual_range_selector = path_response.get(
         '_perpetual_range_selector')
     self.data = path_response
     has_data = bool(path_response.get('lists'))
     self.videos = OrderedDict()
     self.artitem = None
     self.contained_titles = None
     self.videoids = None
     if has_data:
         # Generate one videoid, either from the first id in the list or from the specified one
         self.videoid = common.VideoId(videoid=(
             list_id if list_id else next(iter(self.data['lists']))))
         self.videos = OrderedDict(
             resolve_refs(self.data['lists'][self.videoid.value],
                          self.data))
         if self.videos:
             # self.artitem = next(itervalues(self.videos))
             self.artitem = listvalues(self.videos)[0]
             self.contained_titles = _get_titles(self.videos)
             try:
                 self.videoids = _get_videoids(self.videos)
             except KeyError:
                 self.videoids = None
Example #25
 def __init__(self, path_response, list_id):
     # common.debug('VideoListLoCo data: {}', path_response)
     self.perpetual_range_selector = path_response.get(
         '_perpetual_range_selector')
     self.data = path_response
     self.list_id = list_id
     self.videoids = None
     # Set a 'UNSPECIFIED' type videoid (special handling for menus see parse_info in infolabels.py)
     self.videoid = common.VideoId(videoid=list_id)
     self.contained_titles = None
     self.artitem = None
     if 'lists' not in path_response:
         # No data in path response
         return
     # Set videos data for the specified list id
     self.videos = OrderedDict(
         resolve_refs(self.data['lists'][list_id], self.data))
     if not self.videos:
         return
     # Set first videos titles (special handling for menus see parse_info in infolabels.py)
     self.contained_titles = _get_titles(self.videos)
     # Set art data of first video (special handling for menus see parse_info in infolabels.py)
     self.artitem = listvalues(self.videos)[0]
     try:
         self.videoids = _get_videoids(self.videos)
     except KeyError:
         self.videoids = None
Example #26
File: stack.py Project: jpza/ekscli
    def create(self):
        self._validate_creation()
        self._create_cfn_template()
        tags = [{'Key': k, 'Value': v} for (k, v) in viewitems(self.tags)]

        cf = boto3.session.Session().resource('cloudformation')
        reporter = ResourceReporter()
        try:
            stack = cf.create_stack(StackName=self.stack_name, TemplateBody=self.tpl.to_yaml(),
                                    Capabilities=['CAPABILITY_NAMED_IAM'], Tags=tags)

            stack = reporter.report_stack_creation(self.stack_name, listvalues(self.resources), stack.stack_id)
            role = {o.get('OutputKey'): o.get('OutputValue') for o in stack.outputs}.get(self.RESOURCE_NG_ROLE.name)
        except Exception as e:
            ec2 = boto3.session.Session().resource('ec2')
            if self.keypair_imported:
                r = copy(self.RESOURCE_NG_KEYPAIR)
                r.resource_id = self.keypair
                ec2.KeyPair(self.keypair).delete()
                r.status = Status.deleted
                reporter.succeed(resource=r)

            raise e

        self._update_configmap(os.path.expanduser(self.kubeconf), role)
        return NodeGroupInfo(self.name, self.instance, self.min, self.max, role)
Example #27
    def make_unique(self, caller):
        """
        Remove duplicate command-keys (unsafe)

        Args:
            caller (object): Commands on this object will
                get preference in the duplicate removal.

        Notes:
            This is an unsafe command meant to clean out a cmdset of
            doublet commands after it has been created. It is useful
            for commands inheriting cmdsets from the cmdhandler where
            obj-based cmdsets always are added double. Doublets will
            be weeded out with preference to commands defined on
            caller, otherwise just by first-come-first-served.

        """
        unique = {}
        for cmd in self.commands:
            if cmd.key in unique:
                ocmd = unique[cmd.key]
                if (hasattr(cmd, 'obj') and cmd.obj == caller) and not \
                        (hasattr(ocmd, 'obj') and ocmd.obj == caller):
                    unique[cmd.key] = cmd
            else:
                unique[cmd.key] = cmd
        self.commands = listvalues(unique)
Example #28
def fixDBSmissingFileAssoc():
    os.environ[
        'WMAGENT_CONFIG'] = '/data/srv/wmagent/current/config/wmagent/config.py'
    connectToDB()
    myThread = threading.currentThread()
    formatter = DBFormatter(logging, myThread.dbi)
    problemFilesSql = """
        select dbsbuffer_file.id as fileid, dbsbuffer_location.id as seid from wmbs_file_location fl
            inner join wmbs_file_details fd on fd.id = fl.fileid
            inner join wmbs_location_pnns wls on wls.location = fl.location
            inner join wmbs_location wl on wl.id = fl.location
            inner join dbsbuffer_location on dbsbuffer_location.pnn = wls.pnn
            inner join dbsbuffer_file on dbsbuffer_file.lfn = fd.lfn
            where fd.lfn in (select df.lfn from dbsbuffer_file df
                               left outer join dbsbuffer_file_location dfl on df.id = dfl.filename
                               where dfl.location is null)
                      """
    unfinishedTasks = formatter.formatDict(
        formatter.dbi.processData(problemFilesSql))
    print("%s lenth" % len(unfinishedTasks))
    result = {}
    for row in unfinishedTasks:
        result.setdefault(row["fileid"], row)
        print(row)
    print("trimed %s lenth" % len(result))
    insertSQL = """INSERT INTO dbsbuffer_file_location (filename, location)
               VALUES (:fileid, :seid)"""
    done = formatter.dbi.processData(insertSQL, listvalues(result))
    print("inserted %s" % done)
Example #29
    def formatWorkflow(self, results):
        workflow = {}
        tran = Transitions()
        for result in results:
            if result["task"] not in workflow:
                workflow[result["task"]] = {}
                for state in tran.states():
                    workflow[result["task"]][state] = 0

                workflow[result["task"]][result["state"]] = result["num_job"]
                workflow[result["task"]]['total_jobs'] = result["num_job"]
                workflow[result["task"]]["real_success"] = int(result["success"])
                workflow[result["task"]]["id"] = result["id"]
                workflow[result["task"]]["wmspec"] = result["wmspec"]
                workflow[result["task"]]["task"] = result["task"]
                workflow[result["task"]]["real_fail"] = self.failCount(result)
                workflow[result["task"]]['processing'] = self.processingCount(result)
            else:
                workflow[result["task"]][result["state"]] = result["num_job"]
                workflow[result["task"]]['total_jobs'] += result["num_job"]
                workflow[result["task"]]["real_success"] += int(result["success"])
                workflow[result["task"]]["real_fail"] += self.failCount(result)
                workflow[result["task"]]['processing'] += self.processingCount(result)

        # need to order by id (client side)
        return listvalues(workflow)
Example #30
File: graph.py Project: svbar/OSCE-Exploit
    def render_graph_graphviz(self):
        """
        Render the graphviz graph structure.

        @rtype:  pydot.Dot
        @return: Pydot object representing entire graph
        """
        dot_graph = pydot.Dot()

        for node in listvalues(self.nodes):
            dot_graph.add_node(node.render_node_graphviz(self))

        for edge in listvalues(self.edges):
            dot_graph.add_edge(edge.render_edge_graphviz(self))

        return dot_graph
Example #31
    def repack_small_blocks(self, force=False, sync=False, closed_file_size=0):
        """Packs small blocks together before uploading"""

        self._pending_write_size += closed_file_size

        # Check if there are enough small blocks for filling up one in full
        if not (force or (self._pending_write_size >= config.KEEP_BLOCK_SIZE)):
            return

        # Search blocks ready for getting packed together before being
        # committed to Keep.
        # A WRITABLE block always has an owner.
        # A WRITABLE block with its owner.closed() implies that its
        # size is <= KEEP_BLOCK_SIZE/2.
        try:
            small_blocks = [
                b for b in listvalues(self._bufferblocks)
                if b.state() == _BufferBlock.WRITABLE and b.owner.closed()
            ]
        except AttributeError:
            # Writable blocks without owner shouldn't exist.
            raise UnownedBlockError()

        if len(small_blocks) <= 1:
            # Not enough small blocks for repacking
            return

        for bb in small_blocks:
            bb.repack_writes()

        # Update the pending write size count with its true value, just in case
        # some small file was opened, written and closed several times.
        self._pending_write_size = sum([b.size() for b in small_blocks])

        if self._pending_write_size < config.KEEP_BLOCK_SIZE and not force:
            return

        new_bb = self._alloc_bufferblock()
        new_bb.owner = []
        files = []
        while len(small_blocks) > 0 and (
                new_bb.write_pointer +
                small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:
            bb = small_blocks.pop(0)
            new_bb.owner.append(bb.owner)
            self._pending_write_size -= bb.size()
            new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())
            files.append((bb, new_bb.write_pointer - bb.size()))

        self.commit_bufferblock(new_bb, sync=sync)

        for bb, new_bb_segment_offset in files:
            newsegs = bb.owner.segments()
            for s in newsegs:
                if s.locator == bb.blockid:
                    s.locator = new_bb.blockid
                    s.segment_offset = new_bb_segment_offset + s.segment_offset
            bb.owner.set_segments(newsegs)
            self._delete_bufferblock(bb.blockid)
Example #32
File: graph.py Project: svbar/OSCE-Exploit
    def graph_cat(self, other_graph):
        """
        Concatenate the other graph into the current one.

        @todo: Add support for clusters

        @type  other_graph: pgraph.Graph
        @param other_graph: Graph to concatenate into this one.
        """

        for other_node in listvalues(other_graph.nodes):
            self.add_node(other_node)

        for other_edge in listvalues(other_graph.edges):
            self.add_edge(other_edge)

        return self
Example #33
    def execute(self, siteName, conn=None, transaction=False):
        binds = {"location": siteName}
        result = self.dbi.processData(self.sql,
                                      binds,
                                      conn=conn,
                                      transaction=transaction)

        return listvalues(result[0].fetchall()[0])[0]
Example #34
 def __init__(self, path_response):
     self.perpetual_range_selector = path_response.get('_perpetual_range_selector')
     self.data = path_response
     self.videos = OrderedDict(self.data.get('videos', {}))
     self.videoids = _get_videoids(self.videos)
     # self.artitem = next(itervalues(self.videos))
     self.artitem = listvalues(self.videos)[0] if self.videos else None
     self.contained_titles = _get_titles(self.videos)
Example #35
File: graph.py Project: svbar/OSCE-Exploit
    def render_graph_gml(self):
        """
        Render the GML graph description.

        @rtype:  String
        @return: GML graph description.
        """

        gml = 'Creator "pGRAPH - Pedram Amini <*****@*****.**>"\n'
        gml += "directed 1\n"

        # open the graph tag.
        gml += "graph [\n"

        # add the nodes to the GML definition.
        for node in listvalues(self.nodes):
            gml += node.render_node_gml(self)

        # add the edges to the GML definition.
        for edge in listvalues(self.edges):
            gml += edge.render_edge_gml(self)

        # close the graph tag.
        gml += "]\n"
        """
        TODO: Complete cluster rendering
        # if clusters exist.
        if len(self.clusters):
            # open the rootcluster tag.
            gml += 'rootcluster [\n'

            # add the clusters to the GML definition.
            for cluster in self.clusters:
                gml += cluster.render()

            # add the clusterless nodes to the GML definition.
            for node in self.nodes:
                if not self.find_cluster_by_node("id", node.id):
                    gml += '    vertex "%d"\n' % node.id

            # close the rootcluster tag.
            gml += ']\n'
        """

        return gml
Example #36
    def format(self, results):

        if len(results) == 0:
            return False
        else:
            format = []
            for i in results[0].fetchall():
                format.append(listvalues(i)[0])
            return format
Example #37
def saygo(depends_on={}, cond_map=None, kind='xdpm', fit_on='subjects'):
    """ generate confirmation message that model is prepared and ready to fit.
    repeats structure and important fit details for user to confirm """
    depkeys = describe_model(depends_on)
    if 'x' in kind:
        bias = '(w/ dynamic bias)'
    else:
        bias = ""
    dep = listvalues(depends_on)
    # flatten list of all cond levels
    lbls = ', '.join(sum(listvalues(depends_on), []))
    msg = get_nonsense()
    strings = (kind, bias, fit_on, depkeys, dep, lbls, msg)
    print("""
      Model is prepared to fit %s model %s to %s data,
      allowing %s to vary across levels of %s (%s)
      %s \n""" % strings)
    return True
Example #38
File: graph.py Project: svbar/OSCE-Exploit
    def graph_sub(self, other_graph):
        """
        Remove the elements shared between the current graph and other graph from the current
        graph.

        @todo: Add support for clusters

        @type  other_graph: pgraph.Graph
        @param other_graph: Graph to diff/remove against
        """

        for other_node in listvalues(other_graph.nodes):
            self.del_node(other_node.id)

        for other_edge in listvalues(other_graph.edges):
            self.del_edge(None, other_edge.src, other_edge.dst)

        return self
Example #39
 def make_f_cols(self):
     """ make header names for various fit statistics in fitDF
     (model parameters, goodness-of-fit measures, etc)
     """
     params = np.sort(list(self.inits))
     if not self.model.is_flat:
         cond_param_names = listvalues(self.model.clmap)
         params = np.hstack([params, np.squeeze(cond_param_names)]).tolist()
     fit_cols = ['nfev', 'nvary', 'df', 'chi', 'rchi', 'logp', 'AIC', 'BIC', 'cnvrg']
     self.f_cols = np.hstack([['idx'], params, fit_cols]).tolist()
Example #40
File: arvfile.py Project: wtsi-hgi/arvados
    def repack_small_blocks(self, force=False, sync=False, closed_file_size=0):
        """Packs small blocks together before uploading"""

        self._pending_write_size += closed_file_size

        # Check if there are enough small blocks for filling up one in full
        if not (force or (self._pending_write_size >= config.KEEP_BLOCK_SIZE)):
            return

        # Search blocks ready for getting packed together before being
        # committed to Keep.
        # A WRITABLE block always has an owner.
        # A WRITABLE block with its owner.closed() implies that its
        # size is <= KEEP_BLOCK_SIZE/2.
        try:
            small_blocks = [b for b in listvalues(self._bufferblocks)
                            if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]
        except AttributeError:
            # Writable blocks without owner shouldn't exist.
            raise UnownedBlockError()

        if len(small_blocks) <= 1:
            # Not enough small blocks for repacking
            return

        for bb in small_blocks:
            bb.repack_writes()

        # Update the pending write size count with its true value, just in case
        # some small file was opened, written and closed several times.
        self._pending_write_size = sum([b.size() for b in small_blocks])

        if self._pending_write_size < config.KEEP_BLOCK_SIZE and not force:
            return

        new_bb = self._alloc_bufferblock()
        new_bb.owner = []
        files = []
        while len(small_blocks) > 0 and (new_bb.write_pointer + small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:
            bb = small_blocks.pop(0)
            new_bb.owner.append(bb.owner)
            self._pending_write_size -= bb.size()
            new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())
            files.append((bb, new_bb.write_pointer - bb.size()))

        self.commit_bufferblock(new_bb, sync=sync)

        for bb, new_bb_segment_offset in files:
            newsegs = bb.owner.segments()
            for s in newsegs:
                if s.locator == bb.blockid:
                    s.locator = new_bb.blockid
                    s.segment_offset = new_bb_segment_offset+s.segment_offset
            bb.owner.set_segments(newsegs)
            self._delete_bufferblock(bb.blockid)
Example #41
File: CORE.py Project: dunovank/radd_kd
 def set_conditions(self, depends_on=None):
     data = self.data.copy()
     self.depends_on = depends_on
     self.conds = np.unique(listvalues(self.depends_on)).tolist()
     if 'flat' in self.conds:
         self.is_flat = True
         data['flat'] = 'flat'
         self.data = data.copy()
     else:
         self.is_flat = False
     self.nconds = len(self.conds)
     self.clmap = {c: np.sort(data[c].unique()) for c in self.conds}
     self.nlevels = np.sum([len(lvls) for lvls in listvalues(self.clmap)])
     self.groups = np.hstack([['idx'], self.conds]).tolist()
     self.__format_pcmap__()
     if hasattr(self, 'ssd'):
         self.__set_ssd_info__()
     if hasattr(self, 'fitparams'):
         self.generate_model_id()
         self.set_fitparams(nlevels=self.nlevels, clmap=self.clmap)
Example #42
File: put.py Project: tomclegg/arvados
 def _collection_size(self, collection):
     """
     Recursively get the total size of the collection
     """
     size = 0
     for item in listvalues(collection):
         if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):
             size += self._collection_size(item)
         else:
             size += item.size()
     return size
Example #43
File: stream.py Project: tomclegg/arvados
 def manifest_text(self, strip=False):
     manifest_text = [self.name().replace(' ', '\\040')]
     if strip:
         for d in self._data_locators:
             m = re.match(r'^[0-9a-f]{32}\+\d+', d.locator)
             manifest_text.append(m.group(0))
     else:
         manifest_text.extend([d.locator for d in self._data_locators])
     manifest_text.extend([' '.join(["{}:{}:{}".format(seg.locator, seg.range_size, f.name.replace(' ', '\\040'))
                                     for seg in f.segments])
                           for f in listvalues(self._files)])
     return ' '.join(manifest_text) + '\n'
Example #44
    def photos(self):
        if not self._photo_assets:
            child_assets = self.data.get('childAssetsBinaryFeed')
            if not child_assets:
                raise PyiCloudBinaryFeedParseError(
                    "Missing childAssetsBinaryFeed in photo album")

            self._photo_assets = listvalues(_parse_binary_feed(child_assets))

            for asset in self._photo_assets:
                asset.album = self

        return self._photo_assets
Example #45
File: colors.py Project: CoAxLab/radd
def get_cpals(name='all', aslist=False, random=False):
    rpal = lambda nc: sns.blend_palette(['#e88379', '#de143d'], n_colors=nc)
    bpal = lambda nc: sns.blend_palette(['#81aedb', '#3572C6'], n_colors=nc)
    gpal = lambda nc: sns.blend_palette(['#65b88f', '#27ae60'], n_colors=nc)
    ppal = lambda nc: sns.blend_palette(['#9B59B6', "#663399"], n_colors=nc)
    heat = lambda nc: sns.blend_palette(['#f39c12', '#e5344a'], n_colors=nc)
    cool = lambda nc: sns.blend_palette(["#4168B7", "#27ae60"], n_colors=nc)
    slate = lambda nc: sns.blend_palette(['#95A5A6', "#6C7A89"], n_colors=nc)
    wet = lambda nc: sns.blend_palette(['#34495e', "#99A4AE"], n_colors=nc)
    fire = lambda nc: sns.blend_palette(['#e5344a', "#f39c12"], n_colors=nc)
    bupu = lambda nc: sns.blend_palette(['#8E44AD', "#3498db"], n_colors=nc)
    color_dict = {'bpal': bpal, 'gpal': gpal, 'rpal': rpal, 'ppal': ppal, 'heat': heat, 'cool': cool, 'slate': slate, 'wet': wet, 'fire':fire, 'bupu': bupu}
    if random:
        pals = listvalues(color_dict)
        i = randint(0, len(pals))  # single scalar index, so plain list indexing works
        return pals[i]
    if name=='all':
        if aslist:
            return listvalues(color_dict)
        return color_dict
    else:
        return color_dict[name]
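A brief usage sketch (variable names illustrative): every entry of the returned dict is a palette factory that takes the number of colours, so, assuming seaborn is importable:

pals = get_cpals()                  # dict of palette factories
heat5 = pals['heat'](5)             # five-colour blend from orange toward red
all_pals = get_cpals(aslist=True)   # the same factories as a plain list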
Example #46
File: wsgi_server.py Project: BielosX/ycmd
 def Shutdown( self ):
   """Properly shutdown the server."""
   self.shutdown_requested = True
   # Shutdown waitress threads.
   self.task_dispatcher.shutdown()
   # Close asyncore channels.
   # We don't use itervalues here because _map is modified while looping
   # through it.
   # NOTE: _map is an attribute from the asyncore.dispatcher class, which is a
   # base class of TcpWSGIServer. This may change in future versions of
   # waitress so extra care should be taken when updating waitress.
   for channel in listvalues( self._map ):
     channel.close()
Example #47
File: xform.py Project: onaio/onadata
    def get_choice_label(self, field, choice_value, lang='English'):
        choices = [
            choice for choice in field.children if choice.name == choice_value
        ]
        if len(choices):
            choice = choices[0]
            label = choice.label

            if isinstance(label, dict):
                label = label.get(lang, listvalues(choice.label)[0])

            return label

        return choice_value
Example #48
    def all(self, interval=None):
        """
        Get all subscriptions.

        Args:
            interval (int): Limit match to tickers with this interval.

        Returns:
            tickers (list): If `interval` was given, this is a list of
                tickers using that interval.
            tickerpool_layout (dict): If `interval` was *not* given,
                this is a dict {interval1: [ticker1, ticker2, ...],  ...}

        """
        if interval is None:
            # return dict of all, ordered by interval
            return dict((interval, listvalues(ticker.subscriptions))
                         for interval, ticker in self.ticker_pool.tickers.items())
        else:
            # get individual interval
            ticker = self.ticker_pool.tickers.get(interval, None)
            if ticker:
                return listvalues(ticker.subscriptions)
Example #49
File: colors.py Project: dunovank/radd_kd
def get_cpals(name='all', aslist=False):
    rpal = lambda nc: sns.blend_palette(['#e88379', '#c0392b'], n_colors=nc)
    bpal = lambda nc: sns.blend_palette(['#81aedb', '#3A539B'], n_colors=nc)
    gpal = lambda nc: sns.blend_palette(['#65b88f', '#27ae60'], n_colors=nc)
    ppal = lambda nc: sns.blend_palette(['#848bb6', "#9B59B6"], n_colors=nc)
    heat = lambda nc: sns.blend_palette(['#f39c12', '#c0392b'], n_colors=nc)
    cool = lambda nc: sns.blend_palette(["#4168B7", "#27ae60"], n_colors=nc)
    slate = lambda nc: sns.blend_palette(['#95A5A6', "#6C7A89"], n_colors=nc)
    color_dict = {'bpal': bpal, 'gpal': gpal, 'rpal': rpal, 'ppal': ppal, 'heat': heat, 'cool': cool, 'slate': slate}
    if name=='all':
        if aslist:
            return listvalues(color_dict)
        return color_dict
    else:
        return color_dict[name]
Example #50
def logger(param_report, finfo={}, popt={}, fitparams={}, kind='xdpm', fit_on='average', array_names = ['y', 'wts', 'yhat']):
    """ logs information by opening and appending to an existing log file
    (named according to parameter dependencies) or creating a new log.
    """
    # functions for writing numpy arrays to strings (ex. "y = np.array([1,2,3])"")
    name_equals = lambda name, strvector: '{0} = array([{1}])'.format(name, strvector)
    stringify = lambda x: name_equals(x[0], ', '.join('{:f}'.format(n) for n in x[1]))
    # brief-ify fitparams reference
    fp = dict(deepcopy(fitparams))
    # list flattened y, wts, and yhat arrays
    arrays = [fp[k].flatten() for k in array_names]
    # write arays to strings for easy logging
    names_arrays = zip(array_names, arrays)
    y_str, wts_str, yhat_str = map(stringify, names_arrays)

    if fp['nlevels']==1:
        fit_on = ' '.join([fit_on, 'FLAT'])
        dep_id = "flat model (no conditional parameters)"
        fname = './' + kind + '_flat.txt'
    else:
        depends_on = fp['depends_on']
        fit_on = ' '.join([fit_on, 'FULL'])
        pkeys = '_'.join(list(depends_on))
        pconds = '_'.join(listvalues(depends_on))
        dep_id = "{0} depends on {1}".format(pconds, pkeys)
        fname = '_'.join(['./' + kind, pconds + '.txt'])

    with open(fname, 'a') as f:
        f.write('\n\n')
        f.write('==' * 30 + '\n\n')
        f.write(str(fit_on) + '\n')
        f.write(str(kind) + '\n')
        f.write(str(dep_id) + '\n\n')
        f.write(wts_str + '\n\n')
        f.write(yhat_str + '\n\n')
        f.write(y_str + '\n\n')
        f.write('--' * 30 + '\n')
        f.write("FIT REPORT")
        f.write('\n' + '--' * 30 + '\n')
        f.write(param_report)
        f.write('\n' + '--' * 30 + '\n')
        f.write('='.join(['popt', repr(popt)]) + '\n')
        f.write('AIC: %.8f' % finfo['AIC'] + '\n')
        f.write('BIC: %.8f' % finfo['BIC'] + '\n')
        f.write('chi: %.8f' % finfo['chi'] + '\n')
        f.write('rchi: %.8f' % finfo['rchi'] + '\n')
        f.write('converged: %s' % finfo['cnvrg'] + '\n\n')
        f.write('==' * 30 + '\n\n\n')
Example #51
 def updateIntensities(self, fiContainer, iKey='intensity'):
     """ #TODO: docstring
     :param fiContainer:
     :param iKey: Attribute name of :class:`Fi` that contains the feature
         intensity or an abundance measure. Default "intensity"
     """
     for fgi in listvalues(self.container):
         intensities = list()
         specfileIds = {i: j for i, j in zip(fgi.specfiles, fgi.featureIds)}
         for specfile in self._matrixTemplate:
             if specfile in specfileIds:
                 fi = fiContainer.getItem(specfile, specfileIds[specfile])
                 intensities.append(getattr(fi, iKey))
             else:
                 intensities.append(None)
         fgi.intensities = intensities
Example #52
    def get_sessions(self, include_unloggedin=False):
        """
        Returns the connected session objects.

        Args:
            include_unloggedin (bool, optional): Also list Sessions
                that have not yet authenticated.

        Returns:
            sessions (list): A list of `Session` objects.

        """
        if include_unloggedin:
            return listvalues(self)
        else:
            return [session for session in self.values() if session.logged_in]
Example #53
File: theta.py Project: dunovank/radd_kd
def scalarize_params(params, pc_map=None, is_flat=True):
    """ scalarize all parameters in params dict """
    exclude = []
    if isinstance(params, pd.Series):
        params = params.to_dict()
    if pc_map is not None and not is_flat:
        exclude = np.sort(list(pc_map))
        p_conds = np.sort(listvalues(pc_map)).squeeze()
        for pkc in exclude:
            params[pkc] = array([params[pc] for pc in p_conds])
    for pk in list(params):
        if pk in exclude:
            continue
        if hasattr(params[pk], '__iter__'):
            try:
                params[pk] = np.asscalar(params[pk])
            except ValueError:
                params[pk] = np.mean(params[pk])
    return params
Example #54
def get_users(project, context, all_perms=True):
    """
    Return a list of users and organizations that have access to the project.
    """
    if all_perms:
        users = cache.get('{}{}'.format(PROJ_PERM_CACHE, project.pk))
        if users:
            return users

    data = {}
    for perm in project.projectuserobjectpermission_set.all():
        if perm.user_id not in data:
            user = perm.user

            if all_perms or user in [
                    context['request'].user, project.organization
            ]:
                data[perm.user_id] = {
                    'permissions': [],
                    'is_org': is_organization(user.profile),
                    'metadata': user.profile.metadata,
                    'first_name': user.first_name,
                    'last_name': user.last_name,
                    'user': user.username
                }
        if perm.user_id in data:
            data[perm.user_id]['permissions'].append(perm.permission.codename)

    for k in list(data):
        data[k]['permissions'].sort()
        data[k]['role'] = get_role(data[k]['permissions'], project)
        del data[k]['permissions']

    results = listvalues(data)

    if all_perms:
        cache.set('{}{}'.format(PROJ_PERM_CACHE, project.pk), results)

    return results
Example #55
def updateFgiAnnotationFromFi(fgiContainer, fiContainer, largerBetter):
    """ #TODO: docstring

    :param fgiContainer:
    :param fiContainer:
    :param largerBetter:
    """
    for fgi in listvalues(fgiContainer.container):
        annotations = list()
        for specfile, fiId in zip(fgi.specfiles, fgi.featureIds):
            fi = fiContainer.getItem(specfile, fiId)
            if not fi.isAnnotated:
                continue
            annotations.append([fi.score, fi.peptide, fi.sequence])
        annotations.sort(reverse=largerBetter)
        if len(annotations) > 0:
            fgi.isAnnotated = True
            fgi.score = annotations[0][0]
            fgi.peptide = annotations[0][1]
            fgi.sequence = annotations[0][2]
        else:
            fgi.isAnnotated = False
Example #56
def massTimeContinuityGroups(arrays, mKey, tKey, mLimit, tLimit):
    """ #TODO docstring

    :param arrays: a dictionary containing ``numpy.arrays``, must be sorted
        according to the "mKey" (mass key) value.
    :param mKey: "arrays" key that contains the mass ``numpy.array``
    :param tKey: "arrays" key that contains the time ``numpy.array``
    :param mLimit: maximal mass difference for separating continuity groups
    :param tLimit: maximal time difference for separating continuity groups

    :returns: a list containing array positions of continuous groups."""
    arrayPositions = numpy.array(range(listvalues(arrays)[0].size))

    finalGroupPositions = list()
    for start, end in continuityGrouping(arrays[mKey], mLimit):
        if start == end:
            finalGroupPositions.append(arrayPositions[start:end+1])
            continue

        #Perform time continuity grouping on the mass continuity groups
        preSelectionT = arrays[tKey][start:end+1]
        preSelectionM = arrays[mKey][start:end+1]
        preSelectionPositions = arrayPositions[start:end+1]
        _sort = numpy.argsort(preSelectionT)
        preGroups = continuityGrouping(preSelectionT[_sort], tLimit)

        #Perform a second round of mass continuity grouping
        finalGroupPrePos = list()
        for _start, _end in preGroups:
            preGroupPos = sorted(_sort[_start:_end+1])
            secGroups = continuityGrouping(preSelectionM[preGroupPos], mLimit)
            for fStart, fEnd in secGroups:
                finalGroupPrePos.append(preGroupPos[fStart:fEnd+1])

        #Add the final group positions
        for _pos in finalGroupPrePos:
            finalGroupPositions.append(preSelectionPositions[_pos])

    return finalGroupPositions
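
massTimeContinuityGroups relies on a continuityGrouping helper that is not part of this snippet. A minimal sketch of such a helper, under the assumption that it returns (start, end) index pairs for stretches of a sorted array whose consecutive values differ by at most the given limit (an illustration of the idea, not the MasPy implementation):

import numpy

def continuity_grouping_sketch(values, limit):
    # Return (start, end) index pairs for runs of a sorted 1D array in which
    # consecutive values differ by no more than "limit".
    groups = []
    if values.size == 0:
        return groups
    start = 0
    for pos in range(1, values.size):
        if values[pos] - values[pos - 1] > limit:
            groups.append((start, pos - 1))
            start = pos
    groups.append((start, values.size - 1))
    return groups

masses = numpy.array([100.0, 100.001, 100.2, 100.201])
print(continuity_grouping_sketch(masses, limit=0.01))  # [(0, 1), (2, 3)]
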
Example #57
def proximityGrouping(matchArr, distInfo, distLimit, categoryKey):
    """ #TODO: docstring. Group according to the distance value provided by
        ``distInfo['eucDist']`` with the limitation that each ... category value
        can occur only once per group.

    :param matchArr: #TODO: docstring
    :param distInfo: #TODO: docstring, must be sorted, provide keys "posPairs"
        and "eucDist". As generated by :func:`calcDistMatchArr()`
    :param distLimit: #TODO: docstring
    :param categoryKey: #TODO: docstring

    :returns: #TODO: docstring
    """
    #Group fi according to their proximity
    matchArrSize = listvalues(matchArr)[0].size

    linkageGroups = {p: [p] for p in range(matchArrSize)}
    posToGroup = {p: p for p in range(matchArrSize)}
    groupCategories = {p: set([s]) for p, s in zip(range(matchArrSize),
                                                  matchArr[categoryKey]
                                                  )
                      }
    for (pos1, pos2), dist in zip(distInfo['posPairs'], distInfo['eucDist']):
        if dist > distLimit:
            break

        id1 = posToGroup[pos1]
        id2 = posToGroup[pos2]
        if groupCategories[id1].intersection(groupCategories[id2]):
            continue

        linkageGroups[id1].extend(linkageGroups[id2])
        groupCategories[id1].update(groupCategories[id2])
        for _pos in linkageGroups[id2]:
            posToGroup[_pos] = id1
        del linkageGroups[id2]
        del groupCategories[id2]

    return linkageGroups
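
A toy invocation of proximityGrouping, assuming the function above is in scope; the arrays and the hand-built distInfo below are made up for illustration (normally distInfo would be produced by calcDistMatchArr):

import numpy

# Three matched features from two ms-runs; two entries of the same run must
# not end up in the same group.
matchArr = {'specfile': numpy.array(['run_1', 'run_2', 'run_1'])}
distInfo = {  # must be sorted by ascending distance
    'posPairs': [(0, 1), (0, 2), (1, 2)],
    'eucDist': numpy.array([0.1, 0.2, 0.3]),
}
groups = proximityGrouping(matchArr, distInfo, distLimit=0.25,
                           categoryKey='specfile')
# Positions 0 and 1 merge (different runs); 0 and 2 share 'run_1' and are kept
# apart; pair (1, 2) exceeds distLimit, so the loop stops there.
print(groups)  # {0: [0, 1], 2: [2]}
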
Example #58
File: put.py Project: tomclegg/arvados
 def _datablocks_on_item(self, item):
     """
     Return a list of datablock locators, recursively navigating
     through subcollections
     """
     if isinstance(item, arvados.arvfile.ArvadosFile):
         if item.size() == 0:
             # Empty file locator
             return ["d41d8cd98f00b204e9800998ecf8427e+0"]
         else:
             locators = []
             for segment in item.segments():
                 loc = segment.locator
                 locators.append(loc)
             return locators
     elif isinstance(item, arvados.collection.Collection):
         l = [self._datablocks_on_item(x) for x in listvalues(item)]
         # Fast list flattener method taken from:
         # http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python
         return [loc for sublist in l for loc in sublist]
     else:
         return None
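
The hard-coded locator for empty files is the MD5 digest of zero bytes followed by the size suffix '+0'; a quick standard-library check of that constant:

import hashlib

empty_locator = hashlib.md5(b'').hexdigest() + '+0'
print(empty_locator)  # d41d8cd98f00b204e9800998ecf8427e+0
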
Example #59
    def get_users(self, obj):
        xform_perms = []
        if obj:
            xform_perms = cache.get(
                '{}{}'.format(XFORM_PERMISSIONS_CACHE, obj.pk))
            if xform_perms:
                return xform_perms

            cache.set('{}{}'.format(XFORM_PERMISSIONS_CACHE, obj.pk),
                      xform_perms)
        data = {}
        for perm in obj.xformuserobjectpermission_set.all():
            if perm.user_id not in data:
                user = perm.user

                data[perm.user_id] = {
                    'permissions': [],
                    'is_org': is_organization(user.profile),
                    'metadata': user.profile.metadata,
                    'first_name': user.first_name,
                    'last_name': user.last_name,
                    'user': user.username
                }
            if perm.user_id in data:
                data[perm.user_id]['permissions'].append(
                    perm.permission.codename)

        for k in list(data):
            data[k]['permissions'].sort()
            data[k]['role'] = get_role(data[k]['permissions'], XForm)
            del (data[k]['permissions'])

        xform_perms = listvalues(data)

        cache.set('{}{}'.format(XFORM_PERMISSIONS_CACHE, obj.pk), xform_perms)

        return xform_perms
Example #60
File: reader.py Project: hollenstein/maspy
def applySiiRanking(siiContainer, specfile):
    """Iterates over all Sii entries of a specfile in siiContainer and sorts Sii
    elements of the same spectrum according to the score attribute specified in
    ``siiContainer.info[specfile]['rankAttr']``. Sorted Sii elements are then
    ranked according to their sorted position; if multiple Sii have the same
    score, all of them receive the same rank, and the next entry with a
    distinct score gets a rank equal to its list position.

    :param siiContainer: instance of :class:`maspy.core.SiiContainer`
    :param specfile: unambiguous identifier of a ms-run file. Is also used as
        a reference to other MasPy file containers.
    """
    attr = siiContainer.info[specfile]['rankAttr']
    reverse = siiContainer.info[specfile]['rankLargerBetter']
    for itemList in listvalues(siiContainer.container[specfile]):
        sortList = [(getattr(sii, attr), sii) for sii in itemList]
        itemList = [sii for score, sii in sorted(sortList, reverse=reverse)]

        #Rank Sii according to their position
        lastValue = None
        for itemPosition, item in enumerate(itemList, 1):
            if getattr(item, attr) != lastValue:
                rank = itemPosition
            item.rank = rank
            lastValue = getattr(item, attr)
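
The tie handling above corresponds to competition ranking: Sii with equal scores share a rank, and the next distinct score receives a rank equal to its 1-based list position. A minimal standalone sketch of the same ranking loop on plain numbers (illustrative only, no MasPy objects):

scores = [10.0, 10.0, 7.5, 7.5, 3.0]   # already sorted, larger is better
ranks = []
last_value = None
for position, score in enumerate(scores, 1):
    if score != last_value:
        rank = position
    ranks.append(rank)
    last_value = score
print(ranks)  # [1, 1, 3, 3, 5]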