def parseFile(filename, patIdx, medIdx, diagIdx, labelIdx, delim="|"):
    """ 
    Parse a csv file using the delimiter and the appropriate columns of interest.
    The resultant sparse tensor has patient on the 0th mode, diagnosis on the 1st mode,
    and medications on the 2nd mode.
    
    Tensor info contains the axis information for each mode.
    """
    print "Creating the tensor for " + filename

    patList = OrderedDict()
    medList = OrderedDict()
    diagList = OrderedDict()
    patClass = OrderedDict()

    ## seed the index array; the first record maps to indices (0, 0, 0)
    tensorIdx = np.array([[0, 0, 0]])
    datfile = open(filename)

    for i, line in enumerate(datfile):
        line = line.rstrip('\r\n')
        parse = line.split(delim)
        
        # insert them into the list if necessary
        if not patList.has_key(parse[patIdx]):
            patList[parse[patIdx]] = len(patList)
            patClass[parse[patIdx]] = parse[labelIdx]
        if not diagList.has_key(parse[diagIdx]):
            diagList[parse[diagIdx]] = len(diagList)
        if not medList.has_key(parse[medIdx]):
            medList[parse[medIdx]] = len(medList)
        
        patId = patList.get(parse[patIdx])
        diagId = diagList.get(parse[diagIdx])
        medId = medList.get(parse[medIdx])
    
        # the first record (i == 0) is already seeded in tensorIdx above
        if i > 0:
            tensorIdx = np.append(tensorIdx, [[patId, diagId, medId]], axis=0)
    datfile.close()

    tensorVal = np.ones((tensorIdx.shape[0], 1))
    # initialize size
    siz = np.array([len(patList), len(diagList), len(medList)])
    X = sptensor.sptensor(tensorIdx, tensorVal, siz)
    
    tensorInfo = {}
    tensorInfo['axis'] = [patList.keys(), diagList.keys(), medList.keys()]
    tensorInfo['pat'] = patList.keys()
    tensorInfo['med'] = medList.keys()
    tensorInfo['diag'] = diagList.keys()
    tensorInfo['class'] = patClass.values()
      
    return X, tensorInfo
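Note that np.append copies the whole index array on every record, so the loop above is quadratic in the number of input lines. A minimal sketch of the same accumulation using a plain Python list converted once at the end (build_tensor_idx and its triples argument are illustrative names, not part of the original):

import numpy as np

def build_tensor_idx(triples):
    """Collect (patient, diagnosis, medication) index triples in a list
    and convert to an array once, instead of np.append per record.
    `triples` is a hypothetical iterable of (patId, diagId, medId)."""
    idx = []
    for patId, diagId, medId in triples:
        idx.append([patId, diagId, medId])
    return np.array(idx)  # shape (n, 3); one allocation instead of n copies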
Example #2
class LRUCache(object):
    """
    An LRU dict used to record order_id, so that log records re-sent by
    the Storm platform do not inflate the Redis data.
    """
    def __init__(self, capacity):
        """
        @param capacity: maximum number of entries
        """
        self.capacity = capacity
        self.cache = OrderedDict()
    
    def get(self,key):
        if self.cache.has_key(key):
            value = self.cache.pop(key)
            self.cache[key] = value
        else:
            value = None
        return value
    
    def set(self,key,value):
        if self.cache.has_key(key):
            value = self.cache.pop(key)
            self.cache[key] = value
        else:
            if len(self.cache) == self.capacity:
                # pop the first (oldest) item
                self.cache.popitem(last=False)
            self.cache[key] = value
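dict.has_key was removed in Python 3, so these snippets are Python 2 only. A minimal Python 3 sketch of the same cache, assuming identical capacity semantics, using `in` and OrderedDict.move_to_end:

from collections import OrderedDict

class LRUCachePy3(object):
    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        if key not in self.cache:
            return None
        self.cache.move_to_end(key)  # mark as most recently used
        return self.cache[key]

    def set(self, key, value):
        if key in self.cache:
            self.cache.move_to_end(key)
        elif len(self.cache) >= self.capacity:
            self.cache.popitem(last=False)  # evict the least recently used
        self.cache[key] = value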
Example #3
    def getParentTransforms(self, variableName, topLayerName=None):
        layerNames = self.layers.keys()
        if topLayerName:
            layerIndex = layerNames.index(topLayerName)
        else:
            layerIndex = len(self.layers) + 1
        transforms = OrderedDict()
        transforms[variableName] = CaffeTransform([1., 1.], [1., 1.], [1., 1.])
        for layerName in reversed(layerNames[0:layerIndex]):
            layer = self.layers[layerName]
            layerTfs = layer.getTransforms(self)
            for i, inputName in enumerate(layer.inputs):
                tfs = []
                if transforms.has_key(inputName):
                    tfs.append(transforms[inputName])
                for j, outputName in enumerate(layer.outputs):
                    if layerTfs[i][j] is None: continue
                    if transforms.has_key(outputName):
                        composed = composeTransforms(layerTfs[i][j], transforms[outputName])
                        tfs.append(composed)

                if len(tfs) > 0:
                    # should resolve conflicts, not simply pick the first tf
                    transforms[inputName] = tfs[0]
        return transforms
Example #4
class LRUCache(object):
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.cache = OrderedDict()
        self.capacity = capacity

    def get(self, key):
        """
        :rtype: int
        """
        if self.cache.has_key(key):
            val = self.cache[key]
            del self.cache[key]
            self.cache[key] = val
            return val
        return -1

    def set(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: None
        """
        if self.cache.has_key(key):
            del self.cache[key]
            self.cache[key] = value
        else:
            if len(self.cache) >= self.capacity:
                self.cache.popitem(last=False)
            self.cache[key] = value
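A short usage sketch of the get/set interface above (capacity 2, -1 on a miss):

cache = LRUCache(2)
cache.set(1, 'a')
cache.set(2, 'b')
print cache.get(1)  # 'a' -- the lookup also refreshes key 1
cache.set(3, 'c')   # cache is full, so key 2 (least recently used) is evicted
print cache.get(2)  # -1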
Example #5
class LRUCache(object):
    '''Must not store mutable objects; set() is not safe for concurrent access.'''

    def __init__(self,capacity):
        self.capacity = capacity
        self.cache = OrderedDict()


    def get(self,key):
        if self.cache.has_key(key):
            value = self.cache.pop(key)
            self.cache[key] = value
        else:
            value = None

        return value


    def set(self,key,value):
        if self.cache.has_key(key):
            value = self.cache.pop(key)
            self.cache[key] = value
        else:
            if len(self.cache) == self.capacity:
                self.cache.popitem(last=False)    # pop the first (oldest) item
                self.cache[key] = value
            else:
                self.cache[key] = value
Example #6
class LRUCache(object):
    def __init__(self, capacity):
        self.cap = capacity
        self.cache = OrderedDict()

    def get(self, key):
        if self.cache.has_key(key):
            value = self.cache.pop(key)
            self.cache[key] = value
            return value

        else:
            return -1

    def set(self, key, value):
        if self.cache.has_key(key):
            self.cache.pop(key)
            self.cache[key] = value

        else:
            if len(self.cache) == self.cap:
                self.cache.popitem(False)
                self.cache[key] = value
            else:
                self.cache[key] = value
Example #7
class LRUCache(object):
    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        if self.cache.has_key(key):
            value = self.cache.pop(key)
            self.cache[key] = value
        else:
            value = None

        return value

    def set(self, key, value):
        global count_s
        global count_f
        global count_m
        if self.cache.has_key(key):
            value = self.cache.pop(key)
            self.cache[key] = value
            count_s += 1
        else:
            count_f += 1
            if len(self.cache) == self.capacity:
                self.cache.popitem(last=False)
                self.cache[key] = value
                count_m += 1
            else:
                self.cache[key] = value
Example #8
class StatusConsole:
    """ A static console for displaying parameters that change over time. """
    def __init__(self,x,y,width):
        self.parameters = OrderedDict()
        self.paramCount = 0
        self.x = x
        self.y = y
        self.title = ''
        self.width = width
        self.modified = True  # so draw() works before any parameter is set
        self.label = pyglet.text.Label(self.title,
                                       font_name='Consolas',
                                       font_size=10,
                                       color = (255, 255, 255, 255),
                                       x=self.x,
                                       y=self.y,
                                       width=self.width,
                                       multiline=True)
    def addParameter(self, parameter):
        if(not self.parameters.has_key(parameter)):
            self.parameters[parameter] = None
            self.modified = True
    def setParameter(self, parameter, value):
        if(self.parameters.has_key(parameter)):
            self.parameters[parameter] = value
            self.modified = True
    def setTitle(self, string=''):
        self.title = string
        self.modified = True
    def updateLabel(self):
        s = ''.join([str(a) + " : " + str(b) + '\n' for a,b in self.parameters.items()])
        s = self.title + '\n' + s
        self.label.text = s
    def draw(self):
        if(self.modified):
            self.updateLabel()
        self.label.draw()
Example #9
class FIFOCache:
    def __init__(self, max_cache_size=10):
        self.cache = OrderedDict()
        self.max_size = max_cache_size

    def set(self, key, value):
        '''
        Add an element:
        1. if the cache is full, evict the earliest-inserted entry first
        2. then insert the key (overwriting it if it already exists)
        :param key:
        :param value:
        :return:
        '''
        if self.max_size <= len(self.cache):
            # cache is full: evict the earliest-inserted entry
            self.cache.popitem(last=False)
        self.cache[key] = value

    def get(self, key):
        '''
        Return the value if the key is present, otherwise None.
        (Being FIFO, a lookup does not refresh the entry.)
        :param key:
        :return:
        '''
        if self.cache.has_key(key):
            value = self.cache[key]
            return value
        else:
            return None
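Unlike the LRU variants above, a lookup in this FIFO cache never refreshes the entry, so eviction depends only on insertion order; a small usage sketch:

cache = FIFOCache(max_cache_size=2)
cache.set('a', 1)
cache.set('b', 2)
cache.get('a')        # access does not move 'a' to the back
cache.set('c', 3)     # evicts 'a', the earliest-inserted key
print cache.get('a')  # None
print cache.get('b')  # 2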
Example #10
def nodeLabelDict(nodes, trace):
  # Inverse look up dict for node -> symbol from trace.globalEnv
  inv_env_dict = OrderedDict()
  for (sym, env_node) in trace.globalEnv.frame.iteritems():
    assert isinstance(env_node, Node)
    assert not inv_env_dict.has_key(env_node)
    inv_env_dict[env_node] = sym

  label_dict = OrderedDict()
  for node in nodes:
    if inv_env_dict.has_key(node):
      label = inv_env_dict[node]
    elif isOutputNode(node):
      label = 'O' # 'Output' #: ' + str(node.value)
    elif isRequestNode(node):
      label = 'R' # 'Request' #: ' + str(node.value)
    elif isLookupNode(node):
      label = 'L' # 'Lookup'
    elif isConstantNode(node):
      label = 'C' # 'Constant'
    else:
      label = '' # str(node.value)
    label_dict[node] = label

  return label_dict
Example #12
def get_samples(self, manager, cache, resources):
    meminfo = OrderedDict()
    if os.path.isfile("/proc/meminfo"):
        with open("/proc/meminfo") as f:
            for line in f:
                temp = line.split(":")
                meminfo[temp[0]] = temp[1].strip()
        if meminfo.has_key('MemTotal') and meminfo.has_key('MemFree'):
            memtotal = int(meminfo['MemTotal'].strip().split(' ')[0])
            memfree = int(meminfo['MemFree'].strip().split(' ')[0])
            memutil = (memtotal - memfree) / (1.0 * memtotal)
            memutil = float("%.2f" % memutil) * 100

            hostinfo = os.popen("uname -a").read()
            host = hostinfo.split(' ')[1]

            yield util.make_sample_from_host(
                host,
                name='host_mem_util',
                type=sample.TYPE_DELTA,
                unit='%',
                volume=memutil,
                additional_metadata=None,
            )
    else:
        LOG.error("/proc/meminfo does not exist")
def climbingLeaderboard(scores, alice):
    ranks = OrderedDict()

    rank = 1
    for i in range(len(scores)):
        if not ranks.has_key(scores[i]):
            ranks[scores[i]] = rank
            rank += 1

    search_from_index = len(scores) - 1
    # Loop over Alice's scores
    highest_rank_achieved = False
    for i in range(len(alice)):
        if highest_rank_achieved:
            print 1
            continue

        if ranks.has_key(alice[i]):
            print ranks[alice[i]]
            continue

        next_largest_score, search_from_index = find_rank_index(
            scores, alice[i], search_from_index)
        #print "i =", i, " alice_score =", alice[i], " next_higher_score =", next_largest_score
        if next_largest_score is None:
            # If there is no greater score than this, then Alice has reached RANK 1
            highest_rank_achieved = True
            print 1
        else:
            print ranks[next_largest_score] + 1
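The same dense-ranking problem is usually solved by deduplicating the scores and binary-searching each of Alice's scores, which avoids the external find_rank_index helper entirely; a sketch with the standard bisect module, returning the ranks instead of printing them:

import bisect

def climbing_leaderboard(scores, alice):
    unique = sorted(set(scores))  # ascending; input scores are highest-first
    ranks = []
    for score in alice:
        # rank = number of distinct scores strictly greater than this one, plus 1
        higher = len(unique) - bisect.bisect_right(unique, score)
        ranks.append(higher + 1)
    return ranks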
Example #16
class LRUCache(object):

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.capacity = capacity
        self.items = OrderedDict()
        

    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if not self.items.has_key(key):
            return -1
        value = self.items.pop(key)
        self.items[key] = value
        return value
        

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if self.items.has_key(key):
            self.items.pop(key)
            self.items[key] = value
            return
        if len(self.items) >= self.capacity:
            self.items.popitem(False)
        self.items[key] = value
Example #17
class LRUCache(object):
    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self._len = capacity
        self._dict = OrderedDict([])

    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if not self._dict.has_key(key):
            return -1
        self._dict[key] = self._dict.pop(key)
        return self._dict[key]

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if self._dict.has_key(key):
            self._dict.pop(key)
        elif len(self._dict) >= self._len:
            # the cache is full: evict the least recently used entry
            self._dict.popitem(last=False)
        self._dict[key] = value
def score_with_cocoeval(samples_valid, samples_test, engine):
    scorer = COCOScorer()
    if samples_valid:
        gts_valid = OrderedDict()
        for ID in engine.val_data_ids:
            vidID, capID = ID.split('|')
            words = engine.get_cap_tokens(vidID, int(capID), mode='val')
            caption = ' '.join(words)
            if gts_valid.has_key(vidID):
                gts_valid[vidID].append({'image_id': vidID, 'caption': caption, 'cap_id': capID})
            else:
                gts_valid[vidID] = [{'image_id': vidID, 'caption': caption, 'cap_id': capID}]
        valid_score = scorer.score(gts_valid, samples_valid, gts_valid.keys())
    else:
        valid_score = None
    if samples_test:
        gts_test = OrderedDict()
        for ID in engine.test_data_ids:
            vidID, capID = ID.split('|')
            words = engine.get_cap_tokens(vidID, int(capID), mode='test')
            caption = ' '.join(words)
            if gts_test.has_key(vidID):
                gts_test[vidID].append({'image_id': vidID, 'caption': caption, 'cap_id': capID})
            else:
                gts_test[vidID] = [{'image_id': vidID, 'caption': caption, 'cap_id': capID}]
        test_score = scorer.score(gts_test, samples_test, gts_test.keys())
    else:
        test_score = None
    return valid_score, test_score
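The has_key/else branches that populate gts_valid and gts_test are the usual group-by-key pattern; dict.setdefault expresses it in one step. A sketch, where data_ids and get_cap_tokens stand in for the engine attributes used above:

from collections import OrderedDict

gts = OrderedDict()
for ID in data_ids:  # stands in for engine.val_data_ids / engine.test_data_ids
    vidID, capID = ID.split('|')
    caption = ' '.join(get_cap_tokens(vidID, int(capID)))  # hypothetical helper
    gts.setdefault(vidID, []).append(
        {'image_id': vidID, 'caption': caption, 'cap_id': capID})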
def compare_betweenPair(num):
    dbconn = MySQLdb.connect('10.141.221.73', 'root', 'root', 'fdroid')
    dbcursor = dbconn.cursor()
    # sql = 'select block_id,block_code from fdroid.cc_block where detection_id=1 and detection_tp = "20150101--20150131"'
    sql = 'SELECT distinct cc_clonepair.block1_id \
    from fdroid.cc_clonepair where \
    clonepair_id<%s and cc_clonepair.detection_tp="20150101--20150131"' % num

    blocksql = "SELECT block_id, block_code FROM fdroid.cc_block \
        where detection_id=1 and detection_tp = '20150101--20150131' \
        and block_id = %s"
    dbcursor.execute(sql)
    SourceCodedistanceDict = OrderedDict()
    CodeTokendistanceDict = OrderedDict()
    tem = dbcursor.fetchall()
    
    for i in range(len(tem)-1):
        print i
        blockid1 = tem[i][0]
        blockid2 = tem[i+1][0]
        sql1 = blocksql % blockid1
        sql2 = blocksql % blockid2

        dbcursor.execute(sql1)
        code1 = dbcursor.fetchone()[1].replace(r"\n","").replace(r"\t","").replace(r"b'","").replace(r"',","")
        hash1 = getHash.get_nilsimsa(code1)
        with open('lexer/tem.code', 'w') as f:
            f.write(code1)
        os.popen('java -jar codeLexer.jar lexer/tem.code')
        token1 = lexerparser.parse()

        dbcursor.execute(sql2)
        code2 = dbcursor.fetchone()[1].replace(r"\n","").replace(r"\t","").replace(r"b'","").replace(r"',","")
        hash2 = getHash.get_nilsimsa(code2)
        with open('lexer/tem.code', 'w') as f:
            f.write(code2)
        os.popen('java -jar codeLexer.jar lexer/tem.code')
        token2 = lexerparser.parse()

        dist = getHash.compare_hash(hash1,hash2)
        if SourceCodedistanceDict.has_key(dist):
            SourceCodedistanceDict[dist] += 1
        else:
            SourceCodedistanceDict[dist] = 1
        
        dist = getHash.compare_hash(getHash.get_nilsimsa(token1),getHash.get_nilsimsa(token2))
        
        # print code1
        # print code2
        # print token1,token2
        
        if CodeTokendistanceDict.has_key(dist):
            CodeTokendistanceDict[dist] += 1
        else:
            CodeTokendistanceDict[dist] = 1

    dbcursor.close()
    dbconn.close()
    return SourceCodedistanceDict, CodeTokendistanceDict
Example #20
class Recording(object):
    """Recording Class is used for managing the recording video process."""

    #----------------------------------------------------------------------#
    #                      Devices Class Constructor                       #
    #----------------------------------------------------------------------#
    def __init__(self):
        """Devices Class Constructor."""
        # Creates a dictionary for managing multiple cameras.
        self.__videos = OrderedDict()
        self.__fourcc = cv2.VideoWriter_fourcc(*"WMV2")

    #----------------------------------------------------------------------#
    #                         Public Class Methods                         #
    #----------------------------------------------------------------------#
    def AddVideo(self, index, fps=30.0, size=(640, 480)):
        """Add a new video in the Python dictionary."""
        key = str(index)
        if self.__videos.has_key(key):
            return False

        self.__videos[key] = cv2.VideoWriter(index, self.__fourcc, fps, size)

        return True

    def DelVideo(self, index):
        """Remove a video from the Python dictionary."""
        key = str(index)
        if not self.__videos.has_key(key):
            return False

        self.__videos[key].release()
        del self.__videos[key]

        return True

    def Write(self, images):
        """Writes the next video frame."""
        if type(images).__module__ == np.__name__:
            images = [images]

        for key, image in zip(self.__videos, images):
            self.__videos[key].write(image)

    def Release(self):
        """Closes all videos."""
        for key in self.__videos:
            self.__videos[key].release()
        self.__videos.clear()

    #----------------------------------------------------------------------#
    #                            Class Methods                             #
    #----------------------------------------------------------------------#
    def __repr__(self):
        """Get a object representation in a string format."""
        return "Framework.RecordingVideos.Recording object."
Example #21
class ATimeCache(object):
    """Cache class (dictionary) with a limited size, where only the
    'max_entries' most recently added or accessed entries are stored."""

    def __init__(self, max_entries):
        self._cache = OrderedDict()
        self._max_entries = max_entries

    def _shrink(self):
        while len(self._cache) > self._max_entries:
            self._cache.popitem(last=False)
        
    def get_max_entries(self):
        return self._max_entries

    def set_max_entries(self, value):
        self._max_entries = value
        self._shrink()
        
    max_entries = property(
        get_max_entries, set_max_entries, None, "Set or get the cache size")

    def has_key(self, key):
        return self._cache.has_key(key)
    
    def __eq__(self, other):
        try:
            return self._cache.__eq__(other._cache)
        except AttributeError:
            return False
    
    def __len__(self):
        return self._cache.__len__()

    def __getitem__(self, key):
        value = self._cache.pop(key)
        self._cache[key] = value
        return value

    def __setitem__(self, key, value):
        if self._cache.has_key(key):
            self._cache.pop(key)
        self._cache.__setitem__(key, value)
        self._shrink()

    def __contains__(self, key):
        return self.has_key(key)

    def __str__(self):
        return self._cache.__str__()

    def __iter__(self):
        # Iterate directly over the underlying dict, rather than over this
        # class, so as not to change the order of cached items (as opposed
        # to []/__getitem__, which reinserts an item on top of the stack
        # whenever it is looked up).
        return iter(self._cache)
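A brief usage sketch: lookups through [] refresh an entry's position, and shrinking max_entries evicts the oldest entries immediately:

cache = ATimeCache(max_entries=3)
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3
cache['a']             # refresh: 'a' becomes the most recently used entry
cache.max_entries = 2  # _shrink() pops 'b', now the oldest entry
print 'b' in cache     # False
print list(cache)      # ['c', 'a']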
Example #25
    def get_net_param_info(self):
        param_set = OrderedDict()
        total_count = 0
        for layer in self.layers:
            if (not isinstance(layer,la.InputLayer)):
                if (not param_set.has_key(layer.params.weights)):
                    param_set[layer.params.weights] = True
                    total_count += layer.params.weights.count
                if (not param_set.has_key(layer.params.biases)):
                    param_set[layer.params.biases] = True
                    total_count += layer.params.biases.count

        return param_set,total_count
Example #26
File: hello.py Project: fajoy/py
def main():
    parser = argparse.ArgumentParser(description='this is hello example.')

    parser.add_argument('name', metavar='NAME',type=str, nargs='?',default=None,
                   help='display name.')

    parser.add_argument('msg', metavar='MESSAGE',type=str, nargs='*',
                   help='display a line of text')

    parser.add_argument('-v','--verbose',action='store_true',default=None,
                   help='enable verbose.')

    parser.add_argument('-d','--debug',action='store_true',default=None,
                   help='enable debug.')

    args = parser.parse_args()

    conf=config.defaults()

    if config.has_section("fajoy"):
        conf=OrderedDict(config.items("fajoy"))


    log_fmt="%(asctime)s - [%(threadName)s:%(name)s.%(funcName)s((%(lineno)d))] - %(levelname)s - %(message)s"

    if conf.has_key("log_format"):
        log_fmt=conf["log_format"]

    if args.debug:
        logging.basicConfig(stream=sys.stderr , level=logging.DEBUG, format=log_fmt)

    if args.verbose:
        logging.basicConfig(stream=sys.stderr , level=logging.INFO, format=log_fmt)
    logging.basicConfig(stream=sys.stderr , level=logging.WARNING, format=log_fmt)

    if conf.has_key("log"):
        enable_log(conf["log"])

    if log.isEnabledFor(logging.INFO):
        log.info(conf)

    if log.isEnabledFor(logging.DEBUG):
        log.debug(conf)

    name = args.name or "%s%s" % (conf.get("name"),"(%s)" % conf.get("mail") if conf.has_key("mail") else "")
    msg = " ".join(args.msg) or "hello"

    e = Exception("test")
    log.exception(e)
    print "%s : %s" %(name,msg)
Example #27
class ParameterSet(object):
    def __init__(self, definition, parameter_tuple):
        if len(definition) != len(parameter_tuple):
            raise ExperimentError('ParameterSet: Parameter set definition and '
                                  'provided parameters do not agree')
        self.definition = definition
        self.ps = parameter_tuple
        self.names = OrderedDict()
        for i, n in enumerate(definition.values()):
            self.names[n] = i
        self.shortnames = OrderedDict()
        for i, n in enumerate(definition.keys()):
            self.shortnames[n] = i

    def has(self, n):
        return self.shortnames.has_key(n) or self.names.has_key(n)

    def get(self, n):
        if self.shortnames.has_key(n):
            return self.ps[self.shortnames[n]]
        elif self.names.has_key(n):
            return self.ps[self.names[n]]
        else:
            raise ExperimentError(
                'Parameter "{}" does not exist in this parameter set'.format(
                    n))

    def __getattr__(self, n):
        shortnames = object.__getattribute__(self, 'shortnames')
        ps = object.__getattribute__(self, 'ps')
        if not shortnames.has_key(n):
            raise AttributeError()
        return ps[shortnames[n]]

    def __getitem__(self, n):
        if isinstance(n, int):
            return self.ps[n]
        if not self.names.has_key(n):
            raise IndexError()
        return self.ps[self.names[n]]

    def __repr__(self):
        s = '('
        s += ','.join([
            '{}={}'.format(n, v)
            for n, v in zip(self.shortnames.keys(), self.ps)
        ])
        s += ')'
        return s
Example #28
def get_notes(sno):
    db = connect.connect()
    cursor = db.cursor()
    cursor.execute('''SELECT line from notes where serialno = %s
    order by lineno''', (sno,))
    results = cursor.fetchall()
    cursor.close()

    notes_dict = OrderedDict()
    ndate, op = "", ""

    #a line is like ('\x01REC\x0c\x08m\x0c\x08m\n\x08',)
    for line, in results:
        ntype, note, operator, date2 = notes.decipher_noteline(line)
        if date2 != "":
            ndate = date2
        if operator != "":
            op = operator

        key = (ndate, op)
        if notes_dict.has_key(key):
            notes_dict[key].append((ntype, note))
        else:
            notes_dict[key] = [(ntype, note)]

    return notes_dict
Example #29
def longest_amicable_chain_min_member(n):
    '''Return the smallest member of the longest amicable chain with no element
    exceeding n-1. Assumed to be unique.'''
    d, x, c = DCalculator(n).divisor_sum(), 1, np.zeros((n + 1,), dtype=np.int)
    c.fill(-1)  # Initially, set all nodes to unvisited. 0=visited. >0: cycle length at smallest element of the cycle
    while x < n:  # x points to next unvisited element
        #print 'x', x
        y, p = x, 0
        q = OrderedDict([(y, p)])
        #print '\t', y, p
        while y < n:
            # Advance along cycle
            y, p = d[y], p + 1
            appeared_before = q.has_key(y)
            #print '\t', y, p
            if y > n or c[y] > 0 or appeared_before: break
            q[y] = p
        #print '\tlast', y, p, q
        r = q.keys()
        c[r] = 0
        if appeared_before:
            i = q[y]
            z = r[i + np.argmin(r[i:])]
            c[z] = p - i 
            print x, '\t', 'c[%d] = %d, i=%d' % (z, c[z], i)
        while c[x] >= 0 and x < n: x += 1  # Advance to next unvisited element
#     np.set_printoptions(threshold=np.nan)
#     print c
    return np.argmax(c) 
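DCalculator.divisor_sum() is external to this snippet; for reference, a minimal sketch of a proper-divisor-sum sieve that could provide the d array the function expects (an assumption about that interface):

import numpy as np

def divisor_sum_sieve(n):
    """d[k] = sum of the proper divisors of k, for 0 <= k < n."""
    d = np.zeros(n, dtype=np.int64)
    for i in xrange(1, n // 2 + 1):
        d[2 * i::i] += i  # i divides 2i, 3i, ...
    return d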
Example #30
def from_files(filenames):
    """Return an iterator that provides a sequence of Histograms for
the histograms defined in filenames.
    """
    all_histograms = OrderedDict()
    for filename in filenames:
        parser = FILENAME_PARSERS[os.path.basename(filename)]
        histograms = parser(filename)

        # OrderedDicts are important, because then the iteration order over
        # the parsed histograms is stable, which makes the insertion into
        # all_histograms stable, which makes ordering in generated files
        # stable, which makes builds more deterministic.
        if not isinstance(histograms, OrderedDict):
            raise BaseException, "histogram parser didn't provide an OrderedDict"

        for (name, definition) in histograms.iteritems():
            if all_histograms.has_key(name):
                raise DefinitionException, "duplicate histogram name %s" % name
            all_histograms[name] = definition

    # We require that all USE_COUNTER2_* histograms be defined in a contiguous
    # block.
    use_counter_indices = filter(lambda x: x[1].startswith("USE_COUNTER2_"),
                                 enumerate(all_histograms.iterkeys()));
    if use_counter_indices:
        lower_bound = use_counter_indices[0][0]
        upper_bound = use_counter_indices[-1][0]
        n_counters = upper_bound - lower_bound + 1
        if n_counters != len(use_counter_indices):
            raise DefinitionException, "use counter histograms must be defined in a contiguous block"

    for (name, definition) in all_histograms.iteritems():
        yield Histogram(name, definition)
Example #31
    def _check_md5(self, projectDir):
        # type: (str) -> (list, OrderedDict)
        """
        :param projectDir:
        :return:
        """
        readMe = ""
        title = ""
        files = OrderedDict()
        not_match_files = []
        with open(os.path.join(projectDir, "readme.txt"), "rb") as f:
            i = 0
            for line in f:
                line = line.strip()
                if i == 0:
                    title = line
                if i > 2:
                    l = line.split("\t")
                    if len(l) != 2:
                        print line
                        continue
                    files[l[0]] = l[1]
                i += 1
        for key, md5Str in files.items():
            p = os.path.join(projectDir, key)
            if md5Str != self.md5_checksum(p):
                not_match_files.append(key)

        return not_match_files, files
Example #32
def tags_sorted(self):
   tags = reversed(sorted(self.tags.iteritems(), key=operator.itemgetter(1)))
   grouped = OrderedDict()
   totals = {}
   
   for tag in tags:
     temp = tag[0].split(':')
     if len(temp) > 1:
       g = temp[0]
       t = temp[1]
       
     else:
       g = 'Ungrouped'
       t = temp[0]
       
     if grouped.has_key(g):
       grouped[g].append([t, None, tag[1]])
       totals[g] += tag[1]
       
     else:
       grouped[g] = [[t, None, tag[1]]]
       totals[g] = tag[1]
       
   for key in grouped.keys():
     if key != 'Ungrouped':
       total = 0
       for t in grouped[key]:
         total += t[2]
         
       for t in grouped[key]:
         t[1] = round((float(t[2]) / total) * 100, 1)
         
   return grouped.items()
Example #33
def treasures():
  results = sqlrelay.execute_results("""
SELECT
   GROUP_ID
 , ITEM_ID
 , FREQUENCY
FROM
   ARPG_BT_TREASURE
""")

  treasure_list = OrderedDict()
  for r in results:
    group_id = int(r[0])
    item_id = int(r[1])
    frequency = int(r[2])

    if treasure_list.has_key(group_id):
      treasure = treasure_list[group_id]
    else:
      treasure = OrderedDict()
      treasure_list[group_id] = treasure

    treasure[item_id] = {
      'frequency': frequency,
    }

  # compute the total frequency for each group
  for treasure in treasure_list.values():
    total = 0
    for item in treasure.values():
      total += item['frequency']
    treasure['total'] = total

  return treasure_list
def flat(self, pattern='ptc/*_ptc.*.fits', time_stamp=None,
         verbose=True):
     if time_stamp is None:
         time_stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
     exptime = lambda x : pyfits.open(x)[0].header['EXPTIME']
     infiles = self._infiles(pattern)
     # Group files by exposure time and therefore into pairs, presumably.
     groups = OrderedDict()
     for infile in infiles:
         my_exptime = exptime(infile)
         if not groups.has_key(my_exptime):
             groups[my_exptime] = []
         groups[my_exptime].append(infile)
     # Translate first two files in each exptime group as flat1 and flat2.
     for key, infiles in groups.items():
         if key == 0 or len(infiles) < 2:
             # Skip zero exposure frames and groups with only one frame.
             continue
         seqno = '%09.4f_flat1' % key
         self.translate(infiles[0], 'flat', 'flat', seqno,
                        time_stamp=time_stamp, verbose=verbose)
         seqno = '%09.4f_flat2' % key
         self.translate(infiles[1], 'flat', 'flat', seqno,
                        time_stamp=time_stamp, verbose=verbose)
     return time_stamp
Example #35
def parse(dat):
    f = open(dat, 'r')
    first = True
    labels = []
    lines = f.readlines()
    data = OrderedDict()
    min = OrderedDict()
    max = OrderedDict()
    scale_by = None
    cur_name = None
    it = iter(xrange(len(lines)))
    for i in it:
        if first:
            cur_name = lines[i]
            labels.append(lines[i])
            first = False
        elif lines[i] == '\n':
            for d in data.values():
                if not d.has_key(cur_name):
                    d[cur_name] = 0
            for d in min.values():
                if not d.has_key(cur_name):
                    d[cur_name] = 0
            for d in max.values():
                if not d.has_key(cur_name):
                    d[cur_name] = 0

            first = True
            scale_by = None
        else:
            print lines[i], lines[i + 1]
            if not scale_by:
                scale_by = int(cur_name[:2]) if SCALE_CORE else 1


            if not data.has_key(lines[i]):
                data[lines[i]] = OrderedDict()
                min[lines[i]] = OrderedDict()
                max[lines[i]] = OrderedDict()
                # initialize previously missed values to 0
                for l in labels:
                    data[lines[i]][l] = 0
                    min[lines[i]][l] = 0
                    max[lines[i]][l] = 0
            raw_dat = eval(lines[i + 1])  # very safe lol
            throughputs, neworders = raw_dat
            print "XXXXX"
            print throughputs
            print median(throughputs)
            print numpy.amin(throughputs)
            print numpy.amax(throughputs)
            data[lines[i]][cur_name] = median(throughputs) / scale_by
            min[lines[i]][cur_name] = numpy.amin(throughputs) / scale_by
            max[lines[i]][cur_name] = numpy.amax(throughputs) / scale_by
            next(it)

    print data

    return data, min, max, labels
    def loadSettings(self):
        returnData = OD()
        cur_dict = None
        
        try:
            inifile = open(self.currentFile, 'r')
        except IOError:
            raise Exception("Error opening file at %s" % self.currentFile)
        
        #Here's the business end of the function
        for line in inifile:
            #Ignore any commented out lines
            if line[0] == "#":
                continue
            
            #New group
            if line[0] == "[":
                grpname = re.match("\[(.*?)\]", line).group(1)
                cur_dict = grpname
                if returnData.has_key("grpname") is False:
                    returnData[grpname] = OD()
                
            #Options
            elif re.match("(.*?)(?:\s+)?=(?:\s+)?(.*)", line) is not None:
                if cur_dict is not None:
                    option_line = re.match("(.*?)(?:\s+)?=(?:\s+)?(.*)", line)
                    returnData[cur_dict][str(option_line.group(1)).lower()] = str(option_line.group(2))
                else:
                    pass
        
        #Gotta close it before we quit
        inifile.close()

        return returnData
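For comparison, the standard library already handles sections, options, and # comment lines; a minimal sketch with ConfigParser (named configparser in Python 3) that returns a similar nested OD structure:

from ConfigParser import ConfigParser
from collections import OrderedDict as OD

def load_settings(path):
    parser = ConfigParser()
    parser.read(path)
    data = OD()
    for section in parser.sections():
        data[section] = OD(parser.items(section))  # option names come back lowercased
    return data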
Example #37
def delete_duplicates(input_file, my_file):
    content = ""
    last_first = re.compile("\w+,\w+")
    last_first_middle = re.compile("\w+,\w+ \w+")
    fid = open(input_file)
    fid_wrt = open(my_file, "w+")
    lines = fid.readlines()
    names_by_ssn = OrderedDict()

    for line in lines:
        if len(re.split(":", line)) != 1:
            name, SSN = re.split(":", line)
            SSN = SSN.strip("\n")
            if names_by_ssn.has_key(SSN):
                names_by_ssn[SSN].append(name)
            else:
                names_by_ssn[SSN] = [name]

    for SSN in names_by_ssn.iterkeys():
        name_array = names_by_ssn[SSN]
        split_name = []
        name_array.sort(key=lambda item: (-len(item), item))
        if re.match(last_first_middle, name_array[0]) or re.match(last_first, name_array[0]):
            split_name = re.findall(r"[\w']+", name_array[0])
            if re.match(last_first_middle, name_array[0]):
                normal_form = split_name[1] + " " + split_name[2] + " " + split_name[0]
            elif re.match(last_first, name_array[0]):
                normal_form = split_name[1] + " " + split_name[0]
        else:
            normal_form = name_array[0]
        content += normal_form + ":" + SSN + "\n"

    fid_wrt.write(content.rstrip("\n"))
    fid_wrt.close()
Example #38
class Cost():

    def __init__(self, cost, params, constants=None):
        self.cost = cost
        self.grads = OrderedDict()
        self.computed_cost = False

        self.params = OrderedDict()
        for p in params:
            self.params[p] = True

        self.constants = OrderedDict()
        constants = [] if constants is None else constants
        for c in constants:
            self.constants[c] = True

    def compute_gradients(self, lr, multipliers=None):
        multipliers = OrderedDict() if multipliers is None else multipliers
        grads =  T.grad(self.cost, self.params.keys(), 
                        consider_constant=self.constants.keys(),
                        disconnected_inputs='ignore')
        for param, gparam in zip(self.params.keys(), grads):
            param_lr = multipliers.get(param.name, 1.0) * lr
            self.grads[param] = param_lr * gparam
        self.computed_cost = True

    def update_gradient(self, param, new_grad):
        assert self.computed_cost
        assert self.grads.has_key(param)
        self.grads[param] = new_grad
Example #39
def UpdateNLHTIV(nlhtiv):
    if (nlhtiv.has_key("Trigger_Condition")):
        tc_list = []

        tc = nlhtiv["Trigger_Condition"]
        #del nlhtiv["Trigger_Condition"]

        if (tc == "TriggerList"):
            if (nlhtiv.has_key("Trigger_Condition_List")):
                tc_list = nlhtiv["Trigger_Condition_List"]
            else:
                raise Exception("Expected Trigger_Condition_List")
        elif (tc == "TriggerString"):
            if (nlhtiv.has_key("Trigger_Condition_String")):
                tc_list.append(nlhtiv["Trigger_Condition_String"])
                del nlhtiv["Trigger_Condition_String"]
            else:
                raise Exception("Expected Trigger_Condition_String")
        else:
            tc_list.append(tc)

        nlhtiv = OrderedDict([("Trigger_Condition_List",
                               tc_list) if k == "Trigger_Condition" else (k, v)
                              for k, v in nlhtiv.items()])
    elif (not nlhtiv.has_key("Trigger_Condition_List")):
        raise Exception("Expected Trigger_Condition")

    return nlhtiv
def parse_solver(solverfile):
    solver = OrderedDict()
    lines = open(solverfile).readlines()
    for line in lines:
        line = line.strip()
        if not line or line[0] == '#':
            continue
        if line.find('#') >= 0:
            line = line.split('#')[0]
        items = line.split(':')
        key = items[0].strip()
        value = items[1].strip().strip('"')

        try:
            _has_key = solver.has_key(key)
        except:
            _has_key = key in solver

        if not _has_key:
            solver[key] = value
        elif not type(solver[key]) == list:
            solver[key] = [solver[key], value]
        else:
            solver[key].append(value)
    return solver
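The try/except probe above guards against has_key being gone, but `key in solver` works in both Python 2 and 3, so the membership test and branching inside the loop can collapse to a sketch like:

for line in lines:
    # ... same stripping and key/value parsing as above ...
    if key not in solver:              # valid in Python 2 and 3
        solver[key] = value
    elif not isinstance(solver[key], list):
        solver[key] = [solver[key], value]
    else:
        solver[key].append(value)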
    def train(self):
        config = self.config

        print config.train_batch_size % len(self.unlabeled_loader)
        self.param_init()

        self.iter_cnt = 0
        iter, min_dev_incorrect = 0, 1e6
        monitor = OrderedDict()

        batch_per_epoch = int((len(self.unlabeled_loader) + config.train_batch_size - 1) / config.train_batch_size)
        while True:

            if iter % batch_per_epoch == 0:
                epoch = iter / batch_per_epoch
                if epoch >= config.max_epochs:
                    break
                epoch_ratio = float(epoch) / float(config.max_epochs)
                # use another outer max to prevent any float computation precision problem
                self.dis_optimizer.param_groups[0]['lr'] = config.dis_lr * max(0., min(3. * (1. - epoch_ratio), 1.))
                self.gen_optimizer.param_groups[0]['lr'] = config.gen_lr * max(0., min(3. * (1. - epoch_ratio), 1.))

            iter_vals = self._train(iter=iter)

            for k, v in iter_vals.items():
                if not monitor.has_key(k):
                    monitor[k] = 0.
                monitor[k] += v

            if iter % config.vis_period == 0:
                self.visualize()

            if iter % config.eval_period == 0:
                train_loss, train_incorrect = self.eval(self.labeled_loader)
                dev_loss, dev_incorrect = self.eval(self.dev_loader)

                min_dev_incorrect = min(min_dev_incorrect, dev_incorrect)
                disp_str = '#{}\ttrain: {:.4f}, {} | dev: {:.4f}, {} | best: {}'.format(
                        iter, train_loss, train_incorrect, dev_loss, dev_incorrect, min_dev_incorrect)
                for k, v in monitor.items():
                    disp_str += ' | {}: {:.4f}'.format(k, v / config.eval_period)

                disp_str += ' | lr: dis {:.5f}, gen {:.5f}'.format(
                    self.dis_optimizer.param_groups[0]['lr'], self.gen_optimizer.param_groups[0]['lr'])
                monitor = OrderedDict()

                print disp_str

                noise = Variable(torch.Tensor(400, self.config.noise_size).uniform_().cuda(), volatile=True)
                images = self.gen(noise)
                images = (images - 0.5) / 0.5
                images = images.view(-1, 1, 28, 28)
                logits = self.pixelcnn(images)
                log_probs = - pixelcnn_loss.discretized_mix_logistic_loss_c1(images.permute(0, 2, 3, 1), logits.permute(0, 2, 3, 1), sum_all=False).data.cpu()
                gen_ploss_stats = log_probs.min(), log_probs.max(), log_probs.mean(), log_probs.var()
                print 'gen stats', gen_ploss_stats
                print 'unl stats', self.unl_ploss_stats

            iter += 1
            self.iter_cnt += 1
Example #43
def parse_block(fp):
     block = OrderedDict()
     line = fp.readline().strip()
     while line != '}':
         ltype = line_type(line)
         if ltype == 0: # key: value
             #print line
             line = line.split('#')[0]
             key, value = line.split(':')
             key = key.strip()
             value = value.strip().strip('"')
             if block.has_key(key):
                 if type(block[key]) == list:
                     block[key].append(value)
                 else:
                     block[key] = [block[key], value]
             else:
                 block[key] = value
         elif ltype == 1: # blockname {
             key = line.split('{')[0].strip()
             sub_block = parse_block(fp)
             block[key] = sub_block
         line = fp.readline().strip()
         line = line.split('#')[0]
     return block
def rougeS(refSent , sents):
    skip2 = list()
    newDict = OrderedDict()
    refLen = len(refSent)
    noSents = len(sents)
    noComb = (refLen*(refLen - 1)/2)

    
    for i in range(refLen):
        for j in range(i+1 , refLen):
            newDict.update({refSent[i] + '_' + refSent[j] : True})
            
    
    for sent in sents:
        sentLen = len(sent)
        count = 0
        for p in range(sentLen):
            for q in range(p+1 , sentLen):
                if newDict.has_key(sent[p] + '_' + sent[q]):
                    count = count + 1
        skip2.append((float(count)/noComb, sent))

    return skip2
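ROUGE-S counts skip-bigrams, i.e. ordered word pairs that may skip over intervening words, which is what the newDict keys encode. A worked micro-example:

ref = ['the', 'cat', 'sat']   # skip-bigrams: the_cat, the_sat, cat_sat
cand = ['the', 'cat', 'ran']  # skip-bigrams: the_cat, the_ran, cat_ran
# one of cand's pairs matches the reference (the_cat), and noComb = C(3,2) = 3
print rougeS(ref, [cand])     # [(0.333..., ['the', 'cat', 'ran'])]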
class Device_Pool(object):
    '''Holds a number of telemedicine devices to be assigned to patients.
    Maintains a list of Device objects as well as a dictionary of which
    device is assigned to which patient.'''
    def __init__(self, number_of_devices):
        # Instantiate device objects, each with a default UUID4 id from the
        # Device constructor.
        self.devices = [Device(self) for x in xrange(number_of_devices)]
        # Maintain an ordered dictionary of which patients are assigned
        # which device.
        self.device_assignments = OrderedDict()

    def get_device_count(self):
        # Return the length of the device pool as an integer.
        return len(self.devices)

    def assign_device(self, patient):
        '''Assign the next available device in the devices attribute, or raise an exception.'''
        if self.get_device_count() > 0:  # still have some devices left to assign
            # Assign a device from the list of device objects.
            self.device_assignments[patient.get_patient_id()] = self.devices.pop()
        else:
            raise Exception("No devices left to assign!")
            # This is where code to add a queue for patients who are recruited
            # but don't have a device yet would go.

    def return_device(self, patient, device):
        self.devices.append(device)
        if self.device_assignments.has_key(patient.get_patient_id()):
            # del would raise a KeyError if the patient ID weren't in the assignments.
            del self.device_assignments[patient.get_patient_id()]
        else:
            pass  # Or catch the KeyError.
Example #46
    def format(self, data, keys=None, group_by=None, domain=None):
        rows_dict = OrderedDict()
        tmp_data = OrderedDict()
        sorted_data = []
        value_chains = get_domain_configuration(domain).by_type_hierarchy
        for key, row in data.items():
            to_list = list(key)

            def find_name(elements, deep):
                for element in elements:
                    if deep == len(key)-3 and key[deep+1] == element.val:
                        return element.text
                    elif key[deep+1] == element.val:
                        return find_name(element.next, deep+1)

            name = find_name(value_chains, 0)
            to_list[2] = name
            tmp_data.update({tuple(to_list): row})
        if tmp_data:
            sorted_data = sorted(tmp_data.items(), key=lambda x: (x[0][0], x[0][2]))

        for row in sorted_data:
            formatted_row = self._format.format_row(row[1])
            if formatted_row[0] not in rows_dict:
                rows_dict[formatted_row[0]] = []
            rows_dict[formatted_row[0]].append(formatted_row[1])

        min_length = min([len(item[1]) for item in rows_dict.items()])

        for key, row in rows_dict.items():
            total_column = self.calculate_total_column(row)
            res = [key, total_column]
            res.extend(row[0:min_length])
            yield res
Example #47
 def get(self):
     user = users.get_current_user()
     if user:
         
         #current user name
         current_username = user.nickname()
         
         #report query
         reportQry = DailyReportModel.query().order(-DailyReportModel.reportDay, -DailyReportModel.writeTime)
         reportFetch = reportQry.fetch()
         
         reportDict = OrderedDict()
         for item in reportFetch:
             reportDay = item.getReportDay()
             if reportDay in reportDict:
                 reportDict[reportDay].append(item)
             else:
                 reportDict[reportDay] = [item]
                 
         # sort by report day, newest first (keep the sorted result)
         reportDict = OrderedDict(sorted(reportDict.items(), key=lambda t: t[0], reverse=True))
         
         # jinja2 param
         template_values = {
             'current_username' : current_username,
             'report_dict' : reportDict,
             'date_today' : datetime.datetime.now().strftime("%Y-%m-%d")
         }
         template = jinja_env.get_template('MainPage.html')
         self.response.out.write(template.render(template_values))
     else:
         self.redirect(users.create_login_url(self.request.uri))
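# The group-then-sort pattern above, shown in isolation on hypothetical
# report rows instead of datastore entities.
from collections import OrderedDict
reports = [('2014-03-02', 'r1'), ('2014-03-01', 'r2'), ('2014-03-02', 'r3')]
by_day = OrderedDict()
for day, report in reports:
    by_day.setdefault(day, []).append(report)
by_day = OrderedDict(sorted(by_day.items(), key=lambda t: t[0], reverse=True))
print(by_day)  # newest day first, with its reports grouped together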
Example #48
def get_idmap(idfile, key_column, value_column, header=False):
    """
    get_idmap
    ---------
    Use when you want to get the mappings between id's, e.g.,from MGI to Entrez Gene ID

    Args:
        idfile: Gene id file (tab-separated values)
        key_column: column index of keys
        value_column: column index of values
        header: True if datafile has a header row
    Returns:
        gene_id_map (OrderedDict)

    """
    with open(idfile) as fh:
        if header:
            next(fh)  # skip the header row
        mydict = OrderedDict()
        for curline in fh:
            curline = curline.rstrip()
            items = curline.split("\t")
            key = items[key_column]
            value = items[value_column]
            if key in mydict:
                if not mydict[key] == value:
                    LOG.debug(
                        "Conflicting items: key=%s, registered value=%s <=> new value=%s"
                        % (key, mydict[key], value))
            else:
                mydict[key] = value
    return mydict
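# A usage sketch: write a tiny two-column id file and map column 0 to
# column 1. The file name and ids below are made up for illustration.
with open('idmap_example.tsv', 'w') as out:
    out.write('mgi_id\tentrez_id\n')      # header row
    out.write('MGI:101757\t11298\n')
    out.write('MGI:101758\t11302\n')
gene_id_map = get_idmap('idmap_example.tsv', key_column=0, value_column=1,
                        header=True)
print(gene_id_map['MGI:101757'])  # 11298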
Example #49
def extract_student_options(options):
    options.update({
        'poll': Polls.query.order_by(Polls.id.desc()).limit(1).first()
    })

    options.update({
        'history': History.query.order_by(History.timestamp.desc()).limit(10).all()
    })
    options.update({
        'categories': ResourceCategory.query.all()
    })

    if 'testing' in session:
        testing = session.pop('testing') if 'finished' in session['testing'] else session['testing']

        options.update({
            'testing': {
                'tests': Test.query.filter(Test.id.in_(testing['tests'])).all(),
                'index': testing['index'],
                'answers': testing['answers'],
                'correct': testing['correct'],
                'finished': testing.get('finished', False)
            }
        })

    uploaded_files = File.query.join(Account).join(Student).filter(Student.id == current_user.student.id).order_by(
        File.timestamp.asc()).all()

    options.update({
        'uploadedFiles': uploaded_files
    })

    if current_user.student.stats.all():
        options.update({
            'stats': {
                'last': current_user.student.stats.order_by(Stat.timestamp.desc()).first(),
                'best': current_user.student.stats.order_by(Stat.answers.desc()).first()
            }
        })

    # STATISTIC
    group_files_stat = OrderedDict()

    if current_user.student.group:
        for student in current_user.student.group.students.order_by(Student.id.desc()):
            student_files = File.query.join(Account).join(Student).filter(
                Student.id == student.id).order_by(File.timestamp.desc()).all()

            files_stat = OrderedDict()

            for studentFile in student_files:
                ts = studentFile.timestamp.strftime('%Y.%m.%d')
                files_stat[ts] = files_stat.get(ts, 0) + 1

            group_files_stat.update({student: files_stat})

    options.update({'group_files_stat': group_files_stat})
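# The per-day tally from the loop above, shown standalone with hypothetical
# timestamps instead of File objects.
import datetime
from collections import OrderedDict
stamps = [datetime.datetime(2016, 5, 1), datetime.datetime(2016, 5, 1),
          datetime.datetime(2016, 5, 2)]
files_stat = OrderedDict()
for stamp in stamps:
    ts = stamp.strftime('%Y.%m.%d')
    files_stat[ts] = files_stat.get(ts, 0) + 1
print(files_stat)  # OrderedDict([('2016.05.01', 2), ('2016.05.02', 1)])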
Example #50
def client_devices_new(client_id):
    detail = get_client_detail(client_id)
    data = detail['data']

    # device types, ordered alphabetically by key
    device_types_list = OrderedDict(sorted(data["device_types"].items()))
    products = {}
    products_per_type = OrderedDict()
    if "products" in data:
        products_list = data["products"]
        products_list = sorted(products_list, key=itemgetter("name"))
        for prod in products_list:
            product_label = data['device_types'][prod["type"]]['name']
            products[prod["name"]] = prod["type"]
            if product_label not in products_per_type:
                products_per_type[product_label] = OrderedDict()
            products_per_type[product_label][prod['name']] = prod["type"]
    # TODO : include products icons
        
 
    return render_template('client_device_new.html',
            device_types = device_types_list,
            products = products,
            products_per_type = products_per_type,
            clientid = client_id,
            mactive="clients",
            active = 'devices',
            client_detail = detail,
            )
Example #51
def from_files(filenames):
    """Return an iterator that provides a sequence of Histograms for
the histograms defined in filenames.
    """
    all_histograms = OrderedDict()
    for filename in filenames:
        parser = FILENAME_PARSERS[os.path.basename(filename)]
        histograms = parser(filename)

        # OrderedDicts are important, because then the iteration order over
        # the parsed histograms is stable, which makes the insertion into
        # all_histograms stable, which makes ordering in generated files
        # stable, which makes builds more deterministic.
        if not isinstance(histograms, OrderedDict):
            raise BaseException("histogram parser didn't provide an OrderedDict")

        for (name, definition) in histograms.iteritems():
            if name in all_histograms:
                raise DefinitionException("duplicate histogram name %s" % name)
            all_histograms[name] = definition

    # We require that all USE_COUNTER_* histograms be defined in a contiguous
    # block.
    use_counter_indices = filter(lambda x: x[1].startswith("USE_COUNTER_"),
                                 enumerate(all_histograms.iterkeys()))
    if use_counter_indices:
        lower_bound = use_counter_indices[0][0]
        upper_bound = use_counter_indices[-1][0]
        n_counters = upper_bound - lower_bound + 1
        if n_counters != len(use_counter_indices):
            raise DefinitionException("use counter histograms must be defined in a contiguous block")

    for (name, definition) in all_histograms.iteritems():
        yield Histogram(name, definition)
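# The USE_COUNTER_* contiguity check above, isolated on hypothetical names:
names = ['A', 'USE_COUNTER_X', 'USE_COUNTER_Y', 'B']
indices = [i for i, n in enumerate(names) if n.startswith('USE_COUNTER_')]
if indices:
    n_counters = indices[-1] - indices[0] + 1
    print(n_counters == len(indices))  # True -- the block is unbroken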
Example #52
def clients():
    cli = MQSyncReq(app.zmq_context)
    msg = MQMessage()
    msg.set_action('client.list.get')
    res = cli.request('manager', msg.get(), timeout=10)
    if res is not None:
        client_list = res.get_data()
    else:
        client_list = {}

    client_list_per_host_per_type = OrderedDict()
    for client in client_list:
        cli_type = client_list[client]['type']
        cli_host = client_list[client]['host']

        if cli_host not in client_list_per_host_per_type:
            client_list_per_host_per_type[cli_host] = {}

        if cli_type not in client_list_per_host_per_type[cli_host]:
            client_list_per_host_per_type[cli_host][cli_type] = {}

        client_list_per_host_per_type[cli_host][cli_type][client] = client_list[client]

    return render_template('clients.html',
        mactive="clients",
        overview_state="collapse",
        clients=client_list,
        client_list_per_host_per_type=client_list_per_host_per_type
        )
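# An equivalent host/type grouping using setdefault, on hypothetical records:
from collections import OrderedDict
client_list = {'plugin-x.host1': {'type': 'plugin', 'host': 'host1'},
               'core-y.host1': {'type': 'core', 'host': 'host1'}}
grouped = OrderedDict()
for client in client_list:
    info = client_list[client]
    grouped.setdefault(info['host'], {}).setdefault(info['type'], {})[client] = info
print(sorted(grouped['host1']))  # ['core', 'plugin']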
Example #53
class DebugBox(Window):

    def __init__(self,*args):
        super(DebugBox,self).__init__(*args)
        self.attr_list = OrderedDict()
        self.next_free = 1


    def register(self,text,width,win):
        '''
        store details of watch in an ordered dict of tuples
        dict1{"attr"+win} = attr, win_ref, width, index
        index = order on the screen (y coord)
        '''
        if text + str(win) not in self.attr_list:
            self.attr_list[text + str(win)] = (text, win, width, self.next_free)
            self.next_free += 1
        else:
            pass
            #raise Exception # nothing should try to register twice

    def run(self):

        while True:
            for key in self.attr_list.keys():
                text, win, width, index = self.attr_list[key]
            
                self.addstr_no_cursor(text,index,1,width,win)
            time.sleep(1)
            self.refresh()
Example #54
    def get_representation(self, infile):
        histograms = self.get_all_histograms(infile)

        #create dictionary with [category][sample][systematic] -> bin data
        nd = OrderedDict()
        cats = set([])
        samps = set([])
        systs = set([])
        for k in sorted(histograms.keys()):
            samp, cat, syst = self.get_key(k)
            if not self.filter_key((samp, cat, syst)):
                continue
            if cat not in nd:
                nd[cat] = OrderedDict()
            if samp not in nd[cat]:
                nd[cat][samp] = OrderedDict()
            # the innermost value is the bin data itself, keyed by systematic
            nd[cat][samp][syst] = get_bins(histograms[k])
            samps.add(samp)
            systs.add(syst)
            cats.add(cat)

        self.all_categories = list(cats)
        self.all_samples = list(samps)
        self.all_systematics = list(systs)

        return nd
Example #55
 def parse_block(fp):
     block = OrderedDict()
     line = fp.readline().strip()
     while line and line != '}':  # stop at the closing brace (or at EOF on malformed input)
         ltype = line_type(line)
         if ltype == 0: # key: value
             line = line.split('#')[0]  # strip an inline comment, if any
             key, value = line.split(':')
             key = key.strip()
             value = value.strip().strip('"')
             if key in block:
                 if isinstance(block[key], list):
                     block[key].append(value)
                 else:
                     block[key] = [block[key], value]
             else:
                 block[key] = value
         elif ltype == 1: # blockname {
             key = line.split('{')[0].strip()
             sub_block = parse_block(fp)
             block[key] = sub_block
         line = fp.readline().strip()
         line = line.split('#')[0].strip()  # re-strip so '}' with a trailing comment still ends the block
     return block
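# A usage sketch. line_type() is defined elsewhere in the original source;
# the stand-in below just distinguishes 'key: value' lines (0) from
# 'name {' block openers (1). The prototxt-style snippet is made up.
def line_type(line):
    return 1 if line.rstrip().endswith('{') else 0

with open('block_example.txt', 'w') as out:
    out.write('name: "conv1"\ntype: "Convolution"\n}\n')
with open('block_example.txt') as fp:
    print(parse_block(fp))
# OrderedDict([('name', 'conv1'), ('type', 'Convolution')])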
Example #56
 def getTeacherField(self, item):
     fields = ['localLang'     , 'certification', 'study'     , 'qualified_teacher', \
               'ethnic_teacher', 'education'    , 'experience', 'teaching_years'   , 'remarks']
     fieldsName = {
         'localLang': _(u'Local Language'),
         'certification': _(u'Ethnic language certification'),
         'study': _(u'Revitalization study'),
         'qualified_teacher': _(u'Teaching class (Qualified teacher)'),
         'ethnic_teacher': _(u'Teaching class (Ethnic teacher)'),
         'education': _(u'Education'),
         'experience': _(u'work experience'),
         'teaching_years': _(u'Teaching years'),
         'remarks': _(u'Remarks')
     }
     fieldsDict = OrderedDict()
     for field in fields:
         field_value = getattr(item, field, '')
         if field_value:
             fieldsDict.update({fieldsName[field]: field_value})
     if fieldsName['localLang'] in fieldsDict:
         localLangValue = '\r\n'.join([
             lang.split(',')[1]
             for lang in fieldsDict[fieldsName['localLang']].split('/')
         ])
         fieldsDict[fieldsName['localLang']] = localLangValue
     return fieldsDict
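# The localLang reformatting above, in isolation: each '/'-separated entry
# is assumed to look like 'code,label', and only the label is kept (the
# value below is made up).
raw = '01,Amis/02,Atayal'
print('\r\n'.join(lang.split(',')[1] for lang in raw.split('/')))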
Example #57
    def format(self, data, keys=None, group_by=None, domain=None):
        rows_dict = OrderedDict()
        tmp_data = OrderedDict()
        sorted_data = []
        value_chains = get_domain_configuration(domain).by_type_hierarchy
        for key, row in data.items():
            to_list = list(key)

            def find_name(elements, deep):
                for element in elements:
                    if deep == len(key) - 3 and key[deep + 1] == element.val:
                        return element.text
                    elif key[deep + 1] == element.val:
                        return find_name(element.next, deep + 1)

            name = find_name(value_chains, 0)
            to_list[2] = name
            tmp_data.update({tuple(to_list): row})
        if tmp_data:
            sorted_data = sorted(tmp_data.items(),
                                 key=lambda x: (x[0][0], x[0][2]))

        for row in sorted_data:
            formatted_row = self._format.format_row(row[1])
            if formatted_row[0] not in rows_dict:
                rows_dict[formatted_row[0]] = []
            rows_dict[formatted_row[0]].append(formatted_row[1])

        for key, row in rows_dict.items():
            total_column = self.calculate_total_column(row)
            res = [key, total_column]
            res.extend(row)
            yield res
Example #58
def get_readgroup_and_seq_dict_from_bam(bam_files, allow_collision=False):
    print "Gather seq dict and read groups from %s bam files" % len(bam_files)
    all_read_groups = {}
    all_seq_dict = OrderedDict()
    for bam_file in bam_files:
        command = "samtools view -H %s | egrep '@RG|@SQ' " % bam_file
        stdout, process = utils_commands.get_output_stream_from_command(command)
        for line in stdout:
            if line.startswith('@RG'):
                read_group_dict = {}
                for element in line.strip().split('\t'):
                    if element != '@RG':
                        key, value = element.split(':', 1)  # values may themselves contain ':'
                        read_group_dict[key] = value
                if 'ID' in read_group_dict and read_group_dict.get('ID') not in all_read_groups:
                    all_read_groups[read_group_dict.get('ID')] = read_group_dict
            if line.startswith('@SQ'):
                seq_dict = {}
                for element in line.strip().split('\t'):
                    if element != '@SQ':
                        key, value = element.split(':', 1)
                        if key == 'LN':
                            value = int(value)
                        seq_dict[key] = value
                if 'SN' in seq_dict:
                    name = seq_dict.get('SN')
                    if name in all_seq_dict and not allow_collision:
                        raise StandardError(
                            "Identical sequence dictionary name %s in %s and previous bam entry and collision not allowed" % (
                            name, bam_file))
                    all_seq_dict[name] = seq_dict

    return all_read_groups.values(), all_seq_dict.values()
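# Parsing a single @RG header line the same way, without needing samtools
# (the line below is a made-up example):
line = '@RG\tID:sample1\tSM:sample1\tPL:ILLUMINA'
read_group_dict = {}
for element in line.strip().split('\t'):
    if element != '@RG':
        key, value = element.split(':', 1)
        read_group_dict[key] = value
print(read_group_dict['ID'])  # sample1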