Code example #1
from shapely.geometry import Polygon  # used below; flatten() and get_jaws() are project-local helpers


def get_shapely_from_cp(leaf_boundaries, control_point):
    """
    This function will return the outline of MLCs within jaws
    :param leaf_boundaries: an ordered list of leaf boundaries
    :param control_point: a ControlPoint class object
    :return: a shapely object of the complete MLC aperture as one shape (including MLC overlap)
    """
    lb = leaf_boundaries
    mlc = control_point.mlc
    jaws = get_jaws(control_point)
    x_min, x_max = jaws['x_min'], jaws['x_max']
    y_min, y_max = jaws['y_min'], jaws['y_max']

    jaw_points = [(x_min, y_min), (x_min, y_max), (x_max, y_max), (x_max, y_min)]
    jaw_shapely = Polygon(jaw_points)

    if control_point.leaf_type == 'mlcx':
        a = flatten([[(m, lb[i]), (m, lb[i+1])] for i, m in enumerate(mlc[0])])
        b = flatten([[(m, lb[i]), (m, lb[i+1])] for i, m in enumerate(mlc[1])])
    elif control_point.leaf_type == 'mlcy':
        a = flatten([[(lb[i], m), (lb[i + 1], m)] for i, m in enumerate(mlc[0])])
        b = flatten([[(lb[i], m), (lb[i + 1], m)] for i, m in enumerate(mlc[1])])
    else:
        return jaw_shapely

    mlc_points = a + b[::-1]  # concatenate a and reverse(b)
    mlc_aperture = Polygon(mlc_points).buffer(0)

    # This intersection dominates the runtime; since the jaws are rectangular,
    # a dedicated rectangle clip may be faster (see the sketch below).
    aperture = mlc_aperture.intersection(jaw_shapely)

    return aperture
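
The comment above asks whether a rectangular clip can beat a general intersection. Shapely 1.7+ ships shapely.ops.clip_by_rect, which wraps GEOS's rectangle-clipping routine; a minimal sketch under that assumption, reusing the variables from the function above:

from shapely.ops import clip_by_rect  # requires Shapely >= 1.7

# Hypothetical drop-in for the intersection above: clip the MLC aperture
# directly against the rectangular jaw opening.
aperture = clip_by_rect(mlc_aperture, x_min, y_min, x_max, y_max)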
Code example #2
    def similar(self):
        top_cpt = self.cpt.top_similar()
        top_icd = self.icd.top_similar()

        combi = utilities.get_init_combi()
        combi = utilities.flatten(top_cpt, combi)
        combi = utilities.flatten(top_icd, combi)

        return utilities.beautify_and_sort(combi)
Code example #3
def compute_mc_associations(frg_inf,
                            pos_crd,
                            bin_bnd,
                            n_perm=1000,
                            pos_ids=None,
                            verbose=True):
    import numpy as np
    from utilities import hasOL, flatten

    # initialization
    n_bin = bin_bnd.shape[0]

    # re-index circles
    frg_inf[:, 0] = np.unique(frg_inf[:, 0], return_inverse=True)[1] + 1
    n_read = np.max(frg_inf[:, 0])

    # convert fragments to bin-coverage
    cfb_lst = [list() for i in range(n_read + 1)]
    n_frg = frg_inf.shape[0]
    for fi in range(n_frg):
        bin_idx = np.where(hasOL(frg_inf[fi, 2:4], bin_bnd))[0]
        cfb_lst[frg_inf[fi, 0]].append(list(bin_idx))

    # select positive/negative circles
    if pos_ids is not None:
        assert len(pos_crd) == 0
        is_pos = np.isin(frg_inf[:, 0], pos_ids)
    else:
        is_pos = np.where(hasOL(pos_crd, frg_inf[:, 1:4]))[0]
    frg_pos = frg_inf[np.isin(frg_inf[:, 0], frg_inf[is_pos, 0]), :]
    frg_neg = frg_inf[~np.isin(frg_inf[:, 0], frg_inf[is_pos, 0]), :]
    cfb_pos = [cfb_lst[i] for i in np.unique(frg_pos[:, 0])]
    cfb_neg = [cfb_lst[i] for i in np.unique(frg_neg[:, 0])]
    n_pos = len(cfb_pos)
    n_neg = len(cfb_neg)

    # make positive profile
    prf_pos = np.zeros(n_bin)
    for pi in range(n_pos):
        bin_lst = flatten(cfb_pos[pi])
        prf_pos[bin_lst] += 1

    # make background profile from negative set
    prf_rnd = np.zeros([n_perm, n_bin])
    neg_lst = list(range(n_neg))  # materialize so np.random.shuffle can permute in place
    for ei in np.arange(n_perm):
        if verbose and (((ei + 1) % 200) == 0):
            print('\t{:d} randomized profiles are computed.'.format(ei + 1))
        np.random.shuffle(neg_lst)
        for rd_idx in neg_lst[:n_pos]:
            f2b_rnd = cfb_neg[rd_idx]
            np.random.shuffle(f2b_rnd)
            # dropping f2b_rnd[0] ensures one fragment is randomly removed every time
            prf_rnd[ei, flatten(f2b_rnd[1:])] += 1

    return prf_pos, prf_rnd, frg_pos, frg_neg
Code example #4
File: df1commands.py Project: frellwan/SciFy-Pi
    def decode(self, packet):
        """ Decodes a the response

        :param packet: The packet data to decode
        """
        count, self.records = 1, []

        self.frame = packet
        ########################################################
        # Do not include DLE STX and DLE ETX CRC in calculation
        ########################################################
        data = packet[2:-4]

        #############################################
        # Calculate CRC after removing escaped DLE's
        #############################################
        crc, = struct.unpack(">H", packet[-2:])
        if not utilities.checkCRC(data, crc):
            raise CRCException("Error in CRC : %d" % crc)

        ############################
        # Packet Header Information
        ############################
        self.dest, self.src, self.command, self.sts, self.transaction_id = struct.unpack(">BBBBH", data[0:6])
        data = data[6:]

        #####################################
        # Packet data Information
        # Use Little Endian format for data
        #####################################
        self.records = ()
        if len(data) > 0:
            if self.Address.subElement > 0:
                elementSize = SUBELEMENT_SIZE[self.Address.fileType]
                formatStr = "<" + SUBELEMENT_STRUCT[self.Address.fileType]
            else:
                elementSize = ELEMENT_SIZE[self.Address.fileType]
                formatStr = "<" + ELEMENT_STRUCT[self.Address.fileType]

            for i in range(0, len(data), elementSize):
                if self.Address.bitNumber is not None:
                    register, = struct.unpack(formatStr, data[i : i + elementSize])
                    if register & 2 ** self.Address.bitNumber:
                        self.records += (1,)
                    else:
                        self.records += (0,)
                else:
                    if self.Address.fileType == 0x8D:
                        record, = struct.unpack(formatStr, data[i : i + elementSize])
                        size, = struct.unpack("<h", record[0:2])
                        newRecord = ""
                        # use a fresh loop variable; `i` already indexes the outer loop
                        for j in range(2, elementSize, 2):
                            newRecord += record[j + 1] + record[j]
                        self.records += (newRecord[0:size],)
                    else:
                        self.records += struct.unpack(formatStr, data[i : i + elementSize])

            self.records = utilities.flatten(self.records)
        else:
            self.records = None
Code example #5
File: dottorrent.py Project: dionyziz/downpour
 def __populate( self, metainfo ):
     # a torrent file may have a single tracker, multiple trackers,
     # or both a reference to a single tracker and multiple trackers (for backwards compatibility)
     if 'announce' in metainfo:
         # single tracker
         self.trackerURLs.add( metainfo[ 'announce' ] )
     if 'announce-list' in metainfo:
         # multiple trackers
         self.trackerURLs |= set( utilities.flatten( metainfo[ 'announce-list' ] ) )
     if 'created by' in metainfo:
         self.createdBy = metainfo[ 'created by' ]
     if 'comment' in metainfo:
         self.comment = metainfo[ 'comment' ]
     if 'encoding' in metainfo:
         self.encoding = metainfo[ 'encoding' ]
     if 'creation date' in metainfo:
         self.creationDate = datetime.datetime.fromtimestamp( metainfo[ 'creation date' ] )
     if 'files' in metainfo[ 'info' ]:
         # multi file mode
         self.singleFile = False
         self.name = metainfo[ 'info' ][ 'name' ]
         self.files = []
         for file in metainfo[ 'info' ][ 'files' ]:
             self.files.append( File( file[ 'path' ], file[ 'length' ] ) )
     if 'length' in metainfo[ 'info' ]:
         # single file mode
         self.singleFile = True
         self.name = metainfo[ 'info' ][ 'name' ]
         self.files = [ File( [ metainfo[ 'info' ][ 'name' ] ], metainfo[ 'info' ][ 'length' ] ) ]
Code example #6
File: graph.py Project: sgord512/id_algo
 def get_ancestors(self, y):
     assert (self.obs is not None)
     # This returns the ancestors of the vertices in the set y
     n = self.get_num_nodes()
     ancestorIndices = utilities.flatten(
         self.obs.neighborhood(y, order=n, mode=igraph.IN))
     return set(self.obs.vs[ancestorIndices]["name"])
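
igraph's Graph.neighborhood returns one list of vertex indices per queried vertex (each list starting with the vertex itself), which is why the result needs flattening. A hypothetical toy graph to illustrate:

import igraph

g = igraph.Graph(directed=True)
g.add_vertices(3)
g.vs["name"] = ["a", "b", "c"]
g.add_edges([(0, 1), (1, 2)])  # a -> b -> c
# Incoming neighborhoods of b and c up to depth 2: one index list per vertex.
print(g.neighborhood(["b", "c"], order=2, mode=igraph.IN))  # [[1, 0], [2, 1, 0]]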
Code example #7
def main():
    triangles = [(n, 0) for n in itertools.takewhile(
        lambda n: n < 10000, map(utilities.triangle, itertools.count(1)))
                 if n > 999 and n % 100 > 10]
    squares = [(n, 1) for n in itertools.takewhile(
        lambda n: n < 10000, map(utilities.square, itertools.count(1)))
               if n > 999 and n % 100 > 10]
    pentagons = [(n, 2) for n in itertools.takewhile(
        lambda n: n < 10000, map(utilities.pentagon, itertools.count(1)))
                 if n > 999 and n % 100 > 10]
    hexagons = [(n, 3) for n in itertools.takewhile(
        lambda n: n < 10000, map(utilities.hexagon, itertools.count(1)))
                if n > 999 and n % 100 > 10]
    heptagons = [(n, 4) for n in itertools.takewhile(
        lambda n: n < 10000, map(utilities.heptagon, itertools.count(1)))
                 if n > 999 and n % 100 > 10]
    octagons = [(n, 5) for n in itertools.takewhile(
        lambda n: n < 10000, map(utilities.octagon, itertools.count(1)))
                if n > 999 and n % 100 > 10]
    cycles = []
    values = utilities.flatten(
        [squares, pentagons, hexagons, heptagons, octagons])
    for t in triangles:
        for n in values:
            if str(t[0])[2:4] == str(n[0])[:2]:
                cycles.append([t, n])
    for i in range(4):
        cycles = extend_cycles(cycles, values)
    return sum([
        term[0] for term in [
            cycle for cycle in cycles
            if str(cycle[-1][0])[2:4] == str(cycle[0][0])[:2]
        ][0]
    ])
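
The six comprehensions above differ only in the generator they call and the tag they attach. A hedged refactor that builds the same tagged lists in one pass, assuming the same utilities generators:

def figurate_candidates():
    # One (value, tag) list per polygonal family; same filter as above:
    # 4-digit values whose last two digits can begin another 4-digit number.
    gens = [utilities.triangle, utilities.square, utilities.pentagon,
            utilities.hexagon, utilities.heptagon, utilities.octagon]
    return [[(n, tag)
             for n in itertools.takewhile(lambda n: n < 10000,
                                          map(gen, itertools.count(1)))
             if n > 999 and n % 100 > 10]
            for tag, gen in enumerate(gens)]

Here triangles would be figurate_candidates()[0], squares figurate_candidates()[1], and so on.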
Code example #8
 def __init__(self, category_source_path, category_text_dir_path):
     self.category_text_dir_path = self.append_slash_if_omitted(
         category_text_dir_path)
     self.texts = None
     with open(category_source_path) as file:
         words = self.parse_categories_yaml(yaml.safe_load(file.read()))  # safe_load avoids PyYAML's unsafe default loader
         self.list = sorted(flatten(words.values()))
         self.inverse_words = self.create_inverse_index(words)
Code example #9
def get_omitted_baike_links(
        category_path='./sources/category.txt',
        output_missed_path='./output/missed_standard_words.txt',
        output_link_path='./output/standard_words_link.txt'):
    with open(category_path) as file:
        words = parse_categories_dict(parse_categories(file.read()))
        words = flatten(words.values())
        get_words_omitted_in_baike(words, output_missed_path, output_link_path)
Code example #10
File: bencoding.py Project: dionyziz/downpour
def encode( data ):
    if type( data ) is str:
        return str( len( data ) ) + ':' + data
    if type( data ) is int:
        return 'i' + str( data ) + 'e'
    if type( data ) is list:
        return 'l' + ''.join( map( encode, data ) ) + 'e'
    if type( data ) is dict:
        flattened = utilities.flatten( data.items() )
        encoded = map( encode, flattened )
        joined = ''.join( encoded )
        return 'd' + joined + 'e'
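
Every snippet on this page leans on a project-local flatten; implementations differ, but for the dict.items() call above a one-level version suffices. A minimal sketch (not downpour's actual code):

def flatten(items):
    # One-level flatten: [(k1, v1), (k2, v2)] -> [k1, v1, k2, v2]
    return [x for sub in items for x in sub]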
Code example #11
def build_password(password, keylog, cap):
	if len(password) + len(set(utilities.flatten(keylog))) > cap:
		return ""
	start_chars = {key[0] for key in keylog if len(key) > 0}
	if len(start_chars) == 0:
		return password
	for char in start_chars:
		new_keylog = [key[:1].replace(char, "") + key[1:] for key in keylog if len(key) > 0]
		new_password = password + char
		new_password = build_password(new_password, new_keylog, cap)
		if len(new_password) > 0:
			return new_password
	return ""
Code example #12
File: list.py Project: insperatum/ec
    def __init__(self, tasks):
        self.lexicon = set(flatten((t.examples for t in tasks),
                                   abort=lambda x: isinstance(x, str))
                           ).union({"LIST_START", "LIST_END", "?"})

        # Calculate the maximum length; starting from infinity presumably keeps
        # tokenize() from truncating while the true maximum is being measured.
        self.maximumLength = POSITIVEINFINITY
        self.maximumLength = max( len(l)
                                  for t in tasks
                                  for xs,y in self.tokenize(t.examples)
                                  for l in [y] + [ x for x in xs ] )
        
        super(LearnedFeatureExtractor, self).__init__(lexicon=list(self.lexicon),
                                                      tasks=tasks,
                                                      cuda=self.USE_CUDA,
                                                      H=self.H,
                                                      bidirectional=True)
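
The abort keyword here (and in the __hash__ example later on this page) suggests the ec project's flatten recurses into nested iterables but stops at values matching the predicate. A minimal sketch of that behavior, not the project's actual code:

def flatten(x, abort=lambda x: False):
    # Recursively flatten nested iterables; values matching `abort` (or plain
    # strings, to avoid infinite descent) are yielded whole.
    if abort(x) or isinstance(x, str) or not hasattr(x, '__iter__'):
        yield x
    else:
        for item in x:
            for leaf in flatten(item, abort=abort):
                yield leaf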
Code example #13
    def write(self, response):
        ''' Write the data to the specified file

        :param response: The response to process
        '''
        data = list(response)
        response = utilities.flatten(data)
        currentTime = time.localtime()
        stringData = time.strftime('%m/%d/%Y,', currentTime)
        stringData += time.strftime('%T,', currentTime)
        stringData += ','.join(map(str, response))
        stringData += '\n'
        logName = self.logFile.write(stringData)
        if (self.fileName != logName):
            self.fileName = logName
Code example #14
    def sample_jobs_to_df(self, sample, limit=0):
        #OUT OF ORDER
        sample_coll = self._database[sample]
        project = {'_id': False, 'id': True, 'jobs': True}
        cursor = sample_coll.find(projection=project, limit=limit)

        def unwind_jobs(task):
            """moves all of a task's attributes down into each of its jobs,
            and returns the updated jobs list"""
            for job in task['jobs']:
                for attr in task.keys():
                    if attr != 'jobs':
                        job['task_{}'.format(attr)] = task[attr]
            return task['jobs']

        jobs = utilities.flatten(map(unwind_jobs, cursor),
                                 ltypes=(list, tuple))
        jobs_df = pd.DataFrame(jobs)
        return jobs_df
Code example #15
    def __callNumactl(self):
        """Call numactl, detecting sockets and core addressing
           return An array of int (concatenation of node X cpus: outputs of numactl --hardware)
        """

        rvl = []
        cmd = "numactl --hardware"
        output = runCmd(cmd).split('\n')

        # Looking for lines (in this order !)
        # node 0 cpus: 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30
        # node 1 cpus: 1 3 5 7 9 11 13 15 17 19 21 23 25 27 29 31
        sock_cnt = 0
        for l in output:
            if l.startswith('node ' + str(sock_cnt) + ' cpus:'):
                cores = l.partition(':')[2]
                rvl.append(list(map(int, cores.strip().split(' '))))
                sock_cnt += 1

        return flatten(rvl)
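
For the two sample lines in the comment, the loop collects [[0, 2, ...], [1, 3, ...]] and flatten concatenates them in node order. A standalone sketch of just the parsing step (hypothetical helper; assumes the same line format):

def parse_numactl_cpus(output_lines):
    # Collect each 'node N cpus: ...' line in node order, then concatenate.
    cores, sock = [], 0
    for line in output_lines:
        if line.startswith('node {:d} cpus:'.format(sock)):
            cores.append([int(c) for c in line.partition(':')[2].split()])
            sock += 1
    return [cpu for grp in cores for cpu in grp]

parse_numactl_cpus(['node 0 cpus: 0 2 4', 'node 1 cpus: 1 3 5'])  # -> [0, 2, 4, 1, 3, 5]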
Code example #16
    def forward(self, x: Tensor):
        """
        Expects (N,Cin,H,W)
        N is batch size
        Args:
            x:

        Returns:

        """
        # make sure that the input shape is correct
        in_shape = x.shape[1:]
        if in_shape != self.input_shape:
            raise AssertionError("%s != %s" % (in_shape, self.input_shape))
        # convert the input image to the correct format
        x = x.to(torch.float32) / 255

        x = self.conv_layers(x)
        x = self.activation(self.fc(flatten(x)))
        x = self.to_actions(x)
        return x
Code example #17
    def find_paths_multi_tables(self, list_of_tables, fix_first=False):
        '''
        Given a list of tables in any order, find a path that traverses all of them.

        If fix_first is True, then the first element will remain constant (useful when wanting to break down a specific outcome by various other variables)
        '''
        # first get all combos, these are candidate incomplete paths (missing intermediary tables)
        if len(list_of_tables) == 1:
            return [list_of_tables]

        permutations = itertools.permutations(list_of_tables)
        if fix_first:
            permutations = [x for x in permutations if x[0] == list_of_tables[0]]
        
        valid_incomplete_paths = []
        for permutation in permutations:
            is_valid = True
            for pair in u.pairwise(permutation):
                if len(self.find_paths_between_tables(start_table=pair[0], destination_table=pair[1])) == 0:
                    is_valid = False
            if is_valid:
                valid_incomplete_paths.append(permutation)
        
        unflattened_valid_complete_paths = []
        for valid_incomplete_path in valid_incomplete_paths:
            path_possibilities_pairwise = []
            for pair in u.pairwise(valid_incomplete_path):
                path_possibilities_pairwise.append(self.find_paths_between_tables(start_table=pair[0], destination_table=pair[1]))
            combos = itertools.product(*path_possibilities_pairwise)
            for combo in combos:
                unflattened_valid_complete_paths.append(list(combo))

        flattened_valid_complete_paths = []
        for l in unflattened_valid_complete_paths:
            flattened_valid_complete_paths.append(list(u.flatten(l)))
        
        flattened_valid_complete_paths = u.remove_adjacent_repeats(flattened_valid_complete_paths)
        
        return flattened_valid_complete_paths
Code example #18
    def evaluateBits(self, alarmBits):
        bits = utilities.flatten(alarmBits)
        if (bits[0]):
            # self.notified added so multiple emails won't be sent
            if (not self.notified):
                serialLog.debug("Sending Email notice of Error")
                strMsg = ''
                if (bits[0] & 1):
                    """ Temperature Too High Alarm """
                    strMsg += "Temperature has reached a critical point\n"

                if (bits[0] & 2):
                    """ Motor Amps exceeded threshold """
                    strMsg += "TU Motor Current has exceeded baseline threshold\n"

                if (bits[0] & 4):
                    """ Vibration exceeds threshold """
                    strMsg += "Vibration sensor readings outside of acceptable tolerance\n"

                if (bits[0] & 8):
                    """ Speed variance outside of tolerance """
                    strMsg += "TU speed is varying more than specified tolerance\n"

                sender, recipient = self.config.getEmail()
                sendEmail(sender, recipient, strMsg, "Alert: Alarm")
                self.notified = True
        else:
            self.notified = False

        if (bits[1]):
            # self.loaded added so multiple uploads won't be initiated
            # TODO: Add code to load values to PLC
            if (not self.loaded):
                serialLog.debug("Loading Values to PLC")
                self.loaded = True

                def clearRecipeBit(response):
                    request = protectedBitWriteRequest(1, self.ALARMS[1], [0])
                    d = self.sendRequest(request)
                    d.addErrback(self.errorHandler, 'clearRecipeBit')


                def sendRecipe(recipe):
                    PLCRecipe = self.config.getPLCRecipe()

                    result = self.mtrimSerial.writeParameter(1, 1, float(recipe[-3]))
                    result.addErrback(self.errorHandler, "sendRecipe")

                    result = self.mtrimSerial.writeParameter(1, 20, float(recipe[-2]))
                    result.addErrback(self.errorHandler, "sendRecipe")

                    result = self.mtrimSerial.writeParameter(1, 21, float(recipe[-1]))
                    result.addErrback(self.errorHandler, "sendRecipe")

                    index = 1                               # Index 0 is recipe name
                    var = []
                    for address in PLCRecipe:
                        request = protectedWriteRequest(1, address, [float(recipe[index])])
                        result = self.sendRequest(request)
                        result.addErrback(self.errorHandler, "sendRecipe")
                        var.append(result)
                    d = defer.gatherResults(var)
                    d.addCallback(clearRecipeBit)
                    d.addErrback(self.errorHandler, 'saving data in StartOEEData failed')
                         
                def getRecipeValues(recipeName):
                    localDir, remoteDir = self.config.getRecipeDirectories()
                    filename = localDir + '/' + 'families.csv'
                    fObj = open(filename, 'r')
                    for recipe in fObj:
                        if recipe.strip() in recipeName[0]:
                            recipeFile = localDir + '/' + recipe.strip() + '.csv'
                            fRecipe = open(recipeFile, 'r')
                            for line in fRecipe:
                                if recipeName[0] in line.strip():
                                    sendRecipe(line.strip().split(','))
                                
                request = protectedReadRequest(1, 'ST15:20')
                d = self.sendRequest(request)
                d.addCallback(getRecipeValues)
                d.addErrback(self.errorHandler, 'saving recipe data')
                
        else:
            self.loaded = False
            

        if (bits[2]):
            # self.transferred added so multiple downloads won't be initiated
            def clearDownloadBit(response):
                request = protectedBitWriteRequest(1, self.ALARMS[2], [0])
                d = self.sendRequest(request)
                d.addErrback(self.errorHandler, 'clearDownloadBit')
               
            if (not self.transferred):
                #Download Recipes from Server
                localDir, remoteDir = self.config.getRecipeDirectories()
                serialLog.debug("Downloading Recipes")
                d = self.ftpEndpoint.connect(FTPClientAFactory())
                d.addErrback(self.FTPfail, 'startRecipeTransfer')
                d.addCallback(getRecipeFiles, localDir)
                d.addCallback(clearDownloadBit)
                self.transferred = True
        else:
            self.transferred = False
Code example #19
def perform_vpsoi_analysis(configs, soi_name, min_n_frg=2, n_perm=1000):
    import platform
    import matplotlib
    if platform.system() == 'Linux':
        matplotlib.use('Agg')
    from matplotlib import pyplot as plt, patches
    from matplotlib.colors import LinearSegmentedColormap

    from utilities import load_mc4c, load_annotation, hasOL, flatten

    # initialization
    if configs['output_file'] is None:
        configs['output_file'] = configs[
            'output_dir'] + '/analysis_atVP-SOI_{:s}_{:s}.pdf'.format(
                configs['run_id'], soi_name)
    edge_lst = np.linspace(configs['roi_start'],
                           configs['roi_end'],
                           num=201,
                           dtype=np.int64).reshape(-1, 1)
    bin_bnd = np.hstack([edge_lst[:-1], edge_lst[1:] - 1])
    bin_cen = np.mean(bin_bnd, axis=1, dtype=np.int64)
    bin_w = bin_bnd[0, 1] - bin_bnd[0, 0]
    x_lim = [configs['roi_start'], configs['roi_end']]
    y_lim = [0, 10]

    # load MC-HC data
    frg_dp = load_mc4c(configs,
                       unique_only=True,
                       valid_only=True,
                       min_mq=20,
                       reindex_reads=True,
                       verbose=True)
    frg_np = frg_dp[['ReadID', 'Chr', 'ExtStart', 'ExtEnd']].values
    del frg_dp

    # select within roi fragments
    vp_crd = [configs['vp_cnum'], configs['vp_start'], configs['vp_end']]
    roi_crd = [configs['vp_cnum'], configs['roi_start'], configs['roi_end']]
    is_vp = hasOL(vp_crd, frg_np[:, 1:4])
    is_roi = hasOL(roi_crd, frg_np[:, 1:4])
    frg_roi = frg_np[~is_vp & is_roi, :]
    del frg_np

    # keep only circles with more than one ROI fragment (viewpoint excluded)
    cir_size = np.bincount(frg_roi[:, 0])[frg_roi[:, 0]]
    frg_inf = frg_roi[cir_size >= min_n_frg, :]
    frg_inf[:, 0] = np.unique(frg_inf[:, 0], return_inverse=True)[1] + 1
    n_read = len(np.unique(frg_inf[:, 0]))

    # convert fragments to bin-coverage
    cfb_lst = [list() for i in range(n_read + 1)]
    n_frg = frg_inf.shape[0]
    for fi in range(n_frg):
        bin_idx = np.where(hasOL(frg_inf[fi, 2:4], bin_bnd))[0]
        cfb_lst[frg_inf[fi, 0]].append(bin_idx.tolist())

    # filter circles for (>1 bin cvg)
    valid_lst = []
    for rd_nid in range(1, n_read + 1):
        fb_lst = cfb_lst[rd_nid]
        bin_cvg = np.unique(flatten(fb_lst))
        if len(bin_cvg) > 1:
            valid_lst.append(rd_nid)
    frg_inf = frg_inf[np.isin(frg_inf[:, 0], valid_lst), :]
    frg_inf[:, 0] = np.unique(frg_inf[:, 0], return_inverse=True)[1] + 1
    n_read = np.max(frg_inf[:, 0])

    # get soi info
    ant_pd = load_annotation(configs['genome_build'],
                             roi_crd=roi_crd).reset_index(drop=True)
    n_ant = ant_pd.shape[0]
    is_in = np.where(np.isin(ant_pd['ant_name'], soi_name))[0]
    assert len(is_in) == 1
    soi_pd = ant_pd.loc[is_in[0], :]
    soi_crd = [
        soi_pd['ant_cnum'], soi_pd['ant_pos'] - int(bin_w * 1.5),
        soi_pd['ant_pos'] + int(bin_w * 1.5)
    ]
    if hasOL(soi_crd, vp_crd)[0]:
        print('[w] Selected SOI coordinate overlaps with the view point. Ignoring the analysis')
        return

    # compute positive profile and backgrounds
    print('Computing expected profile for bins:')
    prf_frq, prf_rnd, frg_pos, frg_neg = compute_mc_associations(frg_inf,
                                                                 soi_crd,
                                                                 bin_bnd,
                                                                 n_perm=n_perm)
    n_pos = len(np.unique(frg_pos[:, 0]))
    prf_obs = prf_frq * 100.0 / n_pos
    print('{:,d} reads are found to cover '.format(n_pos) +
          '{:s} area ({:s}:{:d}-{:d})'.format(soi_pd['ant_name'], soi_pd['ant_chr'], soi_crd[1], soi_crd[2]))

    # check enough #pos
    if n_pos < MIN_N_POS:
        print('[w] #reads in the positive set is insufficient (n={:d}, required >{:d})'.format(
            n_pos, MIN_N_POS))
        print('Analysis is ignored ...')
        return

    # compute scores
    nrm_rnd = prf_rnd * 100.0 / n_pos
    prf_exp = np.mean(nrm_rnd, axis=0)
    prf_std = np.std(nrm_rnd, axis=0, ddof=0)
    np.seterr(all='ignore')
    bin_scr = np.divide(prf_obs - prf_exp, prf_std)
    np.seterr(all=None)

    # set vp bins to nan
    vp_bnd = [configs['vp_start'], configs['vp_end']]
    is_vp = hasOL(vp_bnd, bin_bnd)
    bin_scr[is_vp] = np.nan

    # compute score for annotations
    print('Computing expected profile for annotations:')
    ant_pos = ant_pd['ant_pos'].values.reshape(-1, 1)
    ant_bnd = np.hstack(
        [ant_pos - int(bin_w * 1.5), ant_pos + int(bin_w * 1.5)])
    ant_obs, soi_rnd = compute_mc_associations(frg_inf,
                                               soi_crd,
                                               ant_bnd,
                                               n_perm=n_perm)[:2]
    ant_exp = np.mean(soi_rnd, axis=0)
    ant_std = np.std(soi_rnd, axis=0, ddof=0)
    np.seterr(all='ignore')
    ant_scr = np.divide(ant_obs - ant_exp, ant_std)
    np.seterr(all=None)

    # set vp score to nan
    is_vp = hasOL(vp_bnd, ant_bnd)
    is_soi = hasOL(soi_crd[1:3], ant_bnd)
    ant_scr[is_vp | is_soi] = np.nan

    # plotting
    fig = plt.figure(figsize=(15, 3))
    ax_prf = plt.subplot2grid((20, 40), (0, 0), rowspan=19, colspan=39)
    ax_cmp = plt.subplot2grid((20, 40), (0, 39), rowspan=10, colspan=1)
    ax_scr = plt.subplot2grid((20, 40), (19, 0), rowspan=1, colspan=39)

    # set up colorbar
    c_lim = [-6, 6]
    clr_lst = [
        '#ff1a1a', '#ff7575', '#ffcccc', '#ffffff', '#ffffff', '#ffffff',
        '#ccdfff', '#3d84ff', '#3900f5'
    ]
    clr_map = LinearSegmentedColormap.from_list('test', clr_lst, N=9)
    clr_map.set_bad('gray', 0.05)
    norm = matplotlib.colors.Normalize(vmin=c_lim[0], vmax=c_lim[1])
    cbar_h = matplotlib.colorbar.ColorbarBase(ax_cmp, cmap=clr_map, norm=norm)
    # cbar_h.ax.tick_params(labelsize=12)
    cbar_h.ax.set_ylabel('z-score', rotation=90)

    # profile plot
    ax_prf.plot(bin_cen, prf_obs, color='#5757ff', linewidth=1)
    ax_prf.plot(bin_cen, prf_exp, color='#cccccc', linewidth=1)
    ax_prf.fill_between(bin_cen,
                        prf_exp - prf_std,
                        prf_exp + prf_std,
                        color='#ebebeb',
                        linewidth=0.2)

    ax_prf.add_patch(
        patches.Rectangle([vp_bnd[0], y_lim[0]],
                          vp_bnd[1] - vp_bnd[0],
                          y_lim[1] - y_lim[0],
                          edgecolor='None',
                          facecolor='orange',
                          zorder=100))
    ax_prf.add_patch(
        patches.Rectangle([soi_crd[1], y_lim[0]],
                          soi_crd[2] - soi_crd[1],
                          y_lim[1] - y_lim[0],
                          edgecolor='None',
                          facecolor='green',
                          zorder=100))
    ax_prf.set_xlim(x_lim)
    ax_prf.set_ylim(y_lim)
    ax_prf.set_xticks([])

    # add score plot
    ax_scr.imshow(bin_scr.reshape(1, -1),
                  extent=x_lim + [-500, 500],
                  cmap=clr_map,
                  vmin=c_lim[0],
                  vmax=c_lim[1],
                  interpolation='nearest')
    ax_scr.set_xlim(x_lim)
    ax_scr.set_yticks([])

    # add annotations
    for ai in range(n_ant):
        ax_prf.text(ant_pos[ai],
                    y_lim[1],
                    ant_pd.loc[ai, 'ant_name'],
                    horizontalalignment='center',
                    verticalalignment='bottom',
                    rotation=60)
        ax_prf.plot(ant_pos[[ai, ai]],
                    y_lim,
                    ':',
                    color='#bfbfbf',
                    linewidth=1,
                    alpha=0.4)

        if not np.isnan(ant_scr[ai]):
            ax_prf.add_patch(
                patches.Rectangle([ant_bnd[ai, 0], y_lim[1] - 0.15],
                                  ant_bnd[ai, 1] - ant_bnd[ai, 0],
                                  0.15,
                                  edgecolor='None',
                                  facecolor=clr_map(ant_scr[ai]),
                                  zorder=10))
            ax_prf.text(ant_pos[ai],
                        y_lim[1] - 0.2,
                        '{:+0.1f}'.format(ant_scr[ai]),
                        horizontalalignment='center',
                        verticalalignment='top',
                        fontweight='bold',
                        fontsize=6)

    # final adjustments
    x_ticks = np.linspace(configs['roi_start'],
                          configs['roi_end'],
                          7,
                          dtype=np.int64)
    y_ticks = ax_prf.get_yticks()
    x_tick_label = ['{:0.2f}m'.format(x / 1e6) for x in x_ticks]
    y_tick_label = ['{:0.0f}%'.format(y) for y in y_ticks]
    ax_scr.set_xticks(x_ticks)
    ax_scr.set_xticklabels(x_tick_label)
    ax_prf.set_yticklabels(y_tick_label)
    ax_prf.set_ylabel('Percentage of reads')
    ax_prf.set_title(
        'VP-SOI from {:s}, using as SOI {:s}\n'.format(configs['run_id'],
                                                       soi_name) +
        '#read (#roiFrg>{:d}, ex. vp)={:,d}, '.format(min_n_frg - 1, n_read) +
        '#pos = {:d}\n#perm={:d}\n\n\n'.format(n_pos, n_perm))
    plt.savefig(configs['output_file'], bbox_inches='tight')
Code example #20
 def get_descendants(self, v):
     n = self.get_num_nodes()
     visible = self.generate_visible()
     descendantIndices = utilities.flatten(
         visible.neighborhood(v, order=n, mode=igraph.OUT))
     return set(visible.vs[descendantIndices]["name"])
Code example #21
def perform_soisoi_analysis(config_lst, min_n_frg=2, n_perm=1000):
    import platform
    import matplotlib
    if platform.system() == 'Linux':
        matplotlib.use('Agg')
    from matplotlib import pyplot as plt
    from matplotlib.colors import LinearSegmentedColormap

    from utilities import load_mc4c, load_annotation, hasOL, flatten

    # initialization
    run_id = ','.join([config['run_id'] for config in config_lst])
    if config_lst[0]['output_file'] is None:
        config_lst[0]['output_file'] = config_lst[0][
            'output_dir'] + '/analysis_atSOI-SOI_{:s}.pdf'.format(run_id)
    edge_lst = np.linspace(config_lst[0]['roi_start'],
                           config_lst[0]['roi_end'],
                           num=201,
                           dtype=np.int64).reshape(-1, 1)
    bin_bnd = np.hstack([edge_lst[:-1], edge_lst[1:] - 1])
    bin_w = bin_bnd[0, 1] - bin_bnd[0, 0]
    del edge_lst

    # load MC-HC data
    frg_dp = load_mc4c(config_lst,
                       unique_only=True,
                       valid_only=True,
                       min_mq=20,
                       reindex_reads=True,
                       verbose=True)
    frg_np = frg_dp[['ReadID', 'Chr', 'ExtStart', 'ExtEnd']].values
    del frg_dp

    # select within roi fragments
    vp_crd = [
        config_lst[0]['vp_cnum'], config_lst[0]['vp_start'],
        config_lst[0]['vp_end']
    ]
    roi_crd = [
        config_lst[0]['vp_cnum'], config_lst[0]['roi_start'],
        config_lst[0]['roi_end']
    ]
    is_vp = hasOL(vp_crd, frg_np[:, 1:4])
    is_roi = hasOL(roi_crd, frg_np[:, 1:4])
    frg_roi = frg_np[~is_vp & is_roi, :]
    del frg_np

    # keep only reads with more than one ROI fragment (viewpoint excluded)
    cir_size = np.bincount(frg_roi[:, 0])[frg_roi[:, 0]]
    frg_inf = frg_roi[cir_size >= min_n_frg, :]
    frg_inf[:, 0] = np.unique(frg_inf[:, 0], return_inverse=True)[1] + 1
    n_read = len(np.unique(frg_inf[:, 0]))

    # convert fragments to bin-coverage
    cfb_lst = [list() for i in range(n_read + 1)]
    n_frg = frg_inf.shape[0]
    for fi in range(n_frg):
        bin_idx = np.where(hasOL(frg_inf[fi, 2:4], bin_bnd))[0]
        cfb_lst[frg_inf[fi, 0]].append(bin_idx.tolist())

    # filter reads for (>1 bin cvg)
    valid_lst = []
    for rd_nid in range(1, n_read + 1):
        fb_lst = cfb_lst[rd_nid]
        bin_cvg = np.unique(flatten(fb_lst))
        if len(bin_cvg) > 1:
            valid_lst.append(rd_nid)
    frg_inf = frg_inf[np.isin(frg_inf[:, 0], valid_lst), :]

    # Downsample and re-index
    # rnd_rid = np.random.choice(np.unique(frg_inf[:, 0]), 8618, replace=False)  ### random selection
    # frg_inf = frg_inf[np.isin(frg_inf[:, 0], rnd_rid), :]
    frg_inf[:, 0] = np.unique(frg_inf[:, 0], return_inverse=True)[1] + 1
    n_read = np.max(frg_inf[:, 0])

    # loop over each SOI
    ant_pd = load_annotation(config_lst[0]['genome_build'],
                             roi_crd=roi_crd).reset_index(drop=True)
    n_ant = ant_pd.shape[0]
    ant_name_lst = ant_pd['ant_name'].values
    ant_scr = np.full(shape=[n_ant, n_ant], fill_value=np.nan)
    n_pos = np.zeros(n_ant, dtype=int)
    x_tick_lbl = []
    for ai in range(n_ant):
        soi_pd = ant_pd.loc[ai, :]
        soi_crd = [
            soi_pd['ant_cnum'], soi_pd['ant_pos'] - int(bin_w * 1.5),
            soi_pd['ant_pos'] + int(bin_w * 1.5)
        ]
        if hasOL(vp_crd[1:], soi_crd[1:]):
            x_tick_lbl.append(ant_name_lst[ai])
            continue

        # compute score for annotations
        print('Computing expected profile for {:s}:'.format(soi_pd['ant_name']))
        ant_pos = ant_pd['ant_pos'].values.reshape(-1, 1)
        ant_bnd = np.hstack(
            [ant_pos - int(bin_w * 1.5), ant_pos + int(bin_w * 1.5)])
        ant_obs, soi_rnd, frg_pos = compute_mc_associations(frg_inf,
                                                            soi_crd,
                                                            ant_bnd,
                                                            n_perm=n_perm)[:3]
        n_pos[ai] = len(np.unique(frg_pos[:, 0]))
        x_tick_lbl.append('{:s}\n#{:,d}'.format(ant_name_lst[ai], n_pos[ai]))
        del frg_pos

        # check number of positive reads
        if n_pos[ai] <= MIN_N_POS:
            print('[w] #reads (n={:d}) in the positive set is insufficient '.format(n_pos[ai]) +
                  '(required >{:d}). This analysis is ignored ...'.format(MIN_N_POS))
            continue

        # calculate expected profile
        ant_exp = np.mean(soi_rnd, axis=0)
        ant_std = np.std(soi_rnd, axis=0, ddof=0)
        np.seterr(all='ignore')
        ant_scr[:, ai] = np.divide(ant_obs - ant_exp, ant_std)
        np.seterr(all=None)

        # set vp score to nan
        is_vp = hasOL(vp_crd[1:], ant_bnd)
        is_soi = hasOL(soi_crd[1:3], ant_bnd)
        ant_scr[is_vp | is_soi, ai] = np.nan

    # plotting
    plt.figure(figsize=(8, 7))
    ax_scr = plt.subplot2grid((40, 40), (0, 0), rowspan=39, colspan=39)
    ax_cmp = plt.subplot2grid((40, 40), (0, 39), rowspan=20, colspan=1)

    # set up colorbar
    c_lim = [-6, 6]
    clr_lst = [
        '#ff1a1a', '#ff7575', '#ffcccc', '#ffffff', '#ffffff', '#ffffff',
        '#ccdfff', '#3d84ff', '#3900f5'
    ]
    clr_map = LinearSegmentedColormap.from_list('test', clr_lst, N=9)
    clr_map.set_bad('gray', 0.2)
    norm = matplotlib.colors.Normalize(vmin=c_lim[0], vmax=c_lim[1])
    cbar_h = matplotlib.colorbar.ColorbarBase(ax_cmp, cmap=clr_map, norm=norm)
    # cbar_h.ax.tick_params(labelsize=12)
    cbar_h.ax.set_ylabel('z-score', rotation=90)
    cbar_edge = np.round(cbar_h.cmap(norm(c_lim)), decimals=2)

    # add score scatter matrix
    x_lim = [0, n_ant]
    img_h = ax_scr.imshow(ant_scr,
                          extent=x_lim + x_lim,
                          cmap=clr_map,
                          vmin=c_lim[0],
                          vmax=c_lim[1],
                          interpolation='nearest',
                          origin='lower')
    ax_scr.set_xlim(x_lim)
    ax_scr.set_ylim(x_lim)

    # add score values to each box
    for ai in range(n_ant):
        for aj in range(n_ant):
            if np.isnan(ant_scr[ai, aj]):
                continue
            ant_clr = np.round(img_h.cmap(img_h.norm(ant_scr[ai, aj])),
                               decimals=2)
            if np.array_equal(ant_clr, cbar_edge[0]) or np.array_equal(
                    ant_clr, cbar_edge[1]):
                txt_clr = '#ffffff'
            else:
                txt_clr = '#000000'
            ax_scr.text(aj + 0.5,
                        ai + 0.5,
                        '{:+0.1f}'.format(ant_scr[ai, aj]),
                        color=txt_clr,
                        horizontalalignment='center',
                        verticalalignment='center',
                        fontsize=12)

    # final adjustments
    ax_scr.set_xticks(np.arange(n_ant) + 0.5)
    ax_scr.set_yticks(np.arange(n_ant) + 0.5)
    ax_scr.set_xticklabels(x_tick_lbl)
    ax_scr.set_yticklabels(ant_name_lst)
    ax_scr.set_xlabel('Selected SOIs')
    ax_scr.set_title(
        'Association matrix from {:s}\n'.format(run_id) +
        '#read (#roiFrg>{:d}, ex. vp)={:,d}, '.format(min_n_frg - 1, n_read) +
        'bin-w={:d}; #perm={:d}'.format(config_lst[0]['bin_width'], n_perm))
    plt.savefig(config_lst[0]['output_file'], bbox_inches='tight')
Code example #22
def main():
    n_gons = [[(10, pair[0], pair[1])]
              for pair in itertools.permutations(nums, 2)]
    n_gons = [n_gon + [(pair[0], n_gon[0][2], pair[1])]
              for n_gon in n_gons
              for pair in itertools.permutations(
                  [num for num in nums if num not in utilities.flatten(n_gon)], 2)
              if sum(n_gon[0]) == sum((pair[0], n_gon[0][2], pair[1]))]
    n_gons = [n_gon + [(pair[0], n_gon[1][2], pair[1])]
              for n_gon in n_gons
              for pair in itertools.permutations(
                  [num for num in nums if num not in utilities.flatten(n_gon)], 2)
              if sum(n_gon[0]) == sum((pair[0], n_gon[1][2], pair[1]))]
    n_gons = [n_gon + [(pair[0], n_gon[2][2], pair[1])]
              for n_gon in n_gons
              for pair in itertools.permutations(
                  [num for num in nums if num not in utilities.flatten(n_gon)], 2)
              if sum(n_gon[0]) == sum((pair[0], n_gon[2][2], pair[1]))]
    # close the ring: the last outer node is forced by the magic sum
    n_gons = [n_gon + [(sum(n_gon[0]) - (n_gon[3][2] + n_gon[0][1]),
                        n_gon[3][2], n_gon[0][1])]
              for n_gon in n_gons
              if sum(n_gon[0]) - (n_gon[3][2] + n_gon[0][1]) not in utilities.flatten(n_gon)
              and 1 <= sum(n_gon[0]) - (n_gon[3][2] + n_gon[0][1]) <= 10]
    # rotate each ring to start at its smallest triplet, then sort the strings
    n_gons = sorted([n_gon[n_gon.index(min(n_gon)):] + n_gon[:n_gon.index(min(n_gon))]
                     for n_gon in n_gons])
    n_gons = [''.join([''.join([str(n) for n in triplet]) for triplet in n_gon])
              for n_gon in n_gons]
    return n_gons[-1]
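
As a readability aid, this hypothetical predicate restates the invariants the comprehensions enforce on a completed ring (equal line sums; the ten numbers 1-10 each used exactly once), assuming utilities.flatten flattens the triplets:

def is_magic_ring(n_gon):
    # Each triplet is (outer, inner, next_inner); inner nodes appear in two
    # triplets, so a valid 5-gon ring touches exactly ten distinct numbers.
    total = sum(n_gon[0])
    return (all(sum(triplet) == total for triplet in n_gon)
            and len(set(utilities.flatten(n_gon))) == 10)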
Code example #23
def perform_at_across_roi(config_lst, min_n_frg=2, n_perm=1000):
    import platform
    import matplotlib
    if platform.system() == 'Linux':
        matplotlib.use('Agg')
    from matplotlib import pyplot as plt, patches
    from matplotlib.colors import LinearSegmentedColormap

    from utilities import load_mc4c, load_annotation, hasOL, flatten, limit_to_roi

    # initialization
    run_id = ','.join([config['run_id'] for config in config_lst])
    configs = config_lst[0]
    if configs['output_file'] is None:
        configs['output_file'] = configs[
            'output_dir'] + '/analysis_atAcrossROI_{:s}.pdf'.format(run_id)

    # create bin list
    edge_lst = np.linspace(configs['roi_start'],
                           configs['roi_end'],
                           num=201,
                           dtype=np.int64).reshape(-1, 1)
    bin_bnd = np.hstack([edge_lst[:-1], edge_lst[1:] - 1])
    bin_w = bin_bnd[0, 1] - bin_bnd[0, 0]
    n_bin = bin_bnd.shape[0]

    # make block list
    bin_cen = np.mean(bin_bnd, axis=1, dtype=np.int64).reshape(-1, 1)
    # blk_crd = np.hstack([np.repeat(configs['vp_cnum'], n_bin / 3).reshape(-1, 1), edge_lst[:-3:3], edge_lst[3::3] - 1])
    blk_crd = np.hstack([
        np.repeat(configs['vp_cnum'], n_bin).reshape(-1, 1),
        bin_cen - int(bin_w * 1.5), bin_cen + int(bin_w * 1.5) - 1
    ])
    blk_w = blk_crd[0, 2] - blk_crd[0, 1]
    n_blk = blk_crd.shape[0]
    del edge_lst

    # define areas
    roi_cen = np.mean(
        [np.min(configs['prm_start']),
         np.max(configs['prm_end'])],
        dtype=int)
    vp_crd = np.array([
        configs['vp_cnum'], roi_cen - int(bin_w * 1.5),
        roi_cen + int(bin_w * 1.5)
    ])
    roi_crd = [configs['vp_cnum'], configs['roi_start'], configs['roi_end']]

    # load MC-HC data
    frg_dp = load_mc4c(config_lst,
                       unique_only=True,
                       valid_only=True,
                       min_mq=20,
                       reindex_reads=True,
                       verbose=True)
    read_all = frg_dp[['ReadID', 'Chr', 'ExtStart', 'ExtEnd']].values
    del frg_dp

    # select >2 roi-fragments
    read_inf = limit_to_roi(read_all[:, :4],
                            vp_crd=vp_crd,
                            roi_crd=roi_crd,
                            min_n_frg=min_n_frg)
    del read_all

    # re-index reads
    read_inf[:, 0] = np.unique(read_inf[:, 0], return_inverse=True)[1] + 1
    n_read = len(np.unique(read_inf[:, 0]))

    # convert fragments to bin-coverage
    print('Mapping reads to bins ...')
    cfb_lst = [list() for i in range(n_read + 1)]
    n_frg = read_inf.shape[0]
    for fi in range(n_frg):
        bin_idx = np.where(hasOL(read_inf[fi, 2:4], bin_bnd))[0]
        cfb_lst[read_inf[fi, 0]].append(bin_idx.tolist())

    # filter circles for (>1 bin cvg)
    print('Selecting only reads with >1 bins covered')
    valid_lst = []
    for rd_nid in range(1, n_read + 1):
        fb_lst = cfb_lst[rd_nid]
        bin_cvg = np.unique(flatten(fb_lst))
        if len(bin_cvg) > 1:
            valid_lst.append(rd_nid)
    read_inf = read_inf[np.isin(read_inf[:, 0], valid_lst), :]

    # subsample reads
    # rnd_ids = np.random.choice(np.unique(read_inf[:, 0]), 6870, replace=False)
    # read_inf = read_inf[np.isin(read_inf[:, 0], rnd_ids), :]

    # reindexing reads
    read_inf[:, 0] = np.unique(read_inf[:, 0], return_inverse=True)[1] + 1
    n_read = np.max(read_inf[:, 0])
    print('{:,d} reads are left after bin-coverage filter.'.format(n_read))

    # get soi info
    ant_pd = load_annotation(configs['genome_build'], roi_crd=roi_crd)
    ant_bnd = np.hstack(
        [ant_pd[['ant_pos']].values, ant_pd[['ant_pos']].values])

    # compute score for annotations
    print('Computing expected profile for {:d} blocks (required coverage: {:d} reads):'.format(
        n_blk, MIN_N_POS))
    blk_scr = np.full([n_blk, n_blk], fill_value=np.nan)
    # x_tick_lbl = [' '] * n_blk
    y_tick_lbl = [' '] * n_blk
    n_ignored = 0
    for bi in range(n_blk):
        showprogress(bi, n_blk, n_step=20)

        # add axes labels
        ant_idx = np.where(hasOL(blk_crd[bi, 1:], ant_bnd, offset=0))[0]
        if len(ant_idx) > 0:
            ant_name = ','.join([ant_pd.loc[i, 'ant_name'] for i in ant_idx])
            # x_tick_lbl[bi] = ('{:s}, #{:0.0f}'.format(ant_name, n_pos))
            y_tick_lbl[bi] = ant_name
        # else:
        # x_tick_lbl[bi] = ('#{:0.0f}'.format(n_pos))

        # ignore if vp
        if hasOL(blk_crd[bi, :], vp_crd, offset=blk_w)[0]:
            continue

        # compute the observe and background
        blk_obs, blk_rnd, read_pos = compute_mc_associations(read_inf,
                                                             blk_crd[bi, :],
                                                             blk_crd[:, 1:],
                                                             n_perm=n_perm,
                                                             verbose=False)[:3]
        n_pos = len(np.unique(read_pos[:, 0]))
        if n_pos < MIN_N_POS:
            n_ignored += 1
            continue

        # compute the scores
        blk_exp = np.mean(blk_rnd, axis=0)
        blk_std = np.std(blk_rnd, axis=0, ddof=0)
        np.seterr(all='ignore')
        blk_scr[:, bi] = np.divide(blk_obs - blk_exp, blk_std)
        np.seterr(all=None)

        # remove scores overlapping with positive set
        is_nei = hasOL(blk_crd[bi, 1:], blk_crd[:, 1:], offset=blk_w)
        blk_scr[is_nei, bi] = np.nan

    if n_ignored != 0:
        print('[w] {:d}/{:d} blocks are ignored due to low coverage.'.format(
            n_ignored, n_blk))

    # set self scores to nan
    # np.fill_diagonal(blk_scr, val=np.nan)

    # clean up tick labels

    # plotting the scores
    plt.figure(figsize=(15, 13))
    ax_scr = plt.subplot2grid((40, 40), (0, 0), rowspan=39, colspan=39)
    ax_cmp = plt.subplot2grid((40, 40), (0, 39), rowspan=20, colspan=1)

    # set up color bar
    c_lim = [-6, 6]
    clr_lst = [
        '#ff1a1a', '#ff7575', '#ffcccc', '#ffffff', '#ffffff', '#ffffff',
        '#ccdfff', '#3d84ff', '#3900f5'
    ]
    clr_map = LinearSegmentedColormap.from_list('test', clr_lst, N=9)
    clr_map.set_bad('gray', 0.1)
    norm = matplotlib.colors.Normalize(vmin=c_lim[0], vmax=c_lim[1])
    cbar_h = matplotlib.colorbar.ColorbarBase(ax_cmp, cmap=clr_map, norm=norm)
    # cbar_h.ax.tick_params(labelsize=12)
    cbar_h.ax.set_ylabel('z-score', rotation=90)
    cbar_edge = np.round(cbar_h.cmap(norm(c_lim)), decimals=2)

    # add score scatter matrix
    x_lim = [0, n_blk]
    ax_scr.imshow(blk_scr,
                  extent=x_lim + x_lim,
                  cmap=clr_map,
                  vmin=c_lim[0],
                  vmax=c_lim[1],
                  interpolation='nearest',
                  origin='lower')
    ax_scr.set_xlim(x_lim)
    ax_scr.set_ylim(x_lim)

    # add vp patches
    vp_idx = np.where(hasOL(vp_crd, blk_crd, offset=blk_w))[0]
    ax_scr.add_patch(
        patches.Rectangle([0, vp_idx[0]],
                          n_blk,
                          vp_idx[-1] - vp_idx[0],
                          linewidth=0,
                          edgecolor='None',
                          facecolor='orange'))
    ax_scr.add_patch(
        patches.Rectangle([vp_idx[0], 0],
                          vp_idx[-1] - vp_idx[0],
                          n_blk,
                          linewidth=0,
                          edgecolor='None',
                          facecolor='orange'))

    # add score values to each box
    # for bi in range(n_blk):
    #     for bj in range(n_blk):
    #         if np.isnan(blk_scr[bi, bj]):
    #             continue
    #         ant_clr = np.round(img_h.cmap(img_h.norm(blk_scr[bi, bj])), decimals=2)
    #         if np.array_equal(ant_clr, cbar_edge[0]) or np.array_equal(ant_clr, cbar_edge[1]):
    #             txt_clr = '#ffffff'
    #         else:
    #             txt_clr = '#000000'
    #         ax_scr.text(bj + 0.5, bi + 0.5, '{:+0.1f}'.format(blk_scr[bi, bj]), color=txt_clr,
    #                     horizontalalignment='center', verticalalignment='center', fontsize=12)

    # adjust ticks
    for lbl in np.unique(y_tick_lbl):
        if lbl == ' ':
            continue
        idx_lst = np.where(np.isin(y_tick_lbl, lbl))[0]
        if len(idx_lst) > 1:
            kpt_idx = np.mean(idx_lst, dtype=int)
            for idx in idx_lst:
                y_tick_lbl[idx] = 'l'
            y_tick_lbl[kpt_idx] = lbl + ' '

    # final adjustments
    ax_scr.set_xticks(np.arange(n_blk) + 0.5)
    ax_scr.set_yticks(np.arange(n_blk) + 0.5)
    ax_scr.set_xticklabels(y_tick_lbl, rotation=90)
    ax_scr.set_yticklabels(y_tick_lbl)
    ax_scr.set_xlabel('Selected SOIs')
    ax_scr.set_title(
        'Association matrix from {:s}\n'.format(configs['run_id']) +
        '#read (#roiFrg>{:d}, ex. vp)={:,d}, '.format(min_n_frg - 1, n_read) +
        'bin-w={:0.0f}; block-w={:0.0f}; #perm={:d}'.format(
            bin_w, blk_w, n_perm))
    plt.savefig(configs['output_file'], bbox_inches='tight')
Code example #24
File: graph.py Project: sgord512/id_algo
 def get_all_connected_vertices(self, y):
     n = self.get_num_nodes()
     reachableIndices = utilities.flatten(
         self.neighborhood(y, order=n, mode=igraph.ALL))
     return set(self.vs[reachableIndices]["name"])
Code example #25
	def __hash__(self): 
		return reduce(lambda a, b: hash(a + hash(b)), flatten(self.IO), 0) + hash(self.p) + hash(self.sketch)
Code example #26
File: lhe.py Project: elliot-hughes/truculence
# VARIABLES:
tags = {
	"LesHouchesEvents": {
		"event": {},
		"init": {},
		"header": {
			"MGVersion": {},
			"MG5ProcCard": {},
			"MGProcCard": {},
			"MGRunCard": {},
			"slha": {},
			"MCGenerationInfo": {},
		},
	},
}
tags_full = utilities.flatten(tags).keys()
tags_all = [tag_full.split("_")[-1] for tag_full in tags_full]
# :VARIABLES
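
The split on "_" above suggests this project's flatten collapses nested dicts into single-level keys joined with underscores (e.g. 'LesHouchesEvents_header_slha'). A minimal sketch of that behavior, not truculence's actual code:

def flatten(d, prefix=""):
    # Flatten nested dicts into {"a_b_c": leaf_value} form.
    out = {}
    for key, value in d.items():
        full = prefix + "_" + key if prefix else key
        if isinstance(value, dict) and value:
            out.update(flatten(value, full))
        else:
            out[full] = value
    return out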


# CLASSES:
class header:
	def __init__(self, lhe_string):
		match = re.search("(<header>[\s\S]*</header>)", lhe_string)
#		print lhe_string
#		print match
		if match:
			self.raw = match.group(1)
		else:
			self.raw = False
	
Code example #27
 def __hash__(self):
     return reduce(lambda a, b: hash(a + hash(b)),
                   flatten(self.IO, abort=lambda x: type(x) is str),
                   0) + hash(self.p) + hash(self.sketch)
Code example #28
def get_block(sudoku, row, col):
	block_row = row // 3 * 3
	block_col = col // 3 * 3
	return utilities.flatten([row[block_col:block_col+3] for row in sudoku[block_row:block_row+3]])
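
A quick usage sketch, assuming utilities.flatten does a one-level flatten: the call returns the nine values of the 3x3 block containing the given cell.

# Hypothetical usage with a toy grid where cell (r, c) holds (3*r + c) % 9 + 1.
sudoku = [[(3 * r + c) % 9 + 1 for c in range(9)] for r in range(9)]
print(get_block(sudoku, 4, 7))  # block rows 3-5, cols 6-8 -> [7, 8, 9, 1, 2, 3, 4, 5, 6]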
Code example #29
 def line_existance_part(self, x, scope, dilation_rate=4, drop_prob=0.1, verbose=True):
     # With a 288 x 800 x 3 input, the encoder output is 36 x 100 x 128, so the
     # dilated convolution below produces a 36 x 100 x 32 feature map.
     shape = x.get_shape().as_list()
     
     W_conv = self.get_variable_weight_decay(scope + "/W_dconv",
                 shape=[3, 3, shape[-1], 32], # ([filter_height, filter_width, in_depth, out_depth])
                 initializer=tf.contrib.layers.xavier_initializer(),
                 loss_category="line_existance_wd_loss")
     b_conv = self.get_variable_weight_decay(scope + "/b_dconv", shape=[32], # ([out_depth])
                 initializer=tf.constant_initializer(0),
                 loss_category="line_existance_wd_loss")
     conv_branch = tf.nn.atrous_conv2d(x, W_conv, rate=dilation_rate,
                 padding="SAME") + b_conv
     if(verbose):
         print(conv_branch.get_shape().as_list(), "dilated convolution")
            
                                     
                             
     # # # batch norm and ReLU:
     conv_branch = tf.contrib.slim.batch_norm(conv_branch)
     if(verbose):print(conv_branch.get_shape().as_list(), "batch normalization")
     conv_branch = tf.nn.relu(conv_branch, name=scope + "/RELU1")
     
     if(verbose):print(conv_branch.get_shape().as_list(), "relu")
     # # regularizer:
     conv_branch = spatial_dropout(conv_branch, drop_prob)
     
     if(verbose):print(conv_branch.get_shape().as_list(), "dropout")
     
     # # conv:
     W_conv = self.get_variable_weight_decay(scope + "/W_conv",
                 shape=[3, 3, 32, 5], # ([filter_height, filter_width, in_depth, out_depth])
                 initializer=tf.contrib.layers.xavier_initializer(),
                 loss_category="line_existance_wd_loss")
     b_conv = self.get_variable_weight_decay(scope + "/b_conv", shape=[5], # ([out_depth])
                 initializer=tf.constant_initializer(0),
                 loss_category="line_existance_wd_loss")
     conv_branch = tf.nn.conv2d(conv_branch, W_conv, strides=[1, 1, 1, 1],
                 padding="SAME") + b_conv
     if(verbose):print(conv_branch.get_shape().as_list(), "convolution 1, 1")                  
     # spatial softmax
     conv_branch = spatial_softmax(conv_branch)
     if(verbose):print(conv_branch.get_shape().as_list(), "spatial softmax")
     
     conv_branch = tf.nn.avg_pool(conv_branch, ksize=2, strides=2,padding="SAME")
     if(verbose):print(conv_branch.get_shape().as_list(), "average pooling")
     # size of flattened matrix should be 4500
     fc = flatten(conv_branch)
     if(verbose):print(fc.get_shape().as_list(), "flatten")
     # # fully connected network:
     W_fc = self.get_variable_weight_decay(scope + "/W_fc",
                 shape=[4500, 128], 
                 initializer=tf.contrib.layers.xavier_initializer(),
                 loss_category="line_existance_wd_loss")
     
     b_fc = self.get_variable_weight_decay(scope + "/b_fc", shape=[128],
                 initializer=tf.constant_initializer(0),
                 loss_category="line_existance_wd_loss")
     fc = tf.matmul(fc, W_fc)+ b_fc
     if(verbose):print(fc.get_shape().as_list(), "fully connected")
     fc = tf.nn.relu(fc, name=scope + "/RELU2")
     if(verbose):print(fc.get_shape().as_list(), "relu")
     # # fully connected network:
     W_fc = self.get_variable_weight_decay(scope + "/W_fc1",
                 shape=[128, 4], 
                 initializer=tf.contrib.layers.xavier_initializer(),
                 loss_category="line_existance_wd_loss")
     
     b_fc = self.get_variable_weight_decay(scope + "/b_fc1", shape=[4],
                 initializer=tf.constant_initializer(0),
                 loss_category="line_existance_wd_loss")
     fc = tf.matmul(fc, W_fc)+ b_fc
     if(verbose):print(fc.get_shape().as_list(), "fully connected")
     fc = tf.math.sigmoid(fc,name=scope + '/existance_logits')
     if(verbose):print(fc.get_shape().as_list(), "sigmoid")
     return fc
Code example #30
File: ufunc_gen.py Project: jsalvatier/ufunc_gen
def generate_ufuncs(file, function_definitions, includes=[]):
    template = env.get_template("function_file.pyx")
    context = {"functions": flatten(function_definitions), "includes": includes}

    with open(file, "w") as f:
        f.write(template.render(**context))