def discover(self, service_uuids, char_uuids, timeout_sec=TIMEOUT_SEC):
    """Poll until every requested service and characteristic UUID has
    been discovered on this device, or until timeout_sec elapses.

    Returns True once all expected UUIDs are present, False on timeout.
    """
    wanted_services = set(service_uuids)
    wanted_chars = set(char_uuids)
    deadline = time.time() + timeout_sec
    while True:
        # Service UUIDs the device has advertised so far.
        found_services = set(self.advertised)
        # Characteristic UUIDs currently present in the D-Bus object tree.
        char_objects = map(BluezGattCharacteristic,
                           get_provider()._get_objects(_CHARACTERISTIC_INTERFACE,
                                                       self._device.object_path))
        found_chars = set(c.uuid for c in char_objects)
        # Done as soon as at least the expected UUIDs have shown up.
        if found_services >= wanted_services and found_chars >= wanted_chars:
            return True
        # Not there yet; give up once the deadline passes, else poll again.
        if time.time() >= deadline:
            return False
        time.sleep(1)
Exemplo n.º 2
0
 def setUp(self):
     """Build input/expected fixture paths for the regression test."""
     input_join = partial(os.path.join, inputdir)
     expected_join = partial(os.path.join, expecteddir)
     outnames = ['contig-0.fastq', 'contig-1000000.fastq', 'contig-2000000.fastq']
     # list() so these fixtures survive repeated iteration: under Python 3
     # a bare map() is a one-shot iterator and would appear empty the
     # second time a test walked it.
     self.fastqs = list(map(input_join, ['1.R1.unmap.fastq', '1.R2.unmap.fastq']))
     self.amos = input_join('regression.afg')
     self.expected_files = list(map(expected_join, outnames))
     self.result_files = outnames
Exemplo n.º 3
0
def filtered_forts(starting_location, origin, forts, proximity, visited_forts=None, reverse=False):
    """Return (fort, distance-from-origin) pairs for active pokestops,
    sorted by distance.

    :param visited_forts: mapping of already-visited forts; defaults to a
        fresh empty dict per call.  (The original used a mutable default
        argument, which is shared across calls.)
    :param reverse: sort farthest-first when True.
    """
    if visited_forts is None:
        visited_forts = {}
    # Pair each fort with its distance, then keep only active pokestops.
    with_distance = map(lambda x: (x, distance_in_meters(origin, (x['latitude'], x['longitude']))), forts)
    forts = filter(lambda f: is_active_pokestop(f[0], visited_forts=visited_forts,
                                                starting_location=starting_location,
                                                proximity=proximity),
                   with_distance)
    sorted_forts = sorted(forts, key=lambda x: x[1], reverse=reverse)
    return sorted_forts
Exemplo n.º 4
0
def wait_instances_running(ec2, instances):
    """
    Wait until no instance in the given iterable is 'pending'. Yield every instance that
    entered the running state as soon as it does.

    :param boto.ec2.connection.EC2Connection ec2: the EC2 connection to use for making requests
    :param Iterator[Instance] instances: the instances to wait on
    :rtype: Iterator[Instance]
    """
    # IDs already yielded; the asserts below guarantee each instance is
    # yielded at most once across polling rounds.
    running_ids = set()
    other_ids = set()
    while True:
        pending_ids = set()
        for i in instances:
            if i.state == 'pending':
                pending_ids.add(i.id)
            elif i.state == 'running':
                assert i.id not in running_ids
                running_ids.add(i.id)
                yield i
            else:
                # Any non-pending, non-running state (stopped, terminated,
                # ...) is also yielded so the caller sees every instance
                # that left 'pending'.
                assert i.id not in other_ids
                other_ids.add(i.id)
                yield i
        log.info('%i instance(s) pending, %i running, %i other.',
                 *map(len, (pending_ids, running_ids, other_ids)))
        if not pending_ids:
            break
        # NOTE(review): min(len(pending_ids), 10 * a_short_time) compares a
        # count of instances with a duration in seconds -- looks odd;
        # confirm the intended back-off formula.
        seconds = max(a_short_time, min(len(pending_ids), 10 * a_short_time))
        log.info('Sleeping for %is', seconds)
        time.sleep(seconds)
        # Re-fetch only the still-pending instances for the next round.
        for attempt in retry_ec2():
            with attempt:
                instances = ec2.get_only_instances(list(pending_ids))
Exemplo n.º 5
0
    def print_dot_chart(self):
        """Print a dot (graphviz) output graph representing the workflow."""
        print("digraph toil_graph {")
        print("# This graph was created from job-store: %s" %
              self.jobStoreName)

        # Map each job's store ID to a small integer used as its node name.
        # (The original built {index: jobStoreID}, so every
        # jobsToNodeNames[job.jobStoreID] lookup below raised KeyError.)
        jobsToNodeNames = {job.jobStoreID: i
                           for i, job in enumerate(self.jobsToReport)}

        # Print the nodes
        for job in set(self.jobsToReport):
            print(
                '%s [label="%s %s"];' %
                (jobsToNodeNames[job.jobStoreID], job.jobName, job.jobStoreID))

        # Print the edges
        for job in set(self.jobsToReport):
            for level, jobList in enumerate(job.stack):
                for childJob in jobList:
                    # Check, b/c successor may be finished / not in the set of jobs
                    if childJob.jobStoreID in jobsToNodeNames:
                        print('%s -> %s [label="%i"];' %
                              (jobsToNodeNames[job.jobStoreID],
                               jobsToNodeNames[childJob.jobStoreID], level))
        print("}")
 def list_services(self):
     """Return a list of GattService objects that have been discovered for
     this device.
     """
     # list() so the documented list is returned on Python 3 as well,
     # where a bare map() would be a one-shot iterator.
     return list(map(BluezGattService,
                     get_provider()._get_objects(_SERVICE_INTERFACE,
                                                 self._device.object_path)))
Exemplo n.º 7
0
 def list_descriptors(self):
     """Return list of GATT descriptors that have been discovered for this
     characteristic.
     """
     paths = self._props.Get(_CHARACTERISTIC_INTERFACE, 'Descriptors')
     # list() so the documented list is returned on Python 3 as well,
     # where a bare map() would be a one-shot iterator.
     return list(map(BluezGattDescriptor,
                     get_provider()._get_objects_by_path(paths)))
Exemplo n.º 8
0
Arquivo: 10-init.py Projeto: UBTC/wipy
def maxN(a, n):
    """Return the n largest values of a and their original indices.

    :param a: a list or numpy ndarray of comparable values
    :param n: how many of the largest entries to return (clamped to len(a))
    :return: (values, indices) as numpy arrays, or False for unsupported input
    """
    # The original used `or`, which is always True (no object is both a
    # list and an ndarray), so the function always returned False.
    if not isinstance(a, (list, ndarray)):
        return False
    if n > len(a):
        n = len(a)
    # Decorate with original indices, then sort by value, largest first.
    decorated = [(v, i) for i, v in enumerate(a)]
    decorated.sort(key=lambda pair: pair[0], reverse=True)
    return (array([decorated[i][0] for i in range(n)]),
            array([int(decorated[i][1]) for i in range(n)]))
Exemplo n.º 9
0
 def list_characteristics(self):
     """Return list of GATT characteristics that have been discovered for this
     service.
     """
     char_paths = self._props.Get(_SERVICE_INTERFACE, 'Characteristics')
     char_objects = get_provider()._get_objects_by_path(char_paths)
     return map(BluezGattCharacteristic, char_objects)
Exemplo n.º 10
0
 def test_get_seqs_by_ctg(self):
     """get_seqs_by_ctg should regroup shuffled fastq records by contig,
     matching records to AMOS REDs via their sequence strings.
     """
     iids_by_ctg = [[1, 2, 4], [1, 2], [3, 5], [4], [6], [2, 4]]
     # iids 2 and 4 share the sequence 'ACGT', so the join must cope with
     # duplicate sequences; other iids get distinct 'A'*i strings.
     get_seq = lambda i: 'ACGT' if i in [2, 4] else 'A'*i
     reds = [make_mock_red(i, i, get_seq(i), 'D'*len(get_seq(i)))
                      for i in range(1, len(iids_by_ctg)+ 1)]
     fastq_records = [make_seq_record(get_seq(i)) for i in range(1, len(iids_by_ctg)+1)]
     saved_records = fastq_records[:]
     # Shuffle to prove the grouping does not rely on input order.
     random.shuffle(fastq_records)
     result = filter(bool, map(tuple, a2f.get_seqs_by_ctg(fastq_records, reds, iids_by_ctg)))
     # Expected: for each contig, the original records at positions iid-1.
     getters_by_ctg = map(lambda A: itemgetter(*[i - 1 for i in A]), iids_by_ctg)
     raw_expected = [get(saved_records) for get in getters_by_ctg]
     # itemgetter with a single index returns a bare item; normalize to tuples.
     expected = filter(bool, map(lambda a: (a,) if type(a) is not tuple else a, raw_expected))
     # Bio.Seq objects can't be compared directly need to get __dict__ attribute
     dicter = lambda tup: [obj.seq.tostring() for obj in tup]
     self.assertEquals(map(dicter, expected), map(dicter, result))
     pass
Exemplo n.º 11
0
 def disconnect_devices(self, service_uuids):
     """Disconnect any connected devices that have any of the specified
     service UUIDs.
     """
     # Convert to CoreBluetooth UUID objects before querying the manager.
     cbuuids = map(uuid_to_cbuuid, service_uuids)
     connected = self._central_manager.retrieveConnectedPeripheralsWithServices_(cbuuids)
     for peripheral in connected:
         self._central_manager.cancelPeripheralConnection_(peripheral)
Exemplo n.º 12
0
    def failure_summary(self):
        """Build a human-readable summary of fuzz logger results.

        :return: Test summary string, may be multi-line.
        """
        parts = ["Test Summary: {0} tests ran.\n".format(self.test_case_count),
                 "PASSED: {0} test cases.\n".format(self.passed_test_case_count)]

        if len(self.failed_test_cases) > 0:
            parts.append("FAILED: {0} test cases:\n".format(len(self.failed_test_cases)))
            parts.append("{0}\n".format("\n".join(map(str, self.failed_test_cases))))

        if len(self.error_test_cases) > 0:
            parts.append("Errors on {0} test cases:\n".format(len(self.error_test_cases)))
            parts.append("{0}".format("\n".join(map(str, self.error_test_cases))))

        return "".join(parts)
 def list_services(self):
     """Return a list of GattService objects that have been discovered for
     this device.
     """
     service_objects = get_provider()._get_objects(
         _SERVICE_INTERFACE, self._device.object_path)
     return map(BluezGattService, service_objects)
Exemplo n.º 14
0
    def test_file_generator(self):
        """Should stream data from a file."""
        row_count = 100
        header = ['col1,col2,col3']
        data = list(mock_data(row_count, len(header)))
        # list() so the rows can be concatenated with header: on Python 3
        # map() returns a lazy iterator and `list + map` raises TypeError.
        rows = list(map(lambda x: ','.join(map(str, x)), data))
        test_data = '\n'.join(header + rows)

        with patch('bcipy.acquisition.datastream.generator.open',
                   mock_open(read_data=test_data),
                   create=True):

            gen = file_data_generator(filename='foo', header_row=1)
            generated_data = [next(gen) for _ in range(row_count)]

            for i, row in enumerate(generated_data):
                self.assertEqual(row, data[i])
Exemplo n.º 15
0
 def list_characteristics(self):
     """Return list of GATT characteristics that have been discovered for this
     service.
     """
     char_objects = get_provider()._get_objects(_CHARACTERISTIC_INTERFACE,
                                                self._service.object_path)
     return map(BluezGattCharacteristic, char_objects)
 def disconnect_devices(self, service_uuids):
     """Disconnect any connected devices that have any of the specified
     service UUIDs.
     """
     # Translate the UUIDs into CoreBluetooth CBUUID objects.
     cbuuids = map(uuid_to_cbuuid, service_uuids)
     manager = self._central_manager
     for peripheral in manager.retrieveConnectedPeripheralsWithServices_(cbuuids):
         manager.cancelPeripheralConnection_(peripheral)
Exemplo n.º 17
0
def maxN(a, n):
    """Return the n largest values of a and their original indices.

    :param a: a list or numpy ndarray of comparable values
    :param n: how many of the largest entries to return (clamped to len(a))
    :return: (values, indices) as numpy arrays, or False for unsupported input
    """
    # The original used `or`, which is always True (no object is both a
    # list and an ndarray), so the function always returned False.
    if not isinstance(a, (list, ndarray)):
        return False
    if n > len(a):
        n = len(a)
    # Decorate with original indices, then sort by value, largest first.
    decorated = [(v, i) for i, v in enumerate(a)]
    decorated.sort(key=lambda pair: pair[0], reverse=True)
    return (array([decorated[i][0] for i in range(n)]),
            array([int(decorated[i][1]) for i in range(n)]))
Exemplo n.º 18
0
    def test_file_generator_end(self):
        """Should throw an exception when all data has been consumed"""
        row_count = 10

        header = ['col1,col2,col3']
        data = list(mock_data(row_count, len(header)))
        # list() so the rows can be concatenated with header: on Python 3
        # map() returns a lazy iterator and `list + map` raises TypeError.
        rows = list(map(lambda x: ','.join(map(str, x)), data))
        test_data = '\n'.join(header + rows)

        with patch('bcipy.acquisition.datastream.generator.open',
                   mock_open(read_data=test_data), create=True):
            gen = file_data(filename='foo', header_row=1)
            # exhaust the generator
            for _ in range(row_count):
                next(gen)

            with pytest.raises(StopIteration):
                data.append(next(gen))
Exemplo n.º 19
0
 def characteristic_changed(iface, changed_props, invalidated_props):
     """Forward GATT characteristic value changes to the on_change callback."""
     # Ignore anything that is not a new value on a GATT characteristic.
     if iface != _CHARACTERISTIC_INTERFACE or 'Value' not in changed_props:
         return
     # The value arrives as a byte array; convert it to a string.
     on_change(''.join(map(chr, changed_props['Value'])))
Exemplo n.º 20
0
def flatten_multiple_seq_files(filehandles, format):
    '''
    Get a flat iterator of SeqRecords from a list of opened files.

    :param iterable filehandles: collection of valid fastq/fasta `file` objects
    :param str format: Either "fastq" or "fasta"
    :return generator of all fastq/fasta Bio.SeqRecords as a flat iterator
    '''
    open_biofile = partial(SeqIO.parse, format=format)
    # chain.from_iterable is fully lazy: each parser is only created as the
    # result is consumed, whereas chain(*map(...)) forces every parser to
    # be constructed up front.
    return itertools.chain.from_iterable(map(open_biofile, filehandles))
Exemplo n.º 21
0
 def characteristic_changed(iface, changed_props, invalidated_props):
     """Invoke on_change with the new value of a changed GATT characteristic."""
     is_characteristic = iface == _CHARACTERISTIC_INTERFACE
     has_new_value = 'Value' in changed_props
     if is_characteristic and has_new_value:
         # D-Bus delivers the value as a byte array; convert to a string.
         on_change(''.join(map(chr, changed_props['Value'])))
Exemplo n.º 22
0
def flatten_multiple_seq_files(filehandles, format):
    '''
    Get a flat iterator of SeqRecords from a list of opened files.
    :param iterable filehandles: collection of valid fastq/fasta `file` objects
    :param str format: Either "fastq" or "fasta"
    :return generator of all fastq/fasta Bio.SeqRecords as a flat iterator
    '''
    # Build one parser per file handle, then splice them end to end.
    parsers = [SeqIO.parse(fh, format=format) for fh in filehandles]
    return itertools.chain(*parsers)
Exemplo n.º 23
0
    def coreSSH(self, *args, **kwargs):
        """
        Run a command on this node over SSH (optionally inside its appliance
        container) and return its stdout.

        If strict=False, strict host key checking will be temporarily disabled.
        This is provided as a convenience for internal/automated functions and
        ought to be set to True whenever feasible, or whenever the user is directly
        interacting with a resource (e.g. rsync-cluster or ssh-cluster). Assumed
        to be False by default.

        kwargs: input, tty, appliance, collectStdout, sshOptions, strict

        :raises RuntimeError: when the remote command exits with a non-zero code.
        """
        commandTokens = ['ssh', '-t']
        strict = kwargs.pop('strict', False)
        if not strict:
            # Prepend options that disable host key checking for automated use.
            kwargs['sshOptions'] = ['-oUserKnownHostsFile=/dev/null', '-oStrictHostKeyChecking=no'] \
                                 + kwargs.get('sshOptions', [])
        sshOptions = kwargs.pop('sshOptions', None)
        # Forward port 3000 for grafana dashboard
        commandTokens.extend(
            ['-L', '3000:localhost:3000', '-L', '9090:localhost:9090'])
        if sshOptions:
            # add specified options to ssh command
            assert isinstance(sshOptions, list)
            commandTokens.extend(sshOptions)
        # specify host
        user = kwargs.pop('user', 'core')  # CHANGED: Is this needed?
        commandTokens.append('%s@%s' % (user, str(self.publicIP)))
        appliance = kwargs.pop('appliance', None)
        if appliance:
            # run the args in the appliance
            tty = kwargs.pop('tty', None)
            ttyFlag = '-t' if tty else ''
            commandTokens += ['docker', 'exec', '-i', ttyFlag, 'toil_leader']

        inputString = kwargs.pop('input', None)
        if inputString is not None:
            kwargs['stdin'] = subprocess.PIPE
        collectStdout = kwargs.pop('collectStdout', None)
        if collectStdout:
            kwargs['stdout'] = subprocess.PIPE
        kwargs['stderr'] = subprocess.PIPE

        logger.debug('Node %s: %s', self.publicIP, ' '.join(args))
        # Quote each argument so the remote shell sees it verbatim.
        args = list(map(pipes.quote, args))
        commandTokens += args
        logger.debug('Full command %s', ' '.join(commandTokens))
        # Remaining kwargs (stdin/stdout/stderr set above) go to Popen.
        popen = subprocess.Popen(commandTokens, **kwargs)
        stdout, stderr = popen.communicate(input=inputString)
        # at this point the process has already exited, no need for a timeout
        resultValue = popen.wait()
        # ssh has been throwing random 255 errors - why?
        if resultValue != 0:
            logger.debug('SSH Error (%s) %s' % (resultValue, stderr))
            raise RuntimeError(
                'Executing the command "%s" on the appliance returned a non-zero '
                'exit code %s with stdout %s and stderr %s' %
                (' '.join(args), resultValue, stdout, stderr))
        return stdout
Exemplo n.º 24
0
def extract_dfs_by_iids(df, iids_by_ctg):
    '''
    Get a list of "sub-frames" organized according to the organization of the iids.

    :param pandas.DataFrame df: DataFrame with a 'seq' column
    :param list iids_by_ctg: 2D list of iids (ints) organized by the contig they map to
    :return a list of sub-dataframes, where each sub-frame matches the iids in the 2D list iids_by_ctg
    '''
    get_df_subset_seqs = partial(get_df_subset, df, key='iid')
    # list() so callers really get the documented list (re-iterable); on
    # Python 3 a bare map() would be a one-shot iterator.
    return list(map(get_df_subset_seqs, iids_by_ctg))
Exemplo n.º 25
0
 def _update_advertised(self, advertised):
     """Called when advertisement data is received."""
     # Advertisement data was received, pull out advertised service UUIDs and
     # name from advertisement data.
     if 'kCBAdvDataServiceUUIDs' in advertised:
         self._advertised = self._advertised + map(
             cbuuid_to_uuid, advertised['kCBAdvDataServiceUUIDs'])
         if 'kCBAdvDataManufacturerData' in advertised:
             self._advertised = self._advertised + list(
                 advertised['kCBAdvDataManufacturerData'])
Exemplo n.º 26
0
def extract_dfs_by_iids(df, iids_by_ctg):
    '''
    Get a list of "sub-frames" organized according to the organization of the iids.
    :param pandas.DataFrame df: DataFrame with a 'seq' column
    :param list iids_by_ctg: 2D list of iids (ints) organized by the contig they map to
    :return a list of sub-dataframes, where each sub-frame matches the iids in the 2D list iids_by_ctg
    '''
    # Bind df and the join key once; each call then only needs an iid list.
    subset_for = partial(get_df_subset, df, key='iid')
    return map(subset_for, iids_by_ctg)
Exemplo n.º 27
0
 def mquals(self):
     '''
         Returns the mapping qualities as a phred - 33 integer
         Truncates mquals to the same length as base qualities as older samtools
         had a bug where it would return all mapping qualities regardless of the -q and -Q
         thresholds being set.
         It will return the empty list if it would otherwise truncate due to the reason above, but
         all the values are not the same since there would be no way to tell what qual values
         match what bases.
     '''
     # list() in both map branches so every path returns a list, matching
     # the `return []` fallback (on Python 3 a bare map() is an iterator).
     # Check to make sure map qual len is same as base qual length
     if len(self._bquals) == len(self._mquals):
         return list(map(char_to_qual, self._mquals))
     # Otherwise we can only proceed if all items are the same
     elif len(set(self._mquals)) == 1:
         l = len(self._bquals)
         return list(map(char_to_qual, self._mquals[:l]))
     else:
         # Ambiguous: cannot tell which quals match which bases.
         return []
Exemplo n.º 28
0
 def _update_advertised(self, advertised, rssi=None):
     """Called when advertisement data is received."""
     # Advertisement data was received, pull out advertised service UUIDs and
     # name from advertisement data.
     if 'kCBAdvDataServiceUUIDs' in advertised:
         self._advertised = self._advertised + map(cbuuid_to_uuid, advertised['kCBAdvDataServiceUUIDs'])
     if 'kCBAdvDataServiceData' in advertised:
         self._advertised_data = advertised['kCBAdvDataServiceData']
     if rssi:
         self._rssi = rssi
Exemplo n.º 29
0
def join_non_unique_dataframes(df1, df2):
    '''
    Sourced form: http://stackoverflow.com/questions/20297021/grouping-or-merging-dataframes-pandas-python
    Get two dataframes joined on their index, even if their indices hold non-unique values.
    The joined DataFrame will have a MultiIndex; the original shared index + a cumulative count index.
    :param pandas.DataFrame df1: A DataFrame with equivalent (but non-unique) index values to df2
    :param pandas.DataFrame df2: A DataFrame with equivalent (but non-unique) index values to df1
    :return pandas.Dataframe new joined DataFrame. Will also have a "cumulative count" index
    '''
    # The cumulative-count level makes each (index, count) pair unique,
    # letting join() pair up rows despite duplicate index values.
    left = add_cumcount_index(df1)
    right = add_cumcount_index(df2)
    return left.join(right)
Exemplo n.º 30
0
def join_non_unique_dataframes(df1, df2):
    '''
    Sourced form: http://stackoverflow.com/questions/20297021/grouping-or-merging-dataframes-pandas-python
    Get two dataframes joined on their index, even if their indices hold non-unique values.
    The joined DataFrame will have a MultiIndex; the original shared index + a cumulative count index.
    :param pandas.DataFrame df1: A DataFrame with equivalent (but non-unique) index values to df2
    :param pandas.DataFrame df2: A DataFrame with equivalent (but non-unique) index values to df1
    :return pandas.Dataframe new joined DataFrame. Will also have a "cumulative count" index
    '''
    # The cumulative-count index level disambiguates duplicate index
    # values so join() can pair rows one-to-one.
    df_multi_index_1, df_multi_index_2 = map(add_cumcount_index, (df1, df2))
    return df_multi_index_1.join(df_multi_index_2)
Exemplo n.º 31
0
    def get_lattice_parameters(self):
        """Function to get the lattice parameters.

        Returns
        -------
        output : array-like
            A numpy array containing the lattice parameters (the norm of
            each column vector of simulation_cell).
        """
        # A list comprehension rather than map(): on Python 3,
        # np.array(map(...)) produces a useless 0-d object array instead of
        # an array of the computed norms.
        output = np.array([norm(vector) for vector in self.simulation_cell.T],
                          dtype=float)
        return output
Exemplo n.º 32
0
def loadModules():
    """Build a mapping from dashed command names to toil.utils modules.

    Example: module ``toilLaunchCluster`` maps to key ``launch-cluster``.
    The regex only matches runs beginning at a capital letter, so the
    lowercase ``toil`` prefix is dropped before joining with dashes.
    """
    # noinspection PyUnresolvedReferences
    from toil.utils import (toilKill, toilStats, toilStatus, toilClean,
                            toilLaunchCluster, toilDestroyCluster,
                            toilSshCluster, toilRsyncCluster, toilDebugFile,
                            toilDebugJob)
    # locals() is evaluated in the function's frame (the outermost iterable
    # of a comprehension is evaluated in the enclosing scope), i.e. exactly
    # the modules imported above.
    commandMapping = {
        "-".join(map(lambda x: x.lower(), re.findall('[A-Z][^A-Z]*', name))):
        module
        for name, module in iteritems(locals())
    }
    return commandMapping
Exemplo n.º 33
0
def make_fastqs_by_contigs(fastqs, amos_file, fformat='fastq'):
    '''
    Loads fastq records and amos object, get the sequences and write them to the file.

    :param list fastqs: list of valid fastq `file` objects
    :param file amos_file: single amos `file` (usually .afg extension)
    :return int 0 success code
    '''
    # Make list to end IO here (keep IO in main function, and fail immediately)
    fastq_records = list(flatten_multiple_seq_files(fastqs, fformat))
    amos_obj = amos.AMOS(amos_file)
    for f in fastqs + [amos_file]:
        f.close()
    reds = amos_obj.reds.values()
    contigs = amos_obj.ctgs.values()
    iids_by_ctg = list(map(get_iids, contigs))
    # Do the heavy-lifting
    seqs_by_ctg = get_seqs_by_ctg(fastq_records, reds, iids_by_ctg)
    filenames = ("{0}.{1}".format(ctg.eid, fformat) for ctg in contigs)
    # Explicit loop: the original used map() purely for its side effect,
    # which writes nothing on Python 3 where map() is lazy.
    for seqs, filename in zip(seqs_by_ctg, filenames):
        SeqIO.write(seqs, filename, format=fformat)
    return 0
Exemplo n.º 34
0
def make_fastqs_by_contigs(fastqs, amos_file, fformat='fastq'):
    '''
    Loads fastq records and amos object, get the sequences and write them to the file.

    :param list fastqs: list of valid fastq `file` objects
    :param file amos_file: single amos `file` (usually .afg extension)
    :return int 0 success code
    '''
    # Make list to end IO here (keep IO in main function, and fail immediately)
    fastq_records = list(flatten_multiple_seq_files(fastqs, fformat))
    amos_obj = amos.AMOS(amos_file)
    for f in fastqs + [amos_file]:
        f.close()
    reds = amos_obj.reds.values()
    contigs = amos_obj.ctgs.values()
    iids_by_ctg = list(map(get_iids, contigs))
    # Do the heavy-lifting
    seqs_by_ctg = get_seqs_by_ctg(fastq_records, reds, iids_by_ctg)
    filenames = ("{0}.{1}".format(ctg.eid, fformat) for ctg in contigs)
    # Explicit loop: the original used map() purely for its side effect,
    # which writes nothing on Python 3 where map() is lazy.
    for seqs, filename in zip(seqs_by_ctg, filenames):
        SeqIO.write(seqs, filename, format=fformat)
    return 0
Exemplo n.º 35
0
def int_to_binary_string(number, bit_width):
    """
    Convert a number to a binary string.

    @type  number:    int
    @param number:    (Optional, def=self._value) Number to convert
    @type  bit_width: int
    @param bit_width: (Optional, def=self.width) Width of bit string

    @rtype:  str
    @return: Bit string
    """
    # Collect bits most-significant first, then join them into one string.
    bits = []
    for shift in range(bit_width - 1, -1, -1):
        bits.append(str((number >> shift) & 1))
    return "".join(bits)
Exemplo n.º 36
0
    def test_file_generator_end(self):
        """Should throw an exception when all data has been consumed"""
        col_count = 3
        row_count = 10

        header = ['col1,col2,col3']
        file_data = [[float(cnum + rnum) for cnum in range(col_count)]
                     for rnum in range(row_count)]
        # list() so the rows can be concatenated with header: on Python 3
        # map() returns a lazy iterator and `list + map` raises TypeError.
        rows = list(map(lambda x: ','.join(map(str, x)), file_data))
        test_data = '\n'.join(header + rows)

        with patch('datastream.generator.open',
                   mock_open(read_data=test_data),
                   create=True):

            data = []
            gen = generator.file_data(filename='foo', header_row=1)
            for i in range(row_count):
                data.append(next(gen))

            with pytest.raises(StopIteration):
                data.append(next(gen))
Exemplo n.º 37
0
    def report_on_jobs(self):
        """
        Gathers information about jobs such as its child jobs and status.

        :returns jobStats: Pairings of a useful category and a list of jobs which fall into it.
        :rtype dict:
        """
        hasChildren = []
        readyToRun = []
        zombies = []
        hasLogFile = []
        hasServices = []
        services = []
        properties = set()
        # Successor count of the most recently examined job.  Initialized
        # here so the dict below is well-defined even with no jobs (the
        # original raised NameError on an empty jobsToReport).
        childNumber = 0

        for job in self.jobsToReport:
            if job.logJobStoreFileID is not None:
                hasLogFile.append(job)

            # Total successors across all stack levels.  (The original
            # `reduce(add, map(len, stack) + [0])` breaks on Python 3,
            # where `list + map` raises TypeError.)
            childNumber = sum(len(level) for level in job.stack)
            if childNumber > 0:  # Total number of successors > 0
                hasChildren.append(job)
                properties.add("HAS_CHILDREN")
            elif job.command is not None:
                # Job has no children and a command to run. Indicates job could be run.
                readyToRun.append(job)
                properties.add("READY_TO_RUN")
            else:
                # Job has no successors and no command, so is a zombie job.
                zombies.append(job)
                properties.add("IS_ZOMBIE")
            if job.services:
                hasServices.append(job)
                properties.add("HAS_SERVICES")
            if job.startJobStoreID or job.terminateJobStoreID or job.errorJobStoreID:
                # These attributes are only set in service jobs
                services.append(job)
                properties.add("IS_SERVICE")

        jobStats = {
            'hasChildren': hasChildren,
            'readyToRun': readyToRun,
            'zombies': zombies,
            'hasServices': hasServices,
            'services': services,
            'hasLogFile': hasLogFile,
            'properties': properties,
            # NOTE(review): as in the original, this is the child count of
            # the *last* job examined only -- confirm that is intended.
            'childNumber': childNumber
        }
        return jobStats
Exemplo n.º 38
0
def s_update(name, value):
    """
    Update the value of the named primitive in the currently open request.

    :type  name:  str
    :param name:  Name of object whose value we wish to update
    :type  value: Mixed
    :param value: Updated value
    """
    # Validate that the request actually contains an object by this name.
    known_names = map(lambda o: o.name, blocks.CURRENT.walk())
    if name not in known_names:
        raise exception.SullyRuntimeError("NO OBJECT WITH NAME '%s' FOUND IN CURRENT REQUEST" % name)

    blocks.CURRENT.names[name].value = value
Exemplo n.º 39
0
    def test_file_generator(self):
        """Should stream data from a file."""
        col_count = 3
        row_count = 100

        header = ['col1,col2,col3']
        file_data = [[float(cnum + rnum) for cnum in range(col_count)]
                     for rnum in range(row_count)]
        # list() so the rows can be concatenated with header: on Python 3
        # map() returns a lazy iterator and `list + map` raises TypeError.
        rows = list(map(lambda x: ','.join(map(str, x)), file_data))
        test_data = '\n'.join(header + rows)

        with patch('datastream.generator.open',
                   mock_open(read_data=test_data),
                   create=True):

            data = []
            gen = generator.file_data(filename='foo', header_row=1)
            for i in range(row_count):
                data.append(next(gen))

            self.assertEqual(len(data), row_count)
            for i, row in enumerate(data):
                self.assertEqual(row, file_data[i])
Exemplo n.º 40
0
    def test_file_with_custom_encoder(self):
        """Should allow a custom encoder"""

        col_count = 3
        row_count = 100

        header = ['col1,col2,col3']
        data = [[float(cnum + rnum) for cnum in range(col_count)]
                for rnum in range(row_count)]
        # list() so the rows can be concatenated with header: on Python 3
        # map() returns a lazy iterator and `list + map` raises TypeError.
        rows = list(map(lambda x: ','.join(map(str, x)), data))
        test_data = '\n'.join(header + rows)

        with patch('bcipy.acquisition.datastream.generator.open',
                   mock_open(read_data=test_data), create=True):

            gen = file_data(
                filename='foo', header_row=1, encoder=CustomEncoder())
            generated_data = [next(gen) for _ in range(row_count)]

            for _count, record in generated_data:
                self.assertEqual(len(record), col_count)

            self.assertEqual(generated_data[0][0], 1)
            self.assertEqual(generated_data[99][0], 100)
Exemplo n.º 41
0
def graphFilter(store, graphs):
    """
    Create a graph-filtered session spec.

    :param store: Base session spec.
    :type store: string
    :param graphs: List of graph names. `None` means the default graph.
    :type graphs: list[string]
    :return: A session spec string.
    :rtype: string
    """
    def asGraph(x):
        # None stands for the default graph, spelled "null" in the spec.
        if x is None: return "null"
        # NOTE(review): `unicode` is Python-2-only; presumably this module
        # targets Python 2 or imports a `unicode` shim -- confirm.
        else: return unicode(x)
    return "%s{%s}" % (store, " ".join(map(asGraph, graphs)))
Exemplo n.º 42
0
    def test_noniterators_produce_lists(self):
        """The sequence builtins should return real lists, i.e. behave like
        Python 2 (presumably supplied by a compatibility shim such as
        past.builtins -- on stock Python 3 range/zip/map/filter are lazy
        and every isinstance check below would fail).
        """
        l = range(10)
        self.assertTrue(isinstance(l, list))

        l2 = zip(l, list('ABCDE')*2)
        self.assertTrue(isinstance(l2, list))

        double = lambda x: x*2
        l3 = map(double, l)
        self.assertTrue(isinstance(l3, list))

        is_odd = lambda x: x % 2 == 1
        l4 = filter(is_odd, range(10))
        self.assertEqual(l4, [1, 3, 5, 7, 9])
        self.assertTrue(isinstance(l4, list))
Exemplo n.º 43
0
 def disconnect_devices(self, service_uuids=()):
     """Disconnect any connected devices that have the specified list of
     service UUIDs.  The default is an empty collection which means all
     devices are disconnected.

     :param service_uuids: iterable of service UUIDs; empty means all.
     """
     # Immutable () instead of the original mutable default [] (the
     # shared-mutable-default anti-pattern); set() accepts any iterable,
     # so behavior is unchanged.
     service_uuids = set(service_uuids)
     for device in self.list_devices():
         # Skip devices that aren't connected.
         if not device.is_connected:
             continue
         device_uuids = set(map(lambda x: x.uuid, device.list_services()))
         if device_uuids >= service_uuids:
             # Found a device that has at least the requested services, now
             # disconnect from it.
             device.disconnect()
Exemplo n.º 44
0
def ipv4_checksum(msg):
    """
    Return IPv4 checksum of msg.
    :param msg: Message to compute checksum over.
    :type msg: bytes

    :return: IPv4 checksum of msg.
    :rtype: int
    """
    # Pad with 0 byte if needed
    if len(msg) % 2 == 1:
        msg += six.binary_type(b"\x00")

    # Pair adjacent bytes into 16-bit words, fold them with ones-complement
    # addition (carry wraps around), then return the complement -- the
    # standard Internet checksum procedure.
    msg_words = map(_collate_bytes, msg[0::2], msg[1::2])
    total = reduce(_ones_complement_sum_carry_16, msg_words, 0)
    return ~total & 0xffff
Exemplo n.º 45
0
    def disconnect_devices(self, service_uuids):
        """Disconnect any connected devices that have any of the specified
        service UUIDs.
        """
        print ('disconnecting devices')
        # Get list of connected devices with specified services.
        cbuuids = map(uuid_to_cbuuid, service_uuids)
        # (Removed the leftover commented-out debugging/introspection code
        # that cluttered this method.)
        for device in self._central_manager.retrieveConnectedPeripheralsWithServices_(cbuuids):
            self._central_manager.cancelPeripheralConnection_(device)
Exemplo n.º 46
0
def graphFilter(store, graphs):
    """
    Create a graph-filtered session spec.

    :param store: Base session spec.
    :type store: string
    :param graphs: List of graph names. `None` means the default graph.
    :type graphs: list[string]
    :return: A session spec string.
    :rtype: string
    """
    def asGraph(x):
        # None stands for the default graph, spelled "null" in the spec.
        return "null" if x is None else unicode(x)

    return "%s{%s}" % (store, " ".join(map(asGraph, graphs)))
Exemplo n.º 47
0
def get_seqs_by_ctg(fastq_records, reds, iids_by_ctg):
    '''
    Transforms the fastq records and reds into pandas.DataFrame objects and joins them on the sequence string column.
    Then, this DataFrame is sliced according to ``iids_by_ctg`` and returned. The result is a 2D list of SeqRecord objects
    organized by the contigs they mapped to.

    :param list fastq_records: a collection of Bio.SeqRecord objects
    :param list reds: a collection of amos.RED objects
    :param list iids_by_ctg: a 2D list of iids (organized by contig)
    :return A 2D list of bio.SeqRecord objects organized by the contig they map to.
    '''
    fastq_df = bio_records_as_df(fastq_records)
    reds_df = amos_reds_as_df(collection=reds)
    # Index both frames on the raw sequence string so they can be joined on it.
    fastq_df, reds_df = fastq_df.set_index('seq'), reds_df.set_index('seq')
    assert reds_df.shape == fastq_df.shape, "should have the same number of columns (seqs, seq_obj/iid) and rows (fastq reads / AMOS REDs."
    reds_with_seqs_df = join_non_unique_dataframes(reds_df, fastq_df)
    dfs_by_ctg = extract_dfs_by_iids(reds_with_seqs_df, iids_by_ctg)
    unfiltered_seqs_by_ctg = [df['seq_obj'] for df in dfs_by_ctg]
    # BUGFIX: ``if filter(...)`` is only correct on Python 2, where filter()
    # returns a list; on Python 3 it returns an (always truthy) iterator, so
    # the warning would fire unconditionally.  any() states the intent and
    # short-circuits on both versions.
    if any(series_contains_nan(seqs) for seqs in unfiltered_seqs_by_ctg):
        print("Warning: AMOS records without matching fastq records found.")
    # Materialize the result so it is a real 2D list on Python 3 as well
    # (map() would yield a lazy iterator there, contradicting the docstring).
    matched_seqs_by_ctg = [get_not_nulls(seqs) for seqs in unfiltered_seqs_by_ctg]
    return matched_seqs_by_ctg
Exemplo n.º 48
0
    def test_minimize_depths_complex(self):
        self.complexSetUp()
        seqs = map(sub.parse_alignment, self.raw_reads) 
        expected_matrix =  [
                [seqs[0],  seqs[1],  seqs[3]],
                [seqs[4]],
                [seqs[6], seqs[7]]
                ]
        expected_list = [  seqs[0], seqs[1], seqs[3], seqs[4], seqs[6], seqs[7] ]
        for seq in expected_list:
            seq.pick()
        expected_depths = np.array([3, 4, 6, 4, 3, 2, 2, 1])
        self.matrix.min_depth = 3
        self.matrix.minimize_depths()
        actual_matrix = self.matrix.seq_matrix
        for row in actual_matrix:
            for seq in row:
                if not seq.picked:
                    row.remove(seq)

        self.assertEquals(expected_matrix, actual_matrix)
        assert_equal(expected_depths, self.matrix.depth_array)
        self.assertEquals([seq.string for seq in expected_list], sub.flatten_and_filter_matrix(self.matrix.seq_matrix)) 
Exemplo n.º 49
0
 def test_minimize_depths_complex_more_random(self, func):
     """minimize_depths() with ``more_random`` keeps all reads picked and
     yields the expected depths on the complex fixture.
     """
     #TODO: Refactor this so as not to duplicate code with non-random ecoli test
     self.complexSetUp()
     # list() keeps this an indexable sequence on Python 3 too.
     seqs = list(map(sub.parse_alignment, self.raw_reads))
     expected_matrix =  [
             [seqs[0],  seqs[1],  seqs[2], seqs[3]],
             [seqs[4], seqs[5]],
             [seqs[6], seqs[7]]
             ]
     expected_list = seqs
     for seq in expected_list:
         seq.pick()
     expected_depths = np.array([4, 6, 7, 4, 3, 2, 2, 1])
     self.matrix.min_depth = 3
     self.matrix.more_random = True
     self.matrix.minimize_depths()
     actual_matrix = self.matrix.seq_matrix
     # BUGFIX: the original removed elements from ``row`` while iterating
     # over it, which silently skips the element following each removal.
     # Rebuild each row in place so every unpicked read is dropped.
     for row in actual_matrix:
         row[:] = [seq for seq in row if seq.picked]
     self.assertEqual(expected_matrix, actual_matrix)
     assert_equal(expected_depths, self.matrix.depth_array)
     self.assertEqual([seq.string for seq in expected_list], sub.flatten_and_filter_matrix(self.matrix.seq_matrix))
Exemplo n.º 50
0
 def test_vcf_file_to_df(self):
     """The head of the parsed VCF DataFrame matches the expected text."""
     frame = mod.vcf_file_to_df(self.infile)
     actual_head = fix_string(str(frame.head()).strip())
     expected_head = fix_string(self.expected_head)
     self.assertEquals(expected_head, actual_head)
Exemplo n.º 51
0
 def test_main(self, mockopt):
     """main() writes every expected contig fastq file to disk."""
     mockopt.return_value = {'<fastqs>' : self.fastqs, '--amos' : self.amos}
     amos2fastq_main.main()
     # BUGFIX: the old ``map(self.assertTrue, map(os.path.exists, ...))``
     # relies on map() being eager; on Python 3 it is lazy and the
     # assertions would never execute.  Use an explicit loop.  (The unused
     # ``read_file`` lambda was removed.)
     for path in self.result_files:
         self.assertTrue(os.path.exists(path))
Exemplo n.º 52
0
def df_by_attrs(columns, collection):
    """Build a DataFrame from ``collection`` using one attribute getter
    per entry in ``columns``."""
    getters = [attr(column) for column in columns]
    return collection_as_df(getters, columns, collection)
Exemplo n.º 53
0
 def bquals( self ):
     '''
         Base qualities of this read, decoded as phred - 33 integers.
     '''
     return [char_to_qual(ch) for ch in self._bquals]
Exemplo n.º 54
0
def plain_list_of_make_array(make_array_instance):
    """Convert a ``MakeArray`` AST node into a plain Python list of its
    unwrapped datum arguments.

    :param make_array_instance: node to unwrap; must be ``r_ast.MakeArray``.
    :raises TypeError: if the argument is not a ``MakeArray``.
    """
    # Explicit error instead of ``assert``: asserts are stripped when Python
    # runs with -O, which would silently disable this type check.
    if not isinstance(make_array_instance, r_ast.MakeArray):
        raise TypeError("expected r_ast.MakeArray, got %r" % type(make_array_instance))
    # A list comprehension guarantees a real list on Python 3 as well
    # (map() would return a lazy iterator, contradicting the function name).
    return [plain_val_of_datum(arg) for arg in make_array_instance.args]
Exemplo n.º 55
0
 def test_make_seq_matrix(self, get_reads_function):
     """make_seq_matrix() groups reads into rows of the expected sizes."""
     expected_lengths = [4, 2, 2]
     self.matrix.make_seq_matrix("mockecoli.sam", "gi|110640213|ref|NC_008253.1|")
     # BUGFIX: materialize the lengths as a list -- on Python 3 ``map()``
     # returns an iterator, which never compares equal to a list, so the
     # assertion below would always fail there.
     actual_lengths = [len(row) for row in self.matrix.seq_matrix]
     self.assertEqual(expected_lengths, actual_lengths)
Exemplo n.º 56
0
 def _update_advertised(self, advertised):
     """Called when advertisement data is received."""
     # Advertisement data was received, pull out advertised service UUIDs and
     # name from advertisement data.
     if 'kCBAdvDataServiceUUIDs' in advertised:
         self._advertised = self._advertised + map(cbuuid_to_uuid, advertised['kCBAdvDataServiceUUIDs'])