Example #1
def set_repeat_value(total_keys_repeat, keys, csv_file, target_filename):
    key_val_pair = []
    key_repeated = []
    with open(csv_file, 'r') as read_from, open(target_filename,
                                                'w') as write_to:
        csv_file_reader = csv.reader(read_from, delimiter=',')
        headers = next(csv_file_reader)
        values = next(csv_file_reader)
        csv_file_writer = csv.writer(write_to, delimiter=',')
        csv_file_writer.writerow(headers)
        csv_file_writer.writerow(values)
        total_keys_values = list(zip_longest(keys, values))

        # read new data, add value if key has repeat tag, write to new file
        for row in csv_file_reader:
            index = -1
            key_val_new = list(zip_longest(keys, row))
            key_val_pair = total_keys_values[:]
            key_repeated = total_keys_repeat[:]
            while key_val_new and key_repeated:
                index = index + 1
                #  if key has repeat tag, get its corresponding value, write to file
                if key_val_new[0][0] == key_repeated[0]:
                    val = key_val_pair[0][1]
                    row[index] = val
                    del key_repeated[0]
                del key_val_new[0]
                del key_val_pair[0]
            csv_file_writer.writerow(row)

    return target_filename
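
A minimal sketch of the pairing idiom used above (names are illustrative, not from the example): with no fillvalue, zip_longest pads the shorter input with None, so a key list longer than a data row still yields one pair per key.

from itertools import zip_longest

keys = ['id', 'name', 'serial']
row = ['7', 'alpha']                    # one value short
pairs = list(zip_longest(keys, row))
# [('id', '7'), ('name', 'alpha'), ('serial', None)]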
Example #2
def set_repeat_value(total_keys_repeat, keys, csv_file, target_filename):
    key_val_pair = []
    key_repeated = []
    with open(csv_file, 'r') as read_from, open(target_filename,'w') as write_to:
        csv_file_reader = csv.reader(read_from, delimiter=',')
        headers = next(csv_file_reader)
        values = next(csv_file_reader)
        csv_file_writer = csv.writer(write_to, delimiter=',')
        csv_file_writer.writerow(headers)
        csv_file_writer.writerow(values)
        total_keys_values = list(zip_longest(keys, values))

        # read new data, add value if key has repeat tag, write to new file
        for row in csv_file_reader:
            index = -1
            key_val_new = list(zip_longest(keys, row))
            key_val_pair = total_keys_values[:]
            key_repeated = total_keys_repeat[:]
            while key_val_new and key_repeated:
                index = index + 1
                #  if key has repeat tag, get its corresponding value, write to file
                if key_val_new[0][0] == key_repeated[0]:
                    val = key_val_pair[0][1]
                    row[index] = val
                    del key_repeated[0]
                del key_val_new[0]
                del key_val_pair[0]
            # write each row exactly once, after all repeat keys are resolved
            csv_file_writer.writerow(row)

    return target_filename
Example #3
def check_journey(journey, ref_journey):
    """
    check the values in a journey
    """
    for section, ref_section in zip_longest(journey['sections'],
                                            ref_journey.sections):
        assert section.get(
            'departure_date_time') == ref_section.departure_date_time
        assert section.get(
            'arrival_date_time') == ref_section.arrival_date_time
        assert section.get(
            'base_departure_date_time') == ref_section.base_departure_date_time
        assert section.get(
            'base_arrival_date_time') == ref_section.base_arrival_date_time
        for stop_dt, ref_stop_dt in zip_longest(
                section.get('stop_date_times', []),
                ref_section.stop_date_times):
            assert stop_dt.get(
                'departure_date_time') == ref_stop_dt.departure_date_time
            assert stop_dt.get(
                'arrival_date_time') == ref_stop_dt.arrival_date_time
            assert stop_dt.get('base_departure_date_time'
                               ) == ref_stop_dt.base_departure_date_time
            assert stop_dt.get(
                'base_arrival_date_time') == ref_stop_dt.base_arrival_date_time
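
Worth noting about the pattern above: because zip_longest pads the shorter sequence with None, a length mismatch between the journey and the reference surfaces as an error inside the loop rather than being silently truncated as plain zip would do. A minimal sketch with made-up data:

from itertools import zip_longest

observed = [{'x': 1}, {'x': 2}]
reference = [{'x': 1}]
for obs, ref in zip_longest(observed, reference):
    assert obs.get('x') == ref['x']     # raises TypeError once ref is None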
Example #4
def combineblocks(blks, imgsz, stpsz=None, fn=np.median):
    """Combine blocks from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
      Array of blocks of a signal
    imgsz : tuple
      Tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
      Tuple of step sizes between neighboring blocks
    fn : function, optional (default np.median)
      Function used to resolve multivalued cells

    Returns
    -------
    imgs : ndarray
      Reconstructed signal, unknown pixels are returned as np.nan
    """

    # Construct a vectorized append function
    def listapp(x, y):
        x.append(y)

    veclistapp = np.vectorize(listapp, otypes=[np.object_])

    blksz = blks.shape[:-1]

    if stpsz is None:
        stpsz = tuple(1 for _ in blksz)

    # Calculate the number of blocks that can fit in each dimension of
    # the images
    numblocks = tuple(
        int(np.floor((a - b) / c) + 1)
        for a, b, c in zip_longest(imgsz, blksz, stpsz, fillvalue=1))

    new_shape = blksz + numblocks
    blks = np.reshape(blks, new_shape)

    # Construct an imgs matrix of empty lists
    imgs = np.empty(imgsz, dtype=np.object_)
    imgs.fill([])
    imgs = np.frompyfunc(list, 1, 1)(imgs)

    # Iterate over each block and append the values to the corresponding
    # imgs cell
    for pos in np.ndindex(numblocks):
        slices = tuple(
            slice(a * c, a * c + b)
            for a, b, c in zip_longest(pos, blksz, stpsz, fillvalue=1))
        veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze())

    return np.vectorize(fn, otypes=[blks.dtype])(imgs)
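
The fillvalue=1 in the numblocks computation is what lets imgsz carry more dimensions than blksz and stpsz: missing block and step sizes default to 1, so each trailing image axis contributes floor((a - 1) / 1) + 1 = a block positions. A small sketch of that arithmetic with illustrative sizes:

import numpy as np
from itertools import zip_longest

imgsz, blksz, stpsz = (8, 8, 3), (4, 4), (2, 2)     # e.g. an RGB image, 2-D blocks
numblocks = tuple(int(np.floor((a - b) / c) + 1)
                  for a, b, c in zip_longest(imgsz, blksz, stpsz, fillvalue=1))
# (3, 3, 3): 3x3 spatial block positions, plus 3 along the unblocked channel axis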
Example #5
def set_repeat_value(total_keys_repeat, keys, csv_file, target_filename):
    key_val_pair = []
    key_repeated = []
    line = None
    newline = u'\n'
    with open(csv_file, 'r',
              newline=None) as read_from, open(target_filename,
                                               'w',
                                               newline=None) as write_to:
        headers = read_from.readline()
        values = read_from.readline()
        write_to.write(headers)
        write_to.write(values)
        if not isinstance(values, str):
            values = values.decode('utf-8')
        values = values.strip().split(',')
        total_keys_values = list(zip_longest(keys, values))

        # read new data, add value if key has repeat tag, write to new file
        line = read_from.readline()
        if not isinstance(line, str):
            line = line.decode('utf-8')
        row = line.strip().split(',')
        while row:
            index = -1
            key_val_new = list(zip_longest(keys, row))
            key_val_pair = total_keys_values[:]
            key_repeated = total_keys_repeat[:]
            while key_val_new and key_repeated:
                index = index + 1
                #  if key has repeat tag, get its corresponding value, write to file
                if key_val_new[0][0] == key_repeated[0]:
                    val = key_val_pair[0][1]
                    row[index] = val
                    del key_repeated[0]
                del key_val_new[0]
                del key_val_pair[0]

            line_to_write = u','.join(row)
            write_to.write(line_to_write)
            write_to.write(newline)

            # Read next line
            line = read_from.readline()
            if not isinstance(line, str):
                line = line.decode('utf-8')
            row = line.strip().split(',')
            if len(row) == 1 and '' in row:
                break

    return target_filename
Example #6
def combineblocks(blks, imgsz, stpsz=None, fn=np.median):
    """Combine blocks from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
      nd array of blocks of a signal
    imgsz : tuple
      tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
      tuple of step sizes between neighboring blocks
    fn : function, optional (default np.median)
      the function used to resolve multivalued cells

    Returns
    -------
    imgs : ndarray
      reconstructed signal, unknown pixels are returned as np.nan
    """

    # Construct a vectorized append function
    def listapp(x, y):
        x.append(y)
    veclistapp = np.vectorize(listapp, otypes=[np.object_])

    blksz = blks.shape[:-1]

    if stpsz is None:
        stpsz = tuple(1 for _ in blksz)

    # Calculate the number of blocks that can fit in each dimension of
    # the images
    numblocks = tuple(int(np.floor((a-b)/c) + 1) for a, b, c in
                      zip_longest(imgsz, blksz, stpsz, fillvalue=1))

    new_shape = blksz + numblocks
    blks = np.reshape(blks, new_shape)

    # Construct an imgs matrix of empty lists
    imgs = np.empty(imgsz, dtype=np.object_)
    imgs.fill([])
    imgs = np.frompyfunc(list, 1, 1)(imgs)

    # Iterate over each block and append the values to the corresponding
    # imgs cell
    for pos in np.ndindex(numblocks):
        slices = tuple(slice(a*c, a*c + b) for a, b, c in
                       zip_longest(pos, blksz, stpsz, fillvalue=1))
        veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze())

    return np.vectorize(fn, otypes=[blks.dtype])(imgs)
Example #7
    def base_stats( self ):
        '''
        Returns a compiled statistics dictionary for all the different base values in this column
        The dictionary must contain the following keys:

            * depth: Total depth of this column which should be self.depth
            * bqualsum: sum of the base qualities
            * mqualsum: sum of the mapping qualities
            * 'A/C/T/G/N/\*': dictionary of information about the mapping and base qualities for an individual base
                * mapq: list of all the mapping qualities for this base (int(phred - 33))
                * baseq: list of all the base qualities for this base (int(phred - 33))

        @returns the stats dictionary
        '''
        bquals = self.bquals
        mquals = self.mquals
        bases = self.bases
        bqualsum = float( sum( bquals ) )
        mqualsum = float( sum( mquals ) )
        # Let's just make sure of a few things, because samtools mpileup isn't documented especially well
        assert len(bquals) == self.depth, "Somehow length of bases != length of Base Qualities"
        depth = self.depth
        stats = {'depth':depth,'mqualsum':mqualsum,'bqualsum':bqualsum}
        for b,bq,mq in itertools.zip_longest( bases, bquals, mquals, fillvalue=0 ):
            if b not in stats:
                stats[b] = {'baseq':[],'mapq':[]}
            stats[b]['baseq'].append(bq)
            stats[b]['mapq'].append(mq)

        return stats
Example #8
 def set_pan_controls(self, controls):
     for strip, control in zip_longest(self._channel_strips, controls
                                       or []):
         if control:
             control.set_channel(PAN_MAP_CHANNEL)
             control.set_light_and_type(u'Mixer.Pan', FADER_TYPE_BIPOLAR)
         strip.set_pan_control(control)
Example #9
 def set_track_select_buttons(self, buttons):
     for strip, button in zip_longest(self._channel_strips, buttons or []):
         if button:
             button.reset_state()
             button.set_on_off_values(u'Mixer.Selected',
                                      u'Mixer.Unselected')
         strip.set_select_button(button)
Example #10
 def _connect_parameters(self):
     parameters = self._parameter_provider.parameters[:self.controls.control_count]
     for control, parameter_info in zip_longest(self.controls, parameters):
         parameter = parameter_info.parameter if parameter_info else None
         control.mapped_parameter = parameter
         if parameter:
             control.update_sensitivities(parameter_info.default_encoder_sensitivity, parameter_info.fine_grain_encoder_sensitivity)
Example #11
 def _update_parameter_values(self):
     if self.is_enabled():
         for parameter, data_source in zip_longest(
                 self.parameters, self._parameter_value_data_sources):
             value_string = self.parameter_to_string(parameter)
             if data_source:
                 data_source.set_display_string(value_string)
Example #12
 def set_send_controls(self, controls):
     self._send_controls = controls
     for strip, control in zip_longest(self._channel_strips, controls or []):
         if self._send_index is None:
             strip.set_send_controls(None)
         else:
             strip.set_send_controls((None, ) * self._send_index + (control,))
Example #13
def write_seg(dframe, sample_id=None, chrom_ids=None):
    """Format a dataframe or list of dataframes as SEG.

    To put multiple samples into one SEG table, pass `dframe` and `sample_id` as
    equal-length lists of data tables and sample IDs in matching order.
    """
    assert sample_id is not None
    if isinstance(dframe, pd.DataFrame):
        first = dframe
        first_sid = sample_id
        sids = dframes = None
    else:
        assert not isinstance(sample_id, str)
        dframes = iter(dframe)
        sids = iter(sample_id)
        first = next(dframes)
        first_sid = next(sids)

    if chrom_ids in (None, True):
        chrom_ids = create_chrom_ids(first)
    results = [format_seg(first, first_sid, chrom_ids)]
    if dframes is not None:
        # Unpack matching lists of data and sample IDs
        results.extend(
            format_seg(subframe, sid, chrom_ids)
            for subframe, sid in zip_longest(dframes, sids))
    return pd.concat(results)
Example #14
 def _update_color_fields(self):
     for color_field_index, parameter_info in zip_longest(
             range(WIDTH), self._parameter_provider.parameters[:WIDTH]):
         parameter = parameter_info.parameter if parameter_info else None
         color = u'Device.On' if parameter else u'DefaultButton.Disabled'
         self.parameter_color_fields[color_field_index].color = color
         self.encoder_color_fields[color_field_index].color = color
Example #15
    def hr_update_simulation(self, hr_srv_uuid, hr_char_uuid):
        '''
            Start Notifications
            Retrieve updated value
            Stop Notifications
        '''
        global ble_hr_chrc

        srv_path = None
        chrc = None
        uuid = None
        chrc_path = None
        chars_ret = None
        ble_hr_chrc = True

        try:
            # Get HR Measurement characteristic
            services = list(zip_longest(self.srv_uuid, self.services))
            for uuid, path in services:
                if hr_srv_uuid in uuid:
                    srv_path = path
                    break

            if srv_path is None:
                print("Failure: HR UUID:", hr_srv_uuid, "not found")
                return False

            chars_ret = self.read_chars()

            for path, props in chars_ret.items():
                if path.startswith(srv_path):
                    chrc = self.bus.get_object(BLUEZ_SERVICE_NAME, path)
                    chrc_path = path
                    if hr_char_uuid in props[2]:  # uuid
                        break
            if chrc is None:
                print("Failure: Characteristics for service: ", srv_path,
                      "not found")
                return False
            # Subscribe to notifications
            print("\nSubscribe to notifications: On")
            chrc.StartNotify(dbus_interface=GATT_CHRC_IFACE)

            chrc_props_iface_obj = dbus.Interface(
                self.bus.get_object(BLUEZ_SERVICE_NAME, chrc_path),
                DBUS_PROP_IFACE)
            chrc_props_iface_obj.connect_to_signal('PropertiesChanged',
                                                   props_change_handler)

            event_loop.run()
            chrc.StopNotify(dbus_interface=GATT_CHRC_IFACE)
            time.sleep(2)
            print("\nSubscribe to notifications: Off")

            ble_hr_chrc = False
            return True

        except Exception as e:
            print(e)
            return False
Example #16
    def __init__(self, x=0, y=0, z=0, idx=None, value=None, as_int=False):
        """
        Parameters
        ----------
        x : number-like, Point, iterable
            x-coordinate or iterable type containing all coordinates. If iterable, values are assumed to be in order: (x,y,z).
        y : number-like, optional
            y-coordinate
        z : number-like, optional
            z-coordinate
        idx : int, optional
            Index of point. Useful for sequential coordinates; e.g. a point on a circle profile is sometimes easier to describe
            in terms of its index rather than x,y coords.
        value : number-like, optional
            value at point location (e.g. pixel value of an image)
        as_int : boolean
            If True, coordinates are converted to integers.
        """
        if isinstance(x, Point):
            for attr in self._attr_list:
                item = getattr(x, attr, None)
                setattr(self, attr, item)
        elif is_iterable(x):
            for attr, item in zip_longest(self._attr_list, x, fillvalue=0):
                setattr(self, attr, item)
        else:
            self.x = x
            self.y = y
            self.z = z
            self.idx = idx
            self.value = value

        if as_int:
            self.x = int(round(self.x))
            self.y = int(round(self.y))
            self.z = int(round(self.z))
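
In the iterable branch above, fillvalue=0 pads short coordinate iterables so every attribute in _attr_list gets set. A hedged sketch of the same idea, with an assumed attribute list mirroring Point._attr_list:

from itertools import zip_longest

attr_list = ('x', 'y', 'z', 'idx', 'value')   # assumed ordering, for illustration
coords = (3, 4)                               # only x and y supplied
padded = dict(zip_longest(attr_list, coords, fillvalue=0))
# {'x': 3, 'y': 4, 'z': 0, 'idx': 0, 'value': 0}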
Example #17
 def unavailable_color(self, value):
     self._unavailable_color = value
     control_elements = self._control_elements or []
     for control, element in zip_longest(self._controls,
                                         control_elements):
         if not control and element:
             self._send_unavailable_color(element)
Example #18
def diff_sysfs_dirs(before, after, result):  # pylint: disable=R0914
    before_files = []
    for root, _, files in os.walk(before):
        before_files.extend([os.path.join(root, f) for f in files])
    before_files = list(filter(os.path.isfile, before_files))
    files = [os.path.relpath(f, before) for f in before_files]
    after_files = [os.path.join(after, f) for f in files]
    diff_files = [os.path.join(result, f) for f in files]

    for bfile, afile, dfile in zip(before_files, after_files, diff_files):
        if not os.path.isfile(afile):
            logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile))
            continue

        with open(bfile) as bfh, open(afile) as afh:  # pylint: disable=C0321
            with open(_f(dfile), 'w') as dfh:
                for i, (bline, aline) in enumerate(zip_longest(bfh, afh), 1):
                    if aline is None:
                        logger.debug('Lines missing from {}'.format(afile))
                        break
                    bchunks = re.split(r'(\W+)', bline)
                    achunks = re.split(r'(\W+)', aline)
                    if len(bchunks) != len(achunks):
                        logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
                        dfh.write('xxx ' + bline)
                        continue
                    if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
                            (bchunks[0] == achunks[0])):
                        # if there are only two columns and the first column is the
                        # same, assume it's a "header" column and do not diff it.
                        dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
                    else:
                        dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
                    dfh.write(''.join(dchunks))
Example #19
 def _update_parameter_values(self):
     super(DeviceParameterComponent, self)._update_parameter_values()
     for parameter, control in zip_longest(self.parameters,
                                           self._parameter_controls or []):
         if is_internal_parameter(parameter) and control:
             control.send_value(
                 convert_parameter_value_to_midi_value(parameter))
Example #20
 def _set_channel_strip_controls(self, name, controls):
     for strip, control in zip_longest(self._channel_strips, controls
                                       or []):
         set_method = getattr(strip, u'set_{}'.format(name), None)
         if not set_method:
             set_method = getattr(strip, name, None).set_control_element
         set_method(control)
Example #21
def write_seg(dframe, sample_id=None, chrom_ids=None):
    """Format a dataframe or list of dataframes as SEG.

    To put multiple samples into one SEG table, pass `dframe` and `sample_id` as
    equal-length lists of data tables and sample IDs in matching order.
    """
    assert sample_id is not None
    if isinstance(dframe, pd.DataFrame):
        first = dframe
        first_sid = sample_id
        sids = dframes = None
    else:
        assert not isinstance(sample_id, str)
        dframes = iter(dframe)
        sids = iter(sample_id)
        first = next(dframes)
        first_sid = next(sids)

    if chrom_ids is None:
        chrom_ids = create_chrom_ids(first)
    results = [format_seg(first, first_sid, chrom_ids)]
    if dframes is not None:
        # Unpack matching lists of data and sample IDs
        results.extend(
            format_seg(subframe, sid, chrom_ids)
            for subframe, sid in zip_longest(dframes, sids))
    return pd.concat(results)
Example #22
 def __eq__(self, other):
     if isinstance(other, collections.abc.Iterable):
         return all(
             a == b
             for a, b in zip_longest(self, other, fillvalue=object()))
     else:
         return NotImplemented
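
The fillvalue=object() is the key detail: a fresh object() compares unequal to everything, so sequences of different lengths can never compare equal, even when the longer one ends in None (which the default None fill would wrongly match). A quick sketch:

from itertools import zip_longest

a, b = [1, 2, None], [1, 2]
all(x == y for x, y in zip_longest(a, b))                      # True  (wrong)
all(x == y for x, y in zip_longest(a, b, fillvalue=object()))  # False (correct)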
Example #23
def all_almost_equal(iter1, iter2, places=None):
    """Return ``True`` if all elements in ``a`` and ``b`` are almost equal."""
    try:
        if iter1 is iter2 or iter1 == iter2:
            return True
    except ValueError:
        pass

    if iter1 is None and iter2 is None:
        return True

    if hasattr(iter1, '__array__') and hasattr(iter2, '__array__'):
        # Only get default places if comparing arrays, need to keep `None`
        # otherwise for recursive calls.
        if places is None:
            places = _places(iter1, iter2, None)
        return all_almost_equal_array(iter1, iter2, places)

    try:
        it1 = iter(iter1)
        it2 = iter(iter2)
    except TypeError:
        return almost_equal(iter1, iter2, places)

    diff_length_sentinel = object()
    for [ip1, ip2] in zip_longest(it1, it2, fillvalue=diff_length_sentinel):
        # Verify that none of the lists has ended (then they are not the
        # same size)
        if ip1 is diff_length_sentinel or ip2 is diff_length_sentinel:
            return False

        if not all_almost_equal(ip1, ip2, places):
            return False

    return True
Example #24
def fastqs_are_pair(fastq1=None,fastq2=None,verbose=True,fp1=None,fp2=None):
    """Check that two FASTQs form an R1/R2 pair

    Arguments:
      fastq1: first FASTQ
      fastq2: second FASTQ

    Returns:
      True if each read in fastq1 forms an R1/R2 pair with the equivalent
      read (i.e. in the same position) in fastq2, otherwise False if
      any do not form an R1/R2 pair (or if one file has more reads than
      the other).

    """
    # Use zip_longest, which will return None if either of
    # the fastqs is exhausted before the other
    i = 0
    for r1,r2 in itertools.zip_longest(
            FastqIterator(fastq_file=fastq1,fp=fp1),
            FastqIterator(fastq_file=fastq2,fp=fp2)):
        i += 1
        if verbose:
            if i%100000 == 0:
                print("Examining pair #%d" % i)
        if not r1.seqid.is_pair_of(r2.seqid):
            if verbose:
                print("Unpaired headers for read position #%d:" % i)
                print("%s\n%s" % (r1.seqid,r2.seqid))
            return False
    return True
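
Here the default None fill is what detects unequal read counts: once the shorter FASTQ is exhausted its slot comes back as None and the is_pair_of() check fails. A minimal sketch of the pattern with stand-in data:

from itertools import zip_longest

reads1 = ['r1/1', 'r2/1', 'r3/1']
reads2 = ['r1/2', 'r2/2']            # one read short
for r1, r2 in zip_longest(reads1, reads2):
    if r1 is None or r2 is None:
        print('files have unequal read counts')
        break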
Example #25
        def _message_details(element):
            if isinstance(element, string_types):
                element = [element]

            text, detail = (x or y
                            for x, y in zip_longest(element, ('', None)))
            return {'text': text, 'detail': detail}
Example #26
def permute_by_iteration(specs):
    """
    Runs the first iteration for all benchmarks first, before proceeding to the
    next iteration, i.e. A1, B1, C1, A2, B2, C2...  instead of  A1, A2, B1, B2,
    C1, C2...

    If multiple sections were specified in the agenda, this will run all
    sections for the first global spec first, followed by all sections for the
    second spec, etc.

    e.g. given sections X and Y, and global specs A and B, with 2 iterations,
    this will run

    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2

    """
    groups = [list(g) for _, g in groupby(specs, lambda s: s.workload_id)]

    all_tuples = []
    for spec in chain(*groups):
        all_tuples.append([(spec, i + 1)
                           for i in range(spec.iterations)])
    for t in chain(*list(map(list, zip_longest(*all_tuples)))):
        if t is not None:
            yield t
Example #27
 def set_arm_buttons(self, buttons):
     for strip, button in zip_longest(self._channel_strips, buttons or []):
         if button:
             button.reset_state()
             button.set_on_off_values('Mixer.ArmOn', 'Mixer.ArmOff')
         strip.set_arm_button(button)
Example #28
def flatten_tree(obj, key=''):
    if isinstance(obj, list):
        new_rows = []

        for o in obj:
            if isinstance(o, dict):
                new_rows.extend(flatten_tree(o))
            else:
                new_rows.append({key: o})

    elif isinstance(obj, dict):
        common_keys = {}
        all_rows = [[common_keys]]

        for k, v in obj.items():
            if isinstance(v, list):
                all_rows.append(flatten_tree(v, k))
            elif isinstance(v, dict):
                common_keys.update(*flatten_tree(v))
            else:
                common_keys[k] = v

        new_rows = [{k: v
                     for r in row for k, v in r.items()}
                    for row in zip_longest(*all_rows, fillvalue={})]

    else:
        new_rows = []

    return new_rows
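
A worked example of what the zip_longest(*all_rows, fillvalue={}) merge produces, traced from the code above:

flatten_tree({'a': 1, 'b': [{'c': 2}, {'c': 3}]})
# all_rows becomes [[{'a': 1}], [{'c': 2}, {'c': 3}]]; zip_longest pads the
# shorter column with {}, giving rows ({'a': 1}, {'c': 2}) and ({}, {'c': 3}),
# which merge to [{'a': 1, 'c': 2}, {'c': 3}]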
Example #29
def read(filename, remove_duplicates=False, data_wrapper=DataWrapper):
    '''Read a file and return the `data_wrapper`'d data

    * Tries to guess the format and the H5 version.
    * Unpacks the first block it finds out of ('repaired', 'unraveled', 'raw')

    Parameters:
        remove_duplicates: boolean, If True removes duplicate points
        from the beginning of each section.
    '''
    with h5py.File(filename, mode='r') as h5file:
        version = get_version(h5file)
        if version == 'H5V1':
            points, groups = _unpack_v1(h5file)
        elif version == 'H5V2':
            stg = next(s for s in ('repaired', 'unraveled', 'raw')
                       if s in h5file['neuron1'])
            points, groups = _unpack_v2(h5file, stage=stg)

    if remove_duplicates:
        points, groups = _remove_duplicate_points(points, groups)

    neuron_builder = BlockNeuronBuilder()
    points[:, POINT_DIAMETER] /= 2  # Store radius, not diameter
    for id_, row in enumerate(
            zip_longest(groups, groups[1:, GPFIRST], fillvalue=len(points))):
        (point_start, section_type, parent_id), point_end = row
        neuron_builder.add_section(id_, int(parent_id), int(section_type),
                                   points[point_start:point_end])

    return neuron_builder.get_datawrapper(version, data_wrapper=data_wrapper)
Example #30
 def _update_return_track_color_controls(self):
     value = 0
     if self.send_select_buttons.active_control_count:
         value = self.send_select_buttons[self.send_index].checked_color
     for strip, control in zip_longest(self._channel_strips,
                                       self.return_track_color_controls):
         control.value = value if liveobj_valid(strip.track) else 0
Example #31
 def set_track_select_buttons(self, buttons):
     for strip, button in zip_longest(self._channel_strips, buttons or []):
         if button:
             button.set_on_off_values('Mixer.TrackSelected',
                                      'Mixer.TrackUnselected')
         strip.set_select_button(button)
Example #32
def read(filename, remove_duplicates=False, data_wrapper=DataWrapper):
    '''Read a file and return the `data_wrapper`'d data

    * Tries to guess the format and the H5 version.
    * Unpacks the first block it finds out of ('repaired', 'unraveled', 'raw')

    Parameters:
        remove_duplicates: boolean, If True removes duplicate points
        from the beginning of each section.
    '''
    with h5py.File(filename, mode='r') as h5file:
        version = get_version(h5file)
        if version == 'H5V1':
            points, groups = _unpack_v1(h5file)
        elif version == 'H5V2':
            stg = next(s for s in ('repaired', 'unraveled', 'raw')
                       if s in h5file['neuron1'])
            points, groups = _unpack_v2(h5file, stage=stg)

    if remove_duplicates:
        points, groups = _remove_duplicate_points(points, groups)

    neuron_builder = BlockNeuronBuilder()
    points[:, POINT_DIAMETER] /= 2  # Store radius, not diameter
    for id_, row in enumerate(zip_longest(groups,
                                          groups[1:, GPFIRST],
                                          fillvalue=len(points))):
        (point_start, section_type, parent_id), point_end = row
        neuron_builder.add_section(id_, int(parent_id), int(section_type),
                                   points[point_start:point_end])

    return neuron_builder.get_datawrapper(version, data_wrapper=data_wrapper)
Example #33
 def __eq__(self, other):
   if isinstance(other, collections.abc.Iterable):
     return all(
         a == b
         for a, b in zip_longest(self, other, fillvalue=object()))
   else:
     return NotImplemented
Example #34
    def set_note_editor_matrices(self, matrices):
        self._matrices = matrices
        for editor, matrix in zip_longest(self._note_editors, matrices or []):
            if editor:
                editor.set_matrix(matrix)

        self._update_matrix_channels_for_playhead()
Example #35
 def _connect_parameters(self):
     for control, parameter in zip_longest(self._parameter_controls or [], self.selected_bank):
         if liveobj_valid(control):
             if liveobj_valid(parameter):
                 control.connect_to(parameter)
             else:
                 control.release_parameter()
                 self._empty_control_slots.register_slot(control, nop, 'value')
Example #36
 def _update_controls(self):
     control_elements = self._control_elements or []
     for control, element in zip_longest(self._controls, control_elements):
         if control:
             control._get_state(self._manager).set_control_element(element)
         elif element:
             element.reset_state()
             self._send_unavailable_color(element)
Example #37
 def __lt__(self, other):
   if not self._is_valid_operand(other):
     return AttributeError  # TODO(python3port): typically this should return NotImplemented.
                            # Returning AttributeError for now to avoid changing prior API.
   for ours, theirs in zip_longest(self._components, other._components, fillvalue=None):
     if ours != theirs:
       ours, theirs = self._fill_value_if_missing(ours, theirs)
       ours, theirs = self._stringify_if_different_types(ours, theirs)
       return ours < theirs
   return False
Example #38
def extractblocks(img, blksz, stpsz=None):
    """Extract blocks from an ndarray signal into an ndarray.

    Parameters
    ----------
    img : ndarray or tuple of ndarrays
      nd array of images, or tuple of images
    blksz : tuple
      tuple of block sizes, blocks are taken starting from the first index
      of img
    stpsz : tuple, optional (default None, corresponds to steps of 1)
      tuple of step sizes between neighboring blocks

    Returns
    -------
    blks : ndarray
      image blocks
    """

    # See http://stackoverflow.com/questions/16774148 and
    # sklearn.feature_extraction.image.extract_patches_2d
    if isinstance(img, tuple):
        img = np.stack(img, axis=-1)

    if stpsz is None:
        stpsz = (1,) * len(blksz)

    imgsz = img.shape

    # Calculate the number of blocks that can fit in each dimension of
    # the images
    numblocks = tuple(int(np.floor((a - b) / c) + 1) for a, b, c in
                      zip_longest(imgsz, blksz, stpsz, fillvalue=1))

    # Calculate the strides for blocks
    blockstrides = tuple(a * b for a, b in zip_longest(img.strides, stpsz,
                                                       fillvalue=1))

    new_shape = blksz + numblocks
    new_strides = img.strides[:len(blksz)] + blockstrides
    blks = np.lib.stride_tricks.as_strided(img, new_shape, new_strides)
    return np.reshape(blks, blksz + (-1,))
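
A small usage sketch with illustrative values: non-overlapping 2x2 blocks from a 4x4 image give numblocks of (2, 2), and the final reshape flattens the block grid into the last axis.

import numpy as np

img = np.arange(16).reshape(4, 4)
blks = extractblocks(img, blksz=(2, 2), stpsz=(2, 2))
print(blks.shape)      # (2, 2, 4): four 2x2 blocks stacked along the last axis
print(blks[:, :, 0])   # the top-left block: [[0, 1], [4, 5]]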
Example #39
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.
    """

    if not argv:
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(
        version="%prog version: $Id$",
        usage=globals()["__doc__"])

    parser.add_option(
        "-a", "--first-fastq-file", dest="fastq1", type="string",
        help="supply read1 fastq file")
    parser.add_option(
        "-b", "--second-fastq-file", dest="fastq2", type="string",
        help="supply read2 fastq file")

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start(parser, argv=argv)

    if args and len(args) == 2:
        options.fastq1, options.fastq2 = args

    fastq1 = IOTools.openFile(options.fastq1)
    fastq2 = IOTools.openFile(options.fastq2)

    E.info("iterating over fastq files")
    f1_count = 0
    for f1, f2 in zip_longest(Fastq.iterate(fastq1),
                              Fastq.iterate(fastq2)):
        if not (f1 and f2):  # one file exhausted before the other
            try:
                raise PairedReadError(
                    "unpaired reads detected. Are files sorted? are "
                    "files of equal length?")
            except PairedReadError as e:
                raise PairedReadError(e).with_traceback(sys.exc_info()[2])
        else:
            assert f1.identifier.endswith("/1") and \
                f2.identifier.endswith("/2"), \
                "Reads in file 1 must end with /1 and reads in file 2 with /2"
            options.stdout.write(
                ">%s\n%s\n>%s\n%s\n" %
                (f1.identifier, f1.seq, f2.identifier, f2.seq))
            f1_count += 1

    E.info("output: %i pairs" % f1_count)

    # write footer and output benchmark information.
    E.Stop()
Example #40
    def get_intro_point(self):
        """
        Generator function which yields an introduction point

        Iterates through all available introduction points and tries
        to pick IPs breadth-first across all backend instances. The
        intro point set is wrapped in `itertools.cycle` and will provide
        an infinite series of introduction points.
        """

        # Combine intro points from across the backend instances and flatten
        intro_points = zip_longest(*self.available_intro_points)
        flat_intro_points = itertools.chain.from_iterable(intro_points)
        for intro_point in itertools.cycle(flat_intro_points):
            if intro_point:
                yield intro_point
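
The zip_longest(*lists) plus chain.from_iterable combination is a round-robin interleave: one item from each backend before a second from any, with None padding (filtered out by the if) where a backend has fewer points. A sketch of the same transposition without the infinite cycle, using made-up data:

from itertools import chain, zip_longest

backends = [['a1', 'a2'], ['b1'], ['c1', 'c2', 'c3']]
interleaved = [p for p in chain.from_iterable(zip_longest(*backends)) if p]
# ['a1', 'b1', 'c1', 'a2', 'c2', 'c3']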
Example #41
 def _apply(cls, op, x, y):
     """This internal function allows the application of rich comparison operators between two
     numbers, a number and a (possibly nested) sequence of numbers, or two (flat/nested)
     sequences of numbers. When comparing two sequences, missing values are filled with NaN.
     Returns a generator expression in case sequences are involved, or a plain old boolean if
     two numbers are being compared.
     """
     x_is_iterable = isinstance(x, Iterable)
     y_is_iterable = isinstance(y, Iterable)
     if x_is_iterable and y_is_iterable:
         return (cls._apply(op, u, v) for u, v in zip_longest(x, y, fillvalue=float("NaN")))
     elif x_is_iterable:
         return (cls._apply(op, u, y) for u in x)
     elif y_is_iterable:
         return (cls._apply(op, x, v) for v in y)
     else:
         return op(cls(x), y)
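
Filling with NaN makes comparisons over the padded tail well defined: every rich comparison against NaN is False, so overhanging elements of the longer sequence simply compare False instead of raising. A sketch with operator.lt standing in for op and plain floats in place of cls:

import operator
from itertools import zip_longest

x, y = [1.0, 2.0, 3.0], [2.0, 2.0]
[operator.lt(u, v) for u, v in zip_longest(x, y, fillvalue=float("NaN"))]
# [True, False, False]: the unmatched 3.0 is compared against NaN, giving False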
Example #42
def averageblocks(blks, imgsz, stpsz=None):
    """Average blocks together from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
      nd array of blocks of a signal
    imgsz : tuple
      tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
      tuple of step sizes between neighboring blocks

    Returns
    -------
    imgs : ndarray
      reconstructed signal, unknown pixels are returned as np.nan
    """

    blksz = blks.shape[:-1]

    if stpsz is None:
        stpsz = tuple(1 for _ in blksz)

    # Calculate the number of blocks that can fit in each dimension of
    # the images
    numblocks = tuple(int(np.floor((a-b)/c)+1) for a, b, c in
                      zip_longest(imgsz, blksz, stpsz, fillvalue=1))

    new_shape = blksz + numblocks
    blks = np.reshape(blks, new_shape)

    # Construct an imgs matrix of empty lists
    imgs = np.zeros(imgsz, dtype=blks.dtype)
    normalizer = np.zeros(imgsz, dtype=blks.dtype)

    # Iterate over each block and accumulate its values into the
    # corresponding imgs cells
    for pos in np.ndindex(numblocks):
        slices = tuple(slice(a*c, a*c+b) for a, b, c in
                       zip(pos, blksz, stpsz))
        imgs[slices+pos[len(blksz):]] += blks[(Ellipsis, )+pos]
        normalizer[slices+pos[len(blksz):]] += blks.dtype.type(1)

    return np.where(normalizer > 0, (imgs/normalizer).astype(blks.dtype),
                    np.nan)
Example #43
  def _render_message(self, *msg_elements):
    # Identifies all details in this message, so that opening one can close all the others.
    detail_class = str(uuid.uuid4())

    html_fragments = ['<div>']

    detail_divs = []
    for element in msg_elements:
      # Each element can be a message or a (message, detail) pair, as received by handle_log().
      #
      # However, as an internal implementation detail, we also allow an element to be a tuple
      # (message, detail, detail_id[, detail_initially_visible])
      #
      # - If the detail exists, clicking on the text will toggle display of the detail and close
      #   all other details in this message.
      # - If detail_initially_visible is True, the detail will be displayed by default.
      #
      # We allow detail_id to be explicitly specified, so that the open/closed state can be
      # preserved through refreshes. For example, when looking at the artifact cache stats,
      # if "hits" are open and "misses" are closed, we want to remember that even after
      # the cache stats are updated and the message re-rendered.
      if isinstance(element, string_types):
        element = [element]

      # zip_longest fills missing values with None, so this generator picks the default for those.
      default_values = ('', None, None, False)
      (text, detail, detail_id, detail_initially_visible) = (x or y for x, y in zip_longest(element, default_values))

      htmlified_text = self._htmlify_text(text)

      if detail is None:
        html_fragments.append(htmlified_text)
      else:
        detail_id = detail_id or str(uuid.uuid4())
        detail_visibility_class = '' if detail_initially_visible else 'nodisplay'
        html_fragments.append(self._detail_a_fmt_string.format(
            text=htmlified_text, detail_id=detail_id, detail_class=detail_class))
        detail_divs.append(self._detail_div_fmt_string.format(
          detail_id=detail_id, detail=detail, detail_class=detail_class,
          detail_visibility_class=detail_visibility_class
        ))
    html_fragments.extend(detail_divs)
    html_fragments.append('</div>')

    return ''.join(html_fragments)
Example #44
    def _message_details(element):
      if isinstance(element, string_types):
        element = [element]

      text, detail = (x or y for x, y in zip_longest(element, ('', None)))
      return {'text': text, 'detail': detail}
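
The (x or y for x, y in zip_longest(element, defaults)) idiom pads a short tuple against a tuple of defaults: zip_longest supplies None for missing positions, and `or` then falls through to the default. (Note that a falsy provided value such as '' also falls through.) A sketch:

from itertools import zip_longest

defaults = ('', None)
text, detail = (x or y for x, y in zip_longest(('a message',), defaults))
# text == 'a message', detail is None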
Example #45
def main(argv=None):
    if argv is None:
        argv = sys.argv

    parser = E.OptionParser(version="%prog version",
                            usage=globals()["__doc__"])

    parser.add_option(
        "-m", "--method", dest="methods", type="choice", action="append",
        choices=("translate",
                 "translate-to-stop",
                 "truncate-at-stop",
                 "back-translate",
                 "mark-codons",
                 "apply-map",
                 "build-map",
                 "pseudo-codons",
                 "filter",
                 "interleaved-codons",
                 "map-codons",
                 "remove-gaps",
                 "mask-seg",
                 "mask-bias",
                 "mask-codons",
                 "mask-incomplete-codons",
                 "mask-stops",
                 "mask-soft",
                 "remove-stops",
                 "upper",
                 "lower",
                 "reverse-complement",
                 "sample",
                 "shuffle"),
        help="method to apply to sequences.")

    parser.add_option(
        "-p", "--parameters", dest="parameters", type="string",
        help="parameter stack for methods that require one "
        "[default=%default].")

    parser.add_option(
        "-x", "--ignore-errors", dest="ignore_errors", action="store_true",
        help="ignore errors [default = %default].")

    parser.add_option("--sample-proportion", dest="sample_proportion",
                      type="float",
                      help="sample proportion [default = %default].")

    parser.add_option(
        "--exclude-pattern", dest="exclude_pattern", type="string",
        help="exclude all sequences with ids matching pattern "
        "[default = %default].")

    parser.add_option(
        "--include-pattern", dest="include_pattern", type="string",
        help="include only sequences with ids matching pattern "
        "[default = %default].")

    parser.add_option(
        "--filter-method", dest="filter_methods", type="string",
        action="append",
        help="filtering methods to apply "
        "[default = %default].")

    parser.add_option(
        "-t", "--sequence-type", dest="type", type="choice",
        choices=("aa", "na"),
        help="sequence type (aa or na) [%default]. This option determines "
        "which characters to use for masking [default = %default].")

    parser.add_option(
        "-l", "--template-identifier", dest="template_identifier",
        type="string",
        help="template for numerical identifier [default = %default] "
        "for the operation --build-map. A %i is replaced by the position "
        "of the sequence in the file.")

    parser.set_defaults(
        methods=[],
        parameters="",
        type="na",
        aa_mask_chars="xX",
        aa_mask_char="x",
        na_mask_chars="nN",
        na_mask_char="n",
        gap_chars="-.",
        gap_char="-",
        template_identifier="ID%06i",
        ignore_errors=False,
        exclude_pattern=None,
        include_pattern=None,
        sample_proportion=None,
        filter_methods=[],
    )

    (options, args) = E.Start(parser)
    options.parameters = options.parameters.split(",")

    rx_include, rx_exclude = None, None
    if options.include_pattern:
        rx_include = re.compile(options.include_pattern)
    if options.exclude_pattern:
        rx_exclude = re.compile(options.exclude_pattern)

    iterator = FastaIterator.FastaIterator(options.stdin)

    nseq = 0

    map_seq2nid = {}

    if "apply-map" in options.methods:
        map_seq2nid = IOTools.ReadMap(open(options.parameters[0], "r"))
        del options.parameters[0]

    if options.type == "na":
        mask_chars = options.na_mask_chars
        mask_char = options.na_mask_char
    else:
        mask_chars = options.aa_mask_chars
        mask_char = options.aa_mask_char

    if "map-codons" in options.methods:
        map_codon2code = IOTools.ReadMap(open(options.parameters[0], "r"))
        del options.parameters[0]

    if "mask-soft" in options.methods:
        f = options.parameters[0]
        del options.parameters[0]
        hard_masked_iterator = FastaIterator.FastaIterator(open(f, "r"))

    if "mask-codons" in options.methods or "back-translate" in options.methods:

        # open a second stream to read sequences from
        f = options.parameters[0]
        del options.parameters[0]

        other_iterator = FastaIterator.FastaIterator(open(f, "r"))

    ninput, noutput, nerrors, nskipped = 0, 0, 0, 0

    if "sample" in options.methods:
        if not options.sample_proportion:
            raise ValueError("specify a sample proportion")
        sample_proportion = options.sample_proportion
    else:
        sample_proportion = None

    filter_min_sequence_length = None
    filter_max_sequence_length = None
    filter_id_list = None
    for f in options.filter_methods:
        if f.startswith("min-length"):
            filter_min_sequence_length = int(f.split("=")[1])
        elif f.startswith("max-length"):
            filter_max_sequence_length = int(f.split("=")[1])
        elif f.startswith("id-file"):
            filter_id_list = [line[:-1] for line in IOTools.openFile(f.split("=")[1])]

    def raiseIfNotCodon(l, title):
        '''raise ValueError if sequence length l is not divisible by
        3'''

        if l % 3 != 0:
            raise ValueError(
                "length of sequence %s not divisible by 3" % (title))

    while 1:
        try:
            cur_record = next(iterator)
        except StopIteration:
            break

        if cur_record is None:
            break
        nseq += 1
        ninput += 1

        sequence = re.sub(" ", "", cur_record.sequence)
        l = len(sequence)

        if rx_include and not rx_include.search(cur_record.title):
            nskipped += 1
            continue

        if rx_exclude and rx_exclude.search(cur_record.title):
            nskipped += 1
            continue

        if sample_proportion:
            if random.random() > sample_proportion:
                continue

        if not (filter_id_list is None or cur_record.title in filter_id_list):
            nskipped += 1
            continue

        for method in options.methods:

            if method == "translate":
                # translate such that gaps are preserved
                seq = []

                ls = len(re.sub('[%s]' % options.gap_chars, "", sequence))

                if ls % 3 != 0:
                    msg = "length of sequence %s (%i) not divisible by 3" % (
                        cur_record.title, ls)
                    nerrors += 1
                    if options.ignore_errors:
                        E.warn(msg)
                        continue
                    else:
                        raise ValueError(msg)

                for codon in [sequence[x:x + 3] for x in range(0, l, 3)]:
                    aa = Genomics.MapCodon2AA(codon)
                    seq.append(aa)

                sequence = "".join(seq)

            elif method == "back-translate":
                # translate from an amino acid alignment to codon alignment
                seq = []

                try:
                    other_record = next(other_iterator)
                except StopIteration:
                    raise ValueError("run out of sequences")

                if cur_record.title != other_record.title:
                    raise ValueError(
                        "sequence titles don't match: %s %s" % (
                            cur_record.title, other_record.title))

                other_sequence = re.sub(
                    "[ %s]" % options.gap_chars, "", other_record.sequence)

                if len(other_sequence) % 3 != 0:
                    raise ValueError(
                        "length of sequence %s not divisible by 3" %
                        (other_record.title))

                r = re.sub("[%s]" % options.gap_chars, "", sequence)
                if len(other_sequence) != len(r) * 3:
                    raise ValueError(
                        "length of sequences do not match: %i vs %i" %
                        (len(other_sequence), len(r)))

                x = 0
                for aa in sequence:
                    if aa in options.gap_chars:
                        c = options.gap_char * 3
                    else:
                        c = other_sequence[x:x + 3]
                        x += 3
                    seq.append(c)

                sequence = "".join(seq)

            elif method == "pseudo-codons":
                raiseIfNotCodon(l, cur_record.title)
                seq = []

                for codon in [sequence[x:x + 3] for x in range(0, l, 3)]:

                    aa = Genomics.MapCodon2AA(codon)
                    seq.append(aa)

                sequence = "   ".join(seq)

            elif method == "reverse-complement":
                sequence = string.translate(
                    sequence, string.maketrans("ACGTacgt", "TGCAtgca"))[::-1]

            elif method in ("mask-stops", "remove-stops"):
                c = []
                codon = []
                new_sequence = []

                if method == "mask-stops":
                    char = options.na_mask_char
                elif method == "remove-stops":
                    char = options.gap_char

                for x in sequence:

                    if x not in options.gap_chars:
                        codon.append(x.upper())

                    c.append(x)

                    if len(codon) == 3:
                        codon = "".join(codon).upper()
                        # mask all non-gaps
                        if Genomics.IsStopCodon(codon):

                            for x in c:
                                if x in options.gap_chars:
                                    new_sequence.append(x)
                                else:
                                    new_sequence.append(char)
                        else:
                            new_sequence += c

                        c = []
                        codon = []

                new_sequence += c

                sequence = "".join(new_sequence)

            elif method == "mask-soft":
                # Get next hard masked record and extract sequence and length
                try:
                    cur_hm_record = next(hard_masked_iterator)
                except StopIteration:
                    break
                hm_sequence = re.sub(" ", "", cur_hm_record.sequence)
                lhm = len(hm_sequence)
                new_sequence = []

                # Check lengths of unmasked and soft masked sequences the same
                if l != lhm:
                    raise ValueError(
                        "length of unmasked and hard masked sequences not "
                        "identical for record %s" %
                        (cur_record.title))

                # Check if hard masked seq contains repeat (N), if so replace N
                # with lowercase sequence from unmasked version
                if sequence == hm_sequence:
                    pass
                else:
                    for x, y in zip_longest(sequence, hm_sequence):
                        if y == "N":
                            new_sequence += x.lower()
                        else:
                            new_sequence += x.upper()
                sequence = "".join(new_sequence)

            elif method == "map-codons":
                raiseIfNotCodon(l, cur_record.title)
                seq = []

                for codon in (sequence[x:x + 3].upper()
                              for x in range(0, l, 3)):

                    if codon not in map_codon2code:
                        aa = "X"
                    else:
                        aa = map_codon2code[codon]
                    seq.append(aa)

                sequence = "".join(seq)

            elif method == "interleaved-codons":
                raiseIfNotCodon(l, cur_record.title)
                seq = []

                for codon in [sequence[x:x + 3] for x in range(0, l, 3)]:

                    aa = Genomics.MapCodon2AA(codon)
                    seq.append("%s:%s" % (aa, codon))

                sequence = " ".join(seq)

            elif method == "translate-to-stop":
                seq = []

                for codon in [sequence[x:x + 3] for x in range(0, l, 3)]:

                    if Genomics.IsStopCodon(codon):
                        break

                    aa = Genomics.MapCodon2AA(codon)
                    seq.append(aa)

                sequence = "".join(seq)

            elif method == "truncate-at-stop":
                seq = []

                for codon in [sequence[x:x + 3] for x in range(0, l, 3)]:

                    if Genomics.IsStopCodon(codon):
                        break
                    seq.append(codon)

                sequence = "".join(seq)

            elif method == "remove-gaps":

                seq = []
                for s in sequence:
                    if s in options.gap_chars:
                        continue
                    seq.append(s)

                sequence = "".join(seq)

            elif method == "upper":
                sequence = sequence.upper()

            elif method == "lower":
                sequence = sequence.lower()

            elif method == "mark-codons":
                raiseIfNotCodon(l, cur_record.title)
                seq = []

                sequence = " ".join([sequence[x:x + 3]
                                     for x in range(0, l, 3)])

            elif method == "apply-map":
                id = re.match(r"^(\S+)", cur_record.title).groups()[0]
                if id in map_seq2nid:
                    rest = cur_record.title[len(id):]
                    cur_record.title = map_seq2nid[id] + rest

            elif method == "build-map":
                # build a map of identifiers
                id = re.match(r"^(\S+)", cur_record.title).groups()[0]
                new_id = options.template_identifier % nseq
                if id in map_seq2nid:
                    raise "duplicate fasta entries - can't map those: %s" % id
                map_seq2nid[id] = new_id
                cur_record.title = new_id

            elif method == "mask-bias":
                masker = Masker.MaskerBias()
                sequence = masker(sequence)

            elif method == "mask-seg":
                masker = Masker.MaskerSeg()
                sequence = masker(sequence)

            elif method == "shuffle":
                s = list(sequence)
                random.shuffle(s)
                sequence = "".join(s)

            elif method == "mask-incomplete-codons":
                seq = list(sequence)
                for x in range(0, l, 3):
                    nm = len([x for x in seq[x:x + 3] if x in mask_chars])
                    if 0 < nm < 3:
                        seq[x:x + 3] = [mask_char] * 3
                sequence = "".join(seq)

            elif method == "mask-codons":
                # mask codons based on amino acids given as reference
                # sequences.
                other_record = next(other_iterator)

                if other_record is None:
                    raise ValueError("run out of sequences.")

                if cur_record.title != other_record.title:
                    raise ValueError(
                        "sequence titles don't match: %s %s" %
                        (cur_record.title, other_record.title))

                other_sequence = re.sub(" ", "", other_record.sequence)

                if len(other_sequence) * 3 != len(sequence):
                    raise ValueError(
                        "sequences for %s don't have matching lengths %i - %i" %
                        (cur_record.title, len(other_sequence) * 3,
                         len(sequence)))

                seq = list(sequence)
                c = 0
                for x in other_sequence:
                    if x in options.aa_mask_chars:
                        if x.isupper():
                            seq[c:c + 3] = [options.na_mask_char.upper()] * 3
                        else:
                            seq[c:c + 3] = [options.na_mask_char.lower()] * 3
                    c += 3

                sequence = "".join(seq)

        l = len(sequence)
        if filter_min_sequence_length is not None and \
           l < filter_min_sequence_length:
            nskipped += 1
            continue

        if filter_max_sequence_length is not None and \
           l > filter_max_sequence_length:
            nskipped += 1
            continue

        options.stdout.write(">%s\n%s\n" % (cur_record.title, sequence))
        noutput += 1

    if "build-map" in options.methods:
        p = options.parameters[0]
        if p:
            outfile = IOTools.openFile(p, "w")
        else:
            outfile = options.stdout

        outfile.write("old\tnew\n")
        for old_id, new_id in list(map_seq2nid.items()):
            outfile.write("%s\t%s\n" % (old_id, new_id))
        if p:
            outfile.close()

    E.info("ninput=%i, noutput=%i, nskipped=%i, nerrors=%i" %
           (ninput, noutput, nskipped, nerrors))

    E.Stop()
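
(Note: a minimal standalone sketch of the "mask-incomplete-codons" step above; the function name mask_incomplete_codons and the default mask characters are illustrative assumptions, not part of the original script. Unlike the in-place version, it also handles a trailing partial codon without growing the sequence.)

def mask_incomplete_codons(sequence, mask_chars="nN", mask_char="n"):
    # Mask any codon that is only partially masked, so downstream
    # consumers see each codon as either fully clean or fully masked.
    seq = list(sequence)
    for x in range(0, len(seq), 3):
        codon = seq[x:x + 3]
        n_masked = len([c for c in codon if c in mask_chars])
        if 0 < n_masked < len(codon):
            seq[x:x + 3] = [mask_char] * len(codon)
    return "".join(seq)

# mask_incomplete_codons("ATGnTGAAA") -> "ATGnnnAAA"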
Example #46
0
 def __iter__(self):
     '''
     Returns an iterator that zips together bases, base qualities and mapping qualities.
     Since mquals may sometimes be missing, zip_longest fills them in with 0's.
     '''
     return itertools.zip_longest(self.bases, self.bquals, self.mquals, fillvalue=0)
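
(Note: a quick illustration of the fill behaviour; the Pileup class here is a hypothetical stand-in, not from the original source.)

import itertools

class Pileup:
    def __init__(self, bases, bquals, mquals):
        self.bases, self.bquals, self.mquals = bases, bquals, mquals

    def __iter__(self):
        # mquals may be shorter than bases, or empty; zip_longest pads it with 0
        return itertools.zip_longest(self.bases, self.bquals, self.mquals, fillvalue=0)

# list(Pileup("AC", [30, 31], [])) -> [('A', 30, 0), ('C', 31, 0)]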
Example #47
0
def main(input_config_file=None,input_values_file=None,target_file_name_prefix=None,
         file_identifier=None,output_dir_path=None,part_size=None,input_version=None,
         input_is_keygen=None,input_is_encrypt=None,input_is_keyfile=None):
    try:
        if all(arg is None for arg in [input_config_file,input_values_file,target_file_name_prefix,
                                       file_identifier,output_dir_path]):
            parser = argparse.ArgumentParser(prog='./mfg_gen.py',
                                             description="Create binary files from input config and values file",
                                             formatter_class=argparse.RawDescriptionHelpFormatter)

            parser.add_argument('--conf',
                                dest='config_file',
                                help='the input configuration csv file',
                                default=None)

            parser.add_argument('--values',
                                dest='values_file',
                                help='the input values csv file',
                                default=None)

            parser.add_argument('--prefix',
                                dest='prefix',
                                help='the unique name as each filename prefix')

            parser.add_argument('--fileid',
                                dest='fileid',
                                help='the unique file identifier (any key in values file) \
                                as each filename suffix (Default: numeric value (1,2,3...))')

            parser.add_argument('--outdir',
                                dest='outdir',
                                default=os.getcwd(),
                                help='the output directory to store the files created \
                                (Default: current directory)')

            parser.add_argument("--size",
                                dest='part_size',
                                help='Size of NVS Partition in bytes (must be multiple of 4096)')

            parser.add_argument("--version",
                                dest="version",
                                help='Set version. Default: v2',
                                choices=['v1','v2'],
                                default='v2',
                                type=str.lower)

            parser.add_argument("--keygen",
                                dest="keygen",
                                help='Generate keys for encryption. Default: false',
                                choices=['true','false'],
                                default='false',
                                type=str.lower)

            parser.add_argument("--encrypt",
                                dest="encrypt",
                                help='Set encryption mode. Default: false',
                                choices=['true','false'],
                                default='false',
                                type=str.lower)

            parser.add_argument("--keyfile",
                                dest="keyfile",
                                help='File having key for encryption (Applicable only if encryption mode is true)',
                                default=None)

            args = parser.parse_args()

            args.outdir = os.path.join(args.outdir, '')

            input_config_file = args.config_file
            input_values_file = args.values_file
            target_file_name_prefix = args.prefix
            output_dir_path = args.outdir
            part_size = args.part_size
            input_version = args.version
            input_is_keygen = args.keygen
            input_is_encrypt = args.encrypt
            input_is_keyfile = args.keyfile
            file_identifier = ''
            print_arg_str = "Invalid.\nTo generate binary --conf, --values, --prefix and --size arguments are mandatory.\
            \nTo generate encryption keys --keygen argument is mandatory."
            print_encrypt_arg_str = "Missing parameter. Enter --keygen or --keyfile."

            if args.fileid:
                file_identifier = args.fileid

            if input_config_file and input_is_encrypt.lower() == 'true' and input_is_keygen.lower() == 'true' and input_is_keyfile:
                sys.exit('Invalid. Cannot provide both --keygen and --keyfile argument together.')

            nvs_partition_gen.check_input_args(input_config_file, input_values_file, part_size, input_is_keygen,
                                               input_is_encrypt, input_is_keyfile, input_version, print_arg_str,
                                               print_encrypt_arg_str, output_dir_path)

            if not input_config_file and input_is_keygen:
                if input_is_encrypt == 'true':
                    sys.exit("Invalid.\nOnly --keyfile and --outdir arguments allowed.\n")
                # Generate Key Only
                nvs_partition_gen.nvs_part_gen(input_filename=input_config_file, output_filename=input_values_file,
                                               input_part_size=part_size, is_key_gen=input_is_keygen,
                                               encrypt_mode=input_is_encrypt, key_file=input_is_keyfile,
                                               version_no=input_version, output_dir=output_dir_path)
                exit(0)

            if not (input_config_file and input_values_file and target_file_name_prefix and part_size):
                sys.exit(print_arg_str)

        keys_in_values_file = []
        keys_in_config_file = []
        config_data_to_write = []
        key_value_data = []
        csv_file_list = []
        keys_repeat = []
        is_empty_line = False
        files_created = False
        file_identifier_value = '0'
        output_target_dir = ''
        target_values_file = None
        output_file_prefix = None

        # Verify config file is not empty
        if os.stat(input_config_file).st_size == 0:
            raise SystemExit("Oops...config file: %s is empty." % input_config_file)

        # Verify values file is not empty
        if os.stat(input_values_file).st_size == 0:
            raise SystemExit("Oops...values file: %s is empty." % input_values_file)

        # Verify config file does not have empty lines
        csv_config_file = open(input_config_file,'r')
        try:
            config_file_reader = csv.reader(csv_config_file, delimiter=',')
            for config_data in config_file_reader:
                for data in config_data:
                    empty_line = data.strip()
                    if empty_line == '':
                        is_empty_line = True
                    else:
                        is_empty_line = False
                        break
                if is_empty_line:
                    raise SystemExit("Oops...config file: %s cannot have empty lines." % input_config_file)
                if not config_data:
                    raise SystemExit("Oops...config file: %s cannot have empty lines." % input_config_file)

            csv_config_file.seek(0)

            # Extract keys from config file
            for config_data in config_file_reader:
                if 'namespace' not in config_data:
                    keys_in_config_file.append(config_data[0])
                if 'REPEAT' in config_data:
                    keys_repeat.append(config_data[0])

        except Exception as e:
            print(e)
        finally:
            csv_config_file.close()

        is_empty_line = False
        # Verify values file does not have empty lines
        csv_values_file = open(input_values_file, 'r')
        try:
            values_file_reader = csv.reader(csv_values_file, delimiter=',')
            for values_data in values_file_reader:
                for data in values_data:
                    empty_line = data.strip()
                    if empty_line == '':
                        is_empty_line = True
                    else:
                        is_empty_line = False
                        break
                if is_empty_line:
                    raise SystemExit("Oops...values file: %s cannot have empty lines." % input_values_file)
                if not values_data:
                    raise SystemExit("Oops...values file: %s cannot have empty lines." % input_values_file)

            csv_values_file.seek(0)

            # Extract keys from values file
            keys_in_values_file = next(values_file_reader)

        except Exception as e:
            print(e)
            exit(1)
        finally:
            csv_values_file.close()

        # Verify file identifier exists in values file
        if file_identifier:
            if file_identifier not in keys_in_values_file:
                raise SystemExit('Oops...target_file_identifier: %s does not exist in values file.\n' % file_identifier)

        # Verify data in the input_config_file and input_values_file
        verify_data_in_file(input_config_file, input_values_file, keys_in_config_file,
                            keys_in_values_file, keys_repeat)

        # Add config data per namespace to `config_data_to_write` list
        config_data_to_write = add_config_data_per_namespace(input_config_file)

        try:
            with open(input_values_file, 'r') as csv_values_file:
                values_file_reader = csv.reader(csv_values_file, delimiter=',')
                keys = next(values_file_reader)

            filename, file_ext = os.path.splitext(input_values_file)
            target_filename = filename + "_created" + file_ext
            if keys_repeat:
                target_values_file = set_repeat_value(keys_repeat, keys, input_values_file, target_filename)
            else:
                target_values_file = input_values_file

            csv_values_file = open(target_values_file, 'r')

            values_file_reader = csv.reader(csv_values_file, delimiter=',')
            next(values_file_reader)
            for values_data_line in values_file_reader:
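                # zip_longest pads with None when a row has fewer columns than there are keys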
                key_value_data = list(zip_longest(keys_in_values_file,values_data_line))

                # Get file identifier value from values file
                file_identifier_value = get_fileid_val(file_identifier, keys_in_config_file,
                                                       keys_in_values_file, values_data_line, key_value_data, file_identifier_value)

                key_value_pair = key_value_data[:]

                # Create new directory (if it doesn't exist) to store generated csv files
                output_target_dir = create_dir("csv/", output_dir_path)

                # Verify if output csv file does not exist
                csv_filename = target_file_name_prefix + "-" + file_identifier_value + ".csv"
                csv_file_list.append(csv_filename)
                output_csv_file = output_target_dir + csv_filename
                if os.path.isfile(output_csv_file):
                    raise SystemExit("Target csv file: %s already exists.`" % output_csv_file)

                # Add values corresponding to each key to csv target file
                add_data_to_file(config_data_to_write, key_value_pair, output_csv_file)

                # Create new directory (if it doesn't exist) to store generated bin files
                output_target_dir = create_dir("bin/", output_dir_path)

                # Verify if output bin file does not exist
                # todo for keys
                output_file_prefix = target_file_name_prefix + "-" + file_identifier_value
                output_bin_file = output_target_dir + output_file_prefix + ".bin"
                if os.path.isfile(output_bin_file):
                    raise SystemExit("Target csv file: %s already exists.`" % output_bin_file)

                # Create output csv and bin file
                if input_is_keygen.lower() == 'true' and input_is_keyfile:
                    input_is_keyfile = os.path.basename(input_is_keyfile)
                nvs_partition_gen.nvs_part_gen(input_filename=output_csv_file, output_filename=output_bin_file,
                                               input_part_size=part_size, is_key_gen=input_is_keygen,
                                               encrypt_mode=input_is_encrypt, key_file=input_is_keyfile,
                                               version_no=input_version, encr_key_prefix=output_file_prefix, output_dir=output_dir_path)
                print("CSV Generated: ", str(output_csv_file))

                files_created = True

        except Exception as e:
            print(e)
            exit(1)
        finally:
            csv_values_file.close()
        return csv_file_list, files_created, target_values_file

    except ValueError as err:
        print(err)
    except Exception:
        raise

def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)
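
(Note: a short usage check of the grouper recipe above.)

# list(grouper('ABCDEFG', 3, 'x')) -> [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]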