Example #1
 def load_object(self, path, type_name, fixed=False, n_copy=1, init_pose=None, randomly_place_on=None,
                 color=None, **kwargs):
     self._calls.append(dict(listitems({'func': self.load_object}) + listitems(locals())))
     for _ in range(n_copy):
         o = load_model(path, fixed_base=fixed, **kwargs)
         if color is not None:
             p.changeVisualShape(o, -1, rgbaColor=color)
         self.add_object(WorldObject(o, type_name, fixed=fixed), init_pose, randomly_place_on)
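
Every example in this collection leans on a `listitems` helper that is never defined here. A minimal sketch, assuming it behaves like the Python 2/3 compatibility helper of the same name in `future.utils`: it materializes a dictionary's items as a concrete list, so callers can index, sort, or mutate the dictionary while iterating.

# Minimal sketch of the helper assumed throughout these examples
# (the projects themselves likely import it from future.utils).
def listitems(d):
    """Return a list of (key, value) pairs on both Python 2 and Python 3."""
    return list(d.items())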
Example #2
def _get_keys(event_dict):
    keys = []
    stack = listitems(event_dict)
    while stack:
        k, v = stack.pop()
        keys.append(k)
        if isinstance(v, collections.Mapping):
            stack.extend(listitems(v))
    keys.sort()
    return keys
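
A hypothetical usage sketch for `_get_keys` above (the nested event dictionary is invented, and `collections.Mapping` is assumed to resolve, i.e. pre-3.10 Python or `collections.abc.Mapping`): the stack-based walk collects keys from every nesting level and returns them sorted.

event = {'run': 1, 'meta': {'site': 'T2_CH_CERN', 'lumi': {'min': 10, 'max': 42}}}
print(_get_keys(event))
# ['lumi', 'max', 'meta', 'min', 'run', 'site']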
Example #3
 def create_shape(self,
                  geom,
                  type_name,
                  fixed=False,
                  n_copy=1,
                  init_pose=None,
                  randomly_place_on=None,
                  **kwargs):
     self._calls.append(
         dict(listitems({'func': self.create_shape}) + listitems(locals())))
     for _ in range(n_copy):
         o = SHAPES[geom](**kwargs)
         p.changeVisualShape(o, -1, rgbaColor=kwargs['color'])
         self.add_object(WorldObject(o, type_name, fixed=fixed), init_pose,
                         randomly_place_on)
Example #4
 def get_mud_list(self):
     """
     Returns a sorted list of connected Muds.
     """
     muds = listitems(self)
     muds.sort()
     return [value for key, value in muds]
Example #5
def loadmap(name):
    """
    :param name: Variable name as defined in XML settings or a filename of a netCDF or PCRaster map
    load a static map either value or pcraster map or netcdf
    """
    settings = LisSettings.instance()
    value = settings.binding[name]
    filename = value

    res = None
    flagmap = False

    # Try first to load the value from settings
    try:
        res = float(value)
        flagmap = False
        load = True
    except ValueError:
        try:
            # try to read a pcraster map
            res = pcraster.readmap(value)
            flagmap = True
            load = True
        except:
            load = False

    if not load:
        # read a netcdf (single one not a stack)
        filename = '{}.{}'.format(os.path.splitext(value)[0], 'nc')

        # get mapextend of netcdf map
        # and calculate the cutting
        cut0, cut1, cut2, cut3 = CutMap.get_cuts(filename)

        # load netcdf map but only the rectangle needed
        nf1 = Dataset(filename, 'r')
        value = listitems(nf1.variables)[-1][0]  # get the last variable name
        mapnp = nf1.variables[value][cut2:cut3, cut0:cut1]
        nf1.close()

        # check if integer map (like outlets, lakes etc)
        checkint = str(mapnp.dtype)
        if checkint == "int16" or checkint == "int32":
            mapnp[mapnp.mask] = -9999
            res = numpy_operations.numpy2pcr(Nominal, mapnp, -9999)
        elif checkint == "int8":
            res = numpy_operations.numpy2pcr(Nominal, mapnp, 0)
        else:
            mapnp[np.isnan(mapnp)] = -9999
            res = numpy_operations.numpy2pcr(Scalar, mapnp, -9999)

        # if the map is a ldd
        if value.split('.')[0][-3:] == 'ldd':
            # FIXME weak...filename must contain 'ldd' string
            res = operations.ldd(operations.nominal(res))
        flagmap = True

    if settings.flags['checkfiles']:
        checkmap(name, filename, res, flagmap, 0)
    return res
Example #7
def diff(path, want, have):
    want = normalize(want)
    have = normalize(have)

    for (toleration_path, toleration_check) in listitems(tolerations):
        if fnmatchcase(path, toleration_path) and toleration_check(want, have):
            return

    if isinstance(want, dict) and isinstance(have, dict):
        for difference in diff_dicts(path, want, have):
            yield difference

    elif isinstance(want, list) and isinstance(have, list):
        for difference in diff_lists(path, want, have):
            yield difference

    elif isinstance(want, basestring) and isinstance(have, basestring):
        if want != have:
            if "\n" in want:
                yield diff_not_equal(path, want, have)
            else:
                yield not_equal(path, want, have)

    elif want != have:
        yield not_equal(path, want, have)
Example #8
    def commit_all(self):
        """Commit all outstanding buffer blocks.

        This is a synchronous call, and will not return until all buffer blocks
        are uploaded.  Raises KeepWriteError() if any blocks failed to upload.

        """
        self.repack_small_blocks(force=True, sync=True)

        with self.lock:
            items = listitems(self._bufferblocks)

        for k,v in items:
            if v.state() != _BufferBlock.COMMITTED and v.owner:
                v.owner.flush(sync=False)

        with self.lock:
            if self._put_queue is not None:
                self._put_queue.join()

                err = []
                for k,v in items:
                    if v.state() == _BufferBlock.ERROR:
                        err.append((v.locator(), v.error))
                if err:
                    raise KeepWriteError("Error writing some blocks", err, label="block")

        for k,v in items:
            # flush again with sync=True to remove committed bufferblocks from
            # the segments.
            if v.owner:
                v.owner.flush(sync=True)
Example #9
        def genericLookup(*args, **kwargs):
            """
            This function returns the mocked DBS data

            :param args: positional arguments it was called with
            :param kwargs: named arguments it was called with
            :return: the dictionary that DBS would have returned
            """
            logging.info("%s: Calling mock genericLookup",
                         self.__class__.__name__)
            for key, value in listitems(kwargs):
                # json dumps/loads converts strings to unicode strings, do the same with kwargs
                if isinstance(value, str):
                    kwargs[key] = _unicode(value)
            if kwargs:
                signature = '%s:%s' % (item, sorted(kwargs.items()))
            else:
                signature = item

            try:
                return MOCK_DATA[signature]
            except KeyError:
                msg = "Rucio mock API failed to find key for signature: {}".format(
                    signature)
                raise KeyError(msg)
Example #10
    def __init__(self, mrn, study_instance_uid, institutional_roi,
                 physician_roi, roi_name, roi_type, volume, min_dose,
                 mean_dose, max_dose, dvh_str, roi_coord, surface_area):

        for key, value in listitems(locals()):
            if key != 'self':
                setattr(self, key, value)
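
Examples #10 and #11 (and #31 below) repeat the same idiom: copy every constructor argument onto the instance via `listitems(locals())`. A self-contained sketch with invented field names:

def listitems(d):
    return list(d.items())  # stand-in for the compatibility helper

class Plan(object):
    def __init__(self, mrn, plan_name, rx_dose):
        # copy every constructor argument onto the instance, skipping 'self'
        for key, value in listitems(locals()):
            if key != 'self':
                setattr(self, key, value)

p = Plan('0001', 'boost', 54.0)
print(p.mrn, p.plan_name, p.rx_dose)  # 0001 boost 54.0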
Example #11
    def __init__(self, mrn, study_instance_uid, plan_name, fx_grp_name,
                 fx_grp_number, fx_grp_count, fx_dose, fxs, rx_dose,
                 rx_percent, normalization_method, normalization_object):

        for key, value in listitems(locals()):
            if key != 'self':
                setattr(self, key, value)
Example #12
def loadLAI(value, pcrvalue, i, pcr=False):
    """
    load Leaf are map stacks  or water use maps stacks
    """
    pcrmap = False
    settings = LisSettings.instance()
    flags = settings.flags

    try:
        map = iterReadPCRasterMap(pcrvalue)
        filename = pcrvalue
        pcrmap = True
    except:
        filename = os.path.splitext(value)[0] + '.nc'
        # get mapextend of netcdf map
        # and calculate the cutting
        cut0, cut1, cut2, cut3 = mapattrNetCDF(filename)
        nf1 = iterOpenNetcdf(filename, "", 'r')
        value = listitems(nf1.variables)[-1][0]  # get the last variable name
        mapnp = nf1.variables[value][i, cut2:cut3, cut0:cut1]
        nf1.close()
        mapC = compressArray(mapnp, pcr=False, name=filename)
        # mapnp[np.isnan(mapnp)] = -9999
        # map = numpy2pcr(Scalar, mapnp, -9999)
        # if the 'checkfiles' flag is set, or a PCRaster map was requested, use a PCRaster map
        if flags['checkfiles'] or pcr:
            map = decompress(mapC)
    if pcrmap: mapC = compressArray(map, name=filename)
    if flags['checkfiles']:
        checkmap(os.path.basename(pcrvalue), filename, map, True, 0)
    map_out = map if pcr else mapC
    if flags['nancheck']:
        nanCheckMap(map_out, filename, "'LAI*Maps' or 'WFractionMaps'")
    return map_out
Example #13
def diff(path, want, have):
  want = normalize(want)
  have = normalize(have)

  for (toleration_path, toleration_check) in listitems(tolerations):
    if fnmatchcase(path, toleration_path) and toleration_check(want, have):
      return

  if isinstance(want, dict) and isinstance(have, dict):
    for difference in diff_dicts(path, want, have):
      yield difference

  elif isinstance(want, list) and isinstance(have, list):
    for difference in diff_lists(path, want, have):
      yield difference

  elif isinstance(want, basestring) and isinstance(have, basestring):
    if want != have:
      if "\n" in want:
        yield diff_not_equal(path, want, have)
      else:
        yield not_equal(path, want, have)

  elif want != have:
    yield not_equal(path, want, have)
Example #14
def is_uid_in_all_keys(uid, uid_kwlist):
    key_answer = {}
    # Initialize a False value for each key
    for key in list(uid_kwlist):
        key_answer[key] = False
    # search for uid in each keyword of uid_kwlist
    for key, value in listitems(uid_kwlist):
        if uid in value:
            key_answer[key] = True

    final_answer = True
    # Product of all answer[key] values (except 'unique')
    for key, value in listitems(key_answer):
        if key not in 'unique':
            final_answer *= value
    return final_answer
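
A hypothetical call to `is_uid_in_all_keys` (the UIDs and keys are invented, and the function plus `listitems` from above are assumed to be in scope): the uid must appear under every key except the 'unique' bookkeeping key.

uid_kwlist = {'physician': ['1.2.3', '4.5.6'],
              'roi_type': ['1.2.3'],
              'unique': ['1.2.3', '4.5.6']}
print(is_uid_in_all_keys('1.2.3', uid_kwlist))  # 1 -> truthy, present under both real keys
print(is_uid_in_all_keys('4.5.6', uid_kwlist))  # 0 -> falsy, missing from 'roi_type'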
Example #15
    def update_player_xref(self):
        '''
        Adds missing players to player_xref table and updates dfs_salaries afterwards
        '''
        self.db.update(update_dfs_salaries_ids())
        missing = self.db.select_dict(
            missing_salaries_ids(source='fantasylabs'))

        if missing:
            nbapq = """SELECT nbacom_player_id as id, display_first_last as n FROM players"""
            nbadict = {}
            nbacount = defaultdict(int)
            for p in self.db.select_dict(nbapq):
                nbadict[p['id']] = p['n']
                nbacount[p['n']] += 1

            # loop through missing players
            # filter out players with duplicate names - need to manually resolve those
            # then look for direct match where name is not duplicated
            # then try to match using names
            insq = """INSERT INTO player_xref (nbacom_player_id, source, source_player_id, source_player_name)
                      VALUES ({}, 'fantasylabs', {}, '{}');"""

            for p in missing:
                if nbacount[p['n']] > 1:
                    logging.error('need to manually resolve {}'.format(p))
                    continue
                match = [k for k, v in listitems(nbadict) if v == p['n']]
                if match:
                    self.db.update(insq.format(match[0], p['id'], p['n']))
                    logging.debug('added to xref: {}'.format(p))
                    continue
                match = [
                    k for k, v in listitems(nbadict) if v == match_player(
                        p['n'], list(nbadict.values()), threshold=.8)
                ]
                if match:
                    self.db.update(insq.format(match[0], p['id'], p['n']))
                    logging.debug('added to xref: {}'.format(p))
                else:
                    logging.error('need to manually resolve {}'.format(p))

            # now update dfs_salaries nbacom_player_id from player_xref
            self.db.update(update_dfs_salaries_ids())

        else:
            logging.info('no missing ids in dfs_salaries')
Example #16
    def get_channel_list(self):
        """
        Returns a sorted list of cached channels.

        """
        channels = listitems(self)
        channels.sort()
        return [value for key, value in channels]
Example #17
 def __hash__(self):
     """
     Calculate the value of the hash
     """
     value = self.run.__hash__()
     value += hash(frozenset(listitems(
         self.eventsPerLumi)))  # Hash that represents the dictionary
     return value
Example #19
    def _merge_options(self, source, destination):
        for key, value in listitems(source):
            if isinstance(value, dict):
                node = destination.setdefault(key, {})
                self._merge_options(value, node)
            else:
                destination[key] = value

        return destination
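
The same merge logic as `_merge_options`, re-sketched as a standalone function with invented option names, showing how nested `source` values are folded into `destination` (scalars in `source` win, untouched `destination` keys survive):

def merge_options(source, destination):
    # same shape as _merge_options above, minus the class
    for key, value in list(source.items()):
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_options(value, node)
        else:
            destination[key] = value
    return destination

defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 5433}, 'debug': True}
print(merge_options(overrides, defaults))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}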
Example #20
    def store(self, stream, linesep=os.linesep):
        """
        Serialize this section and write it to a stream
        """

        for k, v in listitems(self):
            write_key_val(stream, k, v, linesep)

        stream.write(linesep)
Example #21
def fake_authz_headers(hmac_key,
                       method='HNLogin',
                       login='******',
                       name='Test User',
                       dn="/test/dn",
                       roles={},
                       format="list"):
    """Create fake authentication and authorisation headers compatible
    with the CMSWEB front-ends. Assumes you have the HMAC signing key
    the back-end will use to validate the headers.

    :arg str hmac_key: binary key data for signing headers.
    :arg str method: authentication method, one of X509Cert, X509Proxy,
      HNLogin, HostIP, AUCookie or None.
    :arg str login: account login name.
    :arg str name: account user name.
    :arg str dn: account X509 subject.
    :arg dict roles: role dictionary, each role with 'site' and 'group' lists.
    :returns: list of header name, value tuples to add to a HTTP request."""
    headers = {'cms-auth-status': 'OK', 'cms-authn-method': method}

    if login:
        headers['cms-authn-login'] = login

    if name:
        headers['cms-authn-name'] = name

    if dn:
        headers['cms-authn-dn'] = dn

    for name, role in viewitems(roles):
        name = 'cms-authz-' + authz_canonical(name)
        headers[name] = []
        for r in 'site', 'group':
            if r in role:
                headers[name].extend(
                    ["%s:%s" % (r, authz_canonical(v)) for v in role[r]])
        headers[name] = " ".join(headers[name])

    prefix = suffix = ""
    hkeys = list(headers)
    for hk in sorted(hkeys):
        if hk != 'cms-auth-status':
            prefix += "h%xv%x" % (len(hk), len(headers[hk]))
            suffix += "%s%s" % (hk, headers[hk])

    msg = prefix + "#" + suffix
    if PY3:
        hmac_key = encodeUnicodeToBytes(hmac_key)
        msg = encodeUnicodeToBytes(msg)
    cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
    headers['cms-authn-hmac'] = cksum
    if format == "list":
        return listitems(headers)
    else:
        return headers
Example #22
 def __init__(self, **kwargs):
     for key, value in listitems(kwargs):
         setattr(self, key, value)
     # ugly preservation of `default if kwarg is None else kwarg` semantic
     self.host = getattr(self, 'host', None) or '127.0.0.1'
     self.port = getattr(self, 'port', None) or 5551
     self.context = getattr(self, 'context', None) or zmq.Context()
     self.socket = self.context.socket(zmq.SUB)
     self.socket.connect('tcp://%s:%d' % (self.host, int(self.port)))
     self.queue = {}
Example #23
def _validateTypeAndSetDefault(sourceDict, stepDefault):
    # check primitive entries and remove the key if its value is empty/None and it has no default.
    for key, value in listitems(sourceDict):  # ACHTUNG! dict size changes while iterating
        if key not in stepDefault and value in [[], {}, None, "None"]:
            del sourceDict[key]

    # set defaults for missing composite types.
    for category in stepDefault:
        if (category not in sourceDict) or (category in sourceDict and not sourceDict[category]):
            sourceDict[category] = stepDefault[category]
Example #24
 def collection_file_paths(self, col, path_prefix='.'):
     """Return a list of file paths by recursively go through the entire collection `col`"""
     file_paths = []
     for name, item in listitems(col):
         if isinstance(item, arvados.arvfile.ArvadosFile):
             file_paths.append(os.path.join(path_prefix, name))
         elif isinstance(item, arvados.collection.Subcollection):
             new_prefix = os.path.join(path_prefix, name)
             file_paths += self.collection_file_paths(item, path_prefix=new_prefix)
     return file_paths
Example #25
    def stop(self):
        """
        Stop the server
        """
        cherrypy.engine.exit()
        cherrypy.engine.stop()

        # Ensure the next server that's started gets fresh objects
        for name, server in listitems(getattr(cherrypy, 'servers', {})):
            server.unsubscribe()
            del cherrypy.servers[name]
Example #26
def get_study_instance_uids(**kwargs):
    uids = {}
    complete_list = []
    for key, value in listitems(kwargs):
        uids[key] = QuerySQL(key, value).study_instance_uid
        complete_list.extend(uids[key])

    uids['unique'] = list(set(complete_list))
    uids['union'] = [uid for uid in uids['unique'] if is_uid_in_all_keys(uid, uids)]

    return uids
Example #27
def write_sql_connection_settings(config):

    text = [
        "%s %s" % (key, value) for key, value in listitems(config) if value
    ]
    text = '\n'.join(text)

    rel_path = "preferences/sql_connection.cnf"
    abs_file_path = os.path.join(SCRIPT_DIR, rel_path)

    with open(abs_file_path, "w") as text_file:
        text_file.write(text)
Example #28
def write_sql_connection_settings(config):
    """
    :param config: a dict with keys 'host', 'dbname', 'port' and optionally 'user' and 'password'
    """

    text = ["%s %s" % (key, value) for key, value in listitems(config) if value]
    text = '\n'.join(text)

    abs_file_path = get_settings('sql')

    with open(abs_file_path, "w") as text_file:
        text_file.write(text)
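
For an invented config dict, the body written by either variant of write_sql_connection_settings is one 'key value' line per non-empty entry; a quick sketch of just that formatting step:

config = {'host': 'localhost', 'dbname': 'dvh', 'port': '3306', 'password': ''}
print('\n'.join("%s %s" % (key, value) for key, value in config.items() if value))
# host localhost
# dbname dvh
# port 3306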
Example #29
    def getOutputFile(self, fileName, outputModule, step):
        """
        _getOutputFile_

        Takes a fileRef object and returns a DataStructs/File object as output
        """

        outputMod = self.getOutputModule(step=step, outputModule=outputModule)

        if not outputMod:
            return None

        fileRef = getattr(outputMod.files, fileName, None)
        newFile = File(locations=set())

        # Locations
        newFile.setLocation(getattr(fileRef, "location", None))

        # Runs
        runList = fileRef.runs.listSections_()
        for run in runList:
            lumis = getattr(fileRef.runs, run)
            if isinstance(lumis, dict):
                newRun = Run(int(run), *listitems(lumis))
            else:
                newRun = Run(int(run), *lumis)
            newFile.addRun(newRun)

        newFile["lfn"] = getattr(fileRef, "lfn", None)
        newFile["pfn"] = getattr(fileRef, "pfn", None)
        newFile["events"] = int(getattr(fileRef, "events", 0))
        newFile["size"] = int(getattr(fileRef, "size", 0))
        newFile["branches"] = getattr(fileRef, "branches", [])
        newFile["input"] = getattr(fileRef, "input", [])
        newFile["inputpfns"] = getattr(fileRef, "inputpfns", [])
        newFile["branch_hash"] = getattr(fileRef, "branch_hash", None)
        newFile["catalog"] = getattr(fileRef, "catalog", "")
        newFile["guid"] = getattr(fileRef, "guid", "")
        newFile["module_label"] = getattr(fileRef, "module_label", "")
        newFile["checksums"] = getattr(fileRef, "checksums", {})
        newFile["merged"] = bool(getattr(fileRef, "merged", False))
        newFile["dataset"] = getattr(fileRef, "dataset", {})
        newFile["acquisitionEra"] = getattr(fileRef, 'acquisitionEra', None)
        newFile["processingVer"] = getattr(fileRef, 'processingVer', None)
        newFile["validStatus"] = getattr(fileRef, 'validStatus', None)
        newFile["globalTag"] = getattr(fileRef, 'globalTag', None)
        newFile["prep_id"] = getattr(fileRef, 'prep_id', None)
        newFile['configURL'] = getattr(fileRef, 'configURL', None)
        newFile['inputPath'] = getattr(fileRef, 'inputPath', None)
        newFile["outputModule"] = outputModule
        newFile["fileRef"] = fileRef

        return newFile
Example #30
    def x_include_ticker(self, attr, old, new):
        if new and not self.multi_var_reg_vars[self.x.value]:
            self.multi_var_reg_vars[self.x.value] = True
        if not new and new in list(self.multi_var_reg_vars
                                   ) and self.multi_var_reg_vars[self.x.value]:
            clear_source_selection(self.sources, 'multi_var_include')
            self.multi_var_reg_vars[self.x.value] = False

        included_vars = [
            key for key, value in listitems(self.multi_var_reg_vars) if value
        ]
        included_vars.sort()
        self.sources.multi_var_include.data = {'var_name': included_vars}
Example #31
    def __init__(self, mrn, study_instance_uid, beam_number, beam_name,
                 fx_group, fxs, fx_grp_beam_count, beam_dose, beam_mu,
                 radiation_type, beam_energy_min, beam_energy_max, beam_type,
                 control_point_count, gantry_start, gantry_end, gantry_rot_dir,
                 gantry_range, gantry_min, gantry_max, collimator_start,
                 collimator_end, collimator_rot_dir, collimator_range,
                 collimator_min, collimator_max, couch_start, couch_end,
                 couch_rot_dir, couch_range, couch_min, couch_max,
                 beam_dose_pt, isocenter, ssd, treatment_machine, scan_mode,
                 scan_spot_count, beam_mu_per_deg, beam_mu_per_cp):

        for key, value in listitems(locals()):
            if key != 'self':
                setattr(self, key, value)
Example #32
    def set_form():
        """
        Instantiates a QuickConverter form to publish a form.
        """

        if project:
            args = (post and dict(listitems(post))) or {}
            args['project'] = project.pk
        else:
            args = post

        form = QuickConverter(args, files)

        return form.publish(owner, id_string=id_string, created_by=user)
Example #34
    def write_test_collection(self,
                              strip_manifest=False,
                              contents={
                                  'foo.txt': 'foo',
                                  'bar.txt': 'bar',
                                  'subdir/baz.txt': 'baz',
                              }):
        c = collection.Collection()
        for path, data in listitems(contents):
            with c.open(path, 'wb') as f:
                f.write(data)
        c.save_new()

        return (c.manifest_locator(), c.portable_data_hash(),
                c.manifest_text(strip=strip_manifest))
Example #35
def min_distances(study_instance_uid, roi_name):
    """
    Recalculate the min, mean, median, and max PTV distances for an ROI based on data in the SQL DB.
    :param study_instance_uid: uid as specified in SQL DB
    :param roi_name: roi_name as specified in SQL DB
    """

    oar_coordinates_string = DVH_SQL().query(
        'dvhs', 'roi_coord_string',
        "study_instance_uid = '%s' and roi_name = '%s'" %
        (study_instance_uid, roi_name))

    ptv_coordinates_strings = DVH_SQL().query(
        'dvhs', 'roi_coord_string',
        "study_instance_uid = '%s' and roi_type like 'PTV%%'" %
        study_instance_uid)

    if ptv_coordinates_strings:

        oar_coordinates = roi_form.get_roi_coordinates_from_string(
            oar_coordinates_string[0][0])

        ptvs = [
            roi_form.get_planes_from_string(ptv[0])
            for ptv in ptv_coordinates_strings
        ]
        tv_coordinates = roi_form.get_roi_coordinates_from_planes(
            roi_geom.union(ptvs))

        try:
            data = roi_geom.min_distances_to_target(oar_coordinates,
                                                    tv_coordinates)
            dth = roi_geom.dth(data)
            dth_string = ','.join(['%.3f' % num for num in dth])

            data_map = {
                'dist_to_ptv_min': round(float(np.min(data)), 2),
                'dist_to_ptv_mean': round(float(np.mean(data)), 2),
                'dist_to_ptv_median': round(float(np.median(data)), 2),
                'dist_to_ptv_max': round(float(np.max(data)), 2),
                'dth_string': dth_string
            }

            for key, value in listitems(data_map):
                update_dvhs_table(study_instance_uid, roi_name, key, value)

        except:
            print('dist_to_ptv calculation failure, skipping')
Example #36
    def write_test_collection(self,
                              strip_manifest=False,
                              contents = {
                                  'foo.txt' : 'foo',
                                  'bar.txt' : 'bar',
                                  'subdir/baz.txt' : 'baz',
                              }):
        c = collection.Collection()
        for path, data in listitems(contents):
            with c.open(path, 'wb') as f:
                f.write(data)
        c.save_new()

        return (c.manifest_locator(),
                c.portable_data_hash(),
                c.manifest_text(strip=strip_manifest))
Example #37
    def commit_all(self):
        """Commit all outstanding buffer blocks.

        This is a synchronous call, and will not return until all buffer blocks
        are uploaded.  Raises KeepWriteError() if any blocks failed to upload.

        """
        self.repack_small_blocks(force=True, sync=True)

        with self.lock:
            items = listitems(self._bufferblocks)

        for k, v in items:
            if v.state() != _BufferBlock.COMMITTED and v.owner:
                # Ignore blocks with a list of owners, as if they're not in COMMITTED
                # state, they're already being committed asynchronously.
                if isinstance(v.owner, ArvadosFile):
                    v.owner.flush(sync=False)

        with self.lock:
            if self._put_queue is not None:
                self._put_queue.join()

                err = []
                for k, v in items:
                    if v.state() == _BufferBlock.ERROR:
                        err.append((v.locator(), v.error))
                if err:
                    raise KeepWriteError("Error writing some blocks",
                                         err,
                                         label="block")

        for k, v in items:
            # flush again with sync=True to remove committed bufferblocks from
            # the segments.
            if v.owner:
                if isinstance(v.owner, ArvadosFile):
                    v.owner.flush(sync=True)
                elif isinstance(v.owner, list) and len(v.owner) > 0:
                    # This bufferblock is referenced by many files as a result
                    # of repacking small blocks, so don't delete it when flushing
                    # its owners, just do it after flushing them all.
                    for owner in v.owner:
                        owner.flush(sync=True)
                    self.delete_bufferblock(k)
Example #38
    def commit_all(self):
        """Commit all outstanding buffer blocks.

        This is a synchronous call, and will not return until all buffer blocks
        are uploaded.  Raises KeepWriteError() if any blocks failed to upload.

        """
        self.repack_small_blocks(force=True, sync=True)

        with self.lock:
            items = listitems(self._bufferblocks)

        for k,v in items:
            if v.state() != _BufferBlock.COMMITTED and v.owner:
                # Ignore blocks with a list of owners, as if they're not in COMMITTED
                # state, they're already being committed asynchronously.
                if isinstance(v.owner, ArvadosFile):
                    v.owner.flush(sync=False)

        with self.lock:
            if self._put_queue is not None:
                self._put_queue.join()

                err = []
                for k,v in items:
                    if v.state() == _BufferBlock.ERROR:
                        err.append((v.locator(), v.error))
                if err:
                    raise KeepWriteError("Error writing some blocks", err, label="block")

        for k,v in items:
            # flush again with sync=True to remove committed bufferblocks from
            # the segments.
            if v.owner:
                if isinstance(v.owner, ArvadosFile):
                    v.owner.flush(sync=True)
                elif isinstance(v.owner, list) and len(v.owner) > 0:
                    # This bufferblock is referenced by many files as a result
                    # of repacking small blocks, so don't delete it when flushing
                    # its owners, just do it after flushing them all.
                    for owner in v.owner:
                        owner.flush(sync=True)
                    self.delete_bufferblock(k)
Example #39
File: CORE.py Project: CoAxLab/radd
    def get_ksdata(self, nlevels=1):
        self.ksDataCond = {'corRT':[], 'errRT':[], 'goAcc':[], 'stopAcc':[]}
        self.ksDataFlat = deepcopy(self.ksDataCond)


        df = self.data.copy()
        self.ksDataFlat['corRT'].append(df[(df.ttype=='go')&(df.acc==1)].rt.values)
        self.ksDataFlat['errRT'].append(df[(df.ttype=='stop')&(df.acc==0)].rt.values)
        self.ksDataFlat['goAcc'].append(df[df.ttype=='go'].acc.mean())

        if self.ssd_method=='all':
            nssd = self.fitparams.ssd_info[1]
            self.ksDataFlat['stopAcc'].append(df.iloc[:, 3:3+nssd].mean())
        else:
            self.ksDataFlat['stopAcc'].append(df[df.ttype=='stop'].acc.mean())

        for col, vals in listitems(self.clmap):
            for lvl in vals:
                if lvl.isalnum():
                    try:
                        lvl = int(lvl)
                    except ValueError:
                        lvl = lvl
                dfi = self.data[self.data[col]==lvl]
                self.ksDataCond['corRT'].append(dfi[(dfi.ttype=='go')&(dfi.acc==1)].rt.values)
                self.ksDataCond['errRT'].append(dfi[(dfi.ttype=='stop')&(dfi.acc==0)].rt.values)
                self.ksDataCond['goAcc'].append(dfi[dfi.ttype=='go'].acc.mean())

                if self.ssd_method=='all':
                    dfStop = self.observedDF[self.observedDF[col]==lvl]
                    nssd = self.fitparams.ssd_info[1]
                    self.ksDataCond['stopAcc'].append(dfStop.iloc[:, 3:3+nssd].mean().values)
                else:
                    self.ksDataCond['stopAcc'].append(dfi[dfi.ttype=='stop'].acc.mean())

                #self.ksDataCond['stopAcc'].append(dfi[(dfi.ttype=='stop')&(dfi.probe==1)].acc.mean())

        if nlevels==1:
            return self.ksDataFlat
        return self.ksDataCond
Example #40
def restore_signal_handlers():
    for sigcode, orig_handler in listitems(orig_signal_handlers):
        signal.signal(sigcode, orig_handler)
Example #41
    def __new__(cls, name, bases, attrs):
        """
        Field shortcut creation:

        Takes field names `db_*` and creates property wrappers named
        without the `db_` prefix. So db_key -> key

        This wrapper happens on the class level, so there is no
        overhead when creating objects.  If a class already has a
        wrapper of the given name, the automatic creation is skipped.

        Notes:
            Remember to document this auto-wrapping in the class
            header, this could seem very much like magic to the user
            otherwise.
        """

        attrs["typename"] = cls.__name__
        attrs["path"] =  "%s.%s" % (attrs["__module__"], name)
        attrs["_is_deleted"] = False

        # set up the typeclass handling only if a variable _is_typeclass is set on the class
        def create_wrapper(cls, fieldname, wrappername, editable=True, foreignkey=False):
            "Helper method to create property wrappers with unique names (must be in separate call)"
            def _get(cls, fname):
                "Wrapper for getting database field"
                if _GA(cls, "_is_deleted"):
                    raise ObjectDoesNotExist("Cannot access %s: Hosting object was already deleted." % fname)
                return _GA(cls, fieldname)
            def _get_foreign(cls, fname):
                "Wrapper for returning foreignkey fields"
                if _GA(cls, "_is_deleted"):
                    raise ObjectDoesNotExist("Cannot access %s: Hosting object was already deleted." % fname)
                return _GA(cls, fieldname)
            def _set_nonedit(cls, fname, value):
                "Wrapper for blocking editing of field"
                raise FieldError("Field %s cannot be edited." % fname)
            def _set(cls, fname, value):
                "Wrapper for setting database field"
                if _GA(cls, "_is_deleted"):
                    raise ObjectDoesNotExist("Cannot set %s to %s: Hosting object was already deleted!" % (fname, value))
                _SA(cls, fname, value)
                # only use explicit update_fields in save if we actually have a
                # primary key assigned already (won't be set when first creating object)
                update_fields = [fname] if _GA(cls, "_get_pk_val")(_GA(cls, "_meta")) is not None else None
                _GA(cls, "save")(update_fields=update_fields)
            def _set_foreign(cls, fname, value):
                "Setter only used on foreign key relations, allows setting with #dbref"
                if _GA(cls, "_is_deleted"):
                    raise ObjectDoesNotExist("Cannot set %s to %s: Hosting object was already deleted!" % (fname, value))
                try:
                    value = _GA(value, "dbobj")
                except AttributeError:
                    pass
                if isinstance(value, (basestring, int)):
                    value = to_str(value, force_string=True)
                    if (value.isdigit() or value.startswith("#")):
                        # we also allow setting using dbrefs, if so we try to load the matching object.
                        # (we assume the object is of the same type as the class holding the field, if
                        # not a custom handler must be used for that field)
                        dbid = dbref(value, reqhash=False)
                        if dbid:
                            model = _GA(cls, "_meta").get_field(fname).model
                            try:
                                value = model._default_manager.get(id=dbid)
                            except ObjectDoesNotExist:
                                # maybe it is just a name that happens to look like a dbid
                                pass
                _SA(cls, fname, value)
                # only use explicit update_fields in save if we actually have a
                # primary key assigned already (won't be set when first creating object)
                update_fields = [fname] if _GA(cls, "_get_pk_val")(_GA(cls, "_meta")) is not None else None
                _GA(cls, "save")(update_fields=update_fields)
            def _del_nonedit(cls, fname):
                "wrapper for not allowing deletion"
                raise FieldError("Field %s cannot be edited." % fname)
            def _del(cls, fname):
                "Wrapper for clearing database field - sets it to None"
                _SA(cls, fname, None)
                update_fields = [fname] if _GA(cls, "_get_pk_val")(_GA(cls, "_meta")) is not None else None
                _GA(cls, "save")(update_fields=update_fields)

            # wrapper factories
            fget = lambda cls: _get(cls, fieldname)
            if not editable:
                fset = lambda cls, val: _set_nonedit(cls, fieldname, val)
            elif foreignkey:
                fget = lambda cls: _get_foreign(cls, fieldname)
                fset = lambda cls, val: _set_foreign(cls, fieldname, val)
            else:
                fset = lambda cls, val: _set(cls, fieldname, val)
            fdel = lambda cls: _del(cls, fieldname) if editable else _del_nonedit(cls,fieldname)
            # set docstrings for auto-doc
            fget.__doc__ = "A wrapper for getting database field `%s`." % fieldname
            fset.__doc__ = "A wrapper for setting (and saving) database field `%s`." % fieldname
            fdel.__doc__ = "A wrapper for deleting database field `%s`." % fieldname
            # assigning
            attrs[wrappername] = property(fget, fset, fdel)
            #type(cls).__setattr__(cls, wrappername, property(fget, fset, fdel))#, doc))

        # exclude some models that should not auto-create wrapper fields
        if cls.__name__ in ("ServerConfig", "TypeNick"):
            return
        # dynamically create the wrapper properties for all fields not already handled (manytomanyfields are always handlers)
        for fieldname, field in ((fname, field) for fname, field in listitems(attrs)
                                  if fname.startswith("db_") and type(field).__name__ != "ManyToManyField"):
            foreignkey = type(field).__name__ == "ForeignKey"
            wrappername = "dbid" if fieldname == "id" else fieldname.replace("db_", "", 1)
            if wrappername not in attrs:
                # makes sure not to overload manually created wrappers on the model
                create_wrapper(cls, fieldname, wrappername, editable=field.editable, foreignkey=foreignkey)

        return super(SharedMemoryModelBase, cls).__new__(cls, name, bases, attrs)
Example #42
 def __getitem__(self, index):
     attr, (local, upstream) = listitems(self.container)[index]
     return Difference(attr, local, upstream)
Example #43
def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
    global api_client

    logger = logging.getLogger('arvados.arv_put')
    logger.setLevel(logging.INFO)
    args = parse_arguments(arguments)
    status = 0
    if api_client is None:
        api_client = arvados.api('v1')

    # Determine the name to use
    if args.name:
        if args.stream or args.raw:
            logger.error("Cannot use --name with --stream or --raw")
            sys.exit(1)
        elif args.update_collection:
            logger.error("Cannot use --name with --update-collection")
            sys.exit(1)
        collection_name = args.name
    else:
        collection_name = "Saved at {} by {}@{}".format(
            datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
            pwd.getpwuid(os.getuid()).pw_name,
            socket.gethostname())

    if args.project_uuid and (args.stream or args.raw):
        logger.error("Cannot use --project-uuid with --stream or --raw")
        sys.exit(1)

    # Determine the parent project
    try:
        project_uuid = desired_project_uuid(api_client, args.project_uuid,
                                            args.retries)
    except (apiclient_errors.Error, ValueError) as error:
        logger.error(error)
        sys.exit(1)

    if args.progress:
        reporter = progress_writer(human_progress)
    elif args.batch_progress:
        reporter = progress_writer(machine_progress)
    else:
        reporter = None

    # If this is used by a human, and there's at least one directory to be
    # uploaded, the expected bytes calculation can take a moment.
    if args.progress and any([os.path.isdir(f) for f in args.paths]):
        logger.info("Calculating upload size, this could take some time...")
    bytes_expected = expected_bytes_for(args.paths, follow_links=args.follow_links)

    try:
        writer = ArvPutUploadJob(paths = args.paths,
                                 resume = args.resume,
                                 use_cache = args.use_cache,
                                 filename = args.filename,
                                 reporter = reporter,
                                 bytes_expected = bytes_expected,
                                 num_retries = args.retries,
                                 replication_desired = args.replication,
                                 put_threads = args.threads,
                                 name = collection_name,
                                 owner_uuid = project_uuid,
                                 ensure_unique_name = True,
                                 update_collection = args.update_collection,
                                 logger=logger,
                                 dry_run=args.dry_run,
                                 follow_links=args.follow_links)
    except ResumeCacheConflict:
        logger.error("\n".join([
            "arv-put: Another process is already uploading this data.",
            "         Use --no-cache if this is really what you want."]))
        sys.exit(1)
    except CollectionUpdateError as error:
        logger.error("\n".join([
            "arv-put: %s" % str(error)]))
        sys.exit(1)
    except ArvPutUploadIsPending:
        # Dry run check successful, return proper exit code.
        sys.exit(2)
    except ArvPutUploadNotPending:
        # No files pending for upload
        sys.exit(0)

    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
    # the originals.
    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
                            for sigcode in CAUGHT_SIGNALS}

    if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:
        logger.warning("\n".join([
            "arv-put: Resuming previous upload from last checkpoint.",
            "         Use the --no-resume option to start over."]))

    if not args.dry_run:
        writer.report_progress()
    output = None
    try:
        writer.start(save_collection=not(args.stream or args.raw))
    except arvados.errors.ApiError as error:
        logger.error("\n".join([
            "arv-put: %s" % str(error)]))
        sys.exit(1)
    except ArvPutUploadIsPending:
        # Dry run check successful, return proper exit code.
        sys.exit(2)
    except ArvPutUploadNotPending:
        # No files pending for upload
        sys.exit(0)
    except PathDoesNotExistError as error:
        logger.error("\n".join([
            "arv-put: %s" % str(error)]))
        sys.exit(1)

    if args.progress:  # Print newline to split stderr from stdout for humans.
        logger.info("\n")

    if args.stream:
        if args.normalize:
            output = writer.manifest_text(normalize=True)
        else:
            output = writer.manifest_text()
    elif args.raw:
        output = ','.join(writer.data_locators())
    else:
        try:
            if args.update_collection:
                logger.info("Collection updated: '{}'".format(writer.collection_name()))
            else:
                logger.info("Collection saved as '{}'".format(writer.collection_name()))
            if args.portable_data_hash:
                output = writer.portable_data_hash()
            else:
                output = writer.manifest_locator()
        except apiclient_errors.Error as error:
            logger.error(
                "arv-put: Error creating Collection on project: {}.".format(
                    error))
            status = 1

    # Print the locator (uuid) of the new collection.
    if output is None:
        status = status or 1
    else:
        stdout.write(output)
        if not output.endswith('\n'):
            stdout.write('\n')

    for sigcode, orig_handler in listitems(orig_signal_handlers):
        signal.signal(sigcode, orig_handler)

    if status != 0:
        sys.exit(status)

    # Success!
    return output
Example #44
                  'u:36':COMPOSITION({'C': 2, 'H': 4}),
                  'u:121':COMPOSITION({'C': 4, 'H': 6, 'N': 2, 'O': 2}),
                  'u:188':COMPOSITION({'C': -6, 'C[13]': 6}),
                  'u:199':COMPOSITION({'H[2]': 4, 'C': 2}),
                  'u:374':COMPOSITION({'H': -1}),
                  'u:1020':COMPOSITION({'C': 8, 'H': 12, 'O': 3}),
                  'u:1356':COMPOSITION({'C': 5, 'H': 9, 'O': 7, 'P': 1}),
                  'DSS':COMPOSITION({'C': 8, 'H': 10, 'O': 2}),
                  '*':COMPOSITION({})
                 })


aaModMass = dict([(name, comp.mass()) for name, comp in viewitems(aaModComp)])
"""A dictionary with exact monoisotopic masses of peptide modifications."""
#TODO change all modification instances from "UNIMOD:X" to "u:X"
for accession, composition in listitems(aaModMass):
    if accession.startswith('u:'):
        aaModMass[accession.replace('u:', 'UNIMOD:')] = composition
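
The loop above works only because `listitems` returns a snapshot, so new 'UNIMOD:' keys can be added to `aaModMass` while iterating. A minimal stand-alone illustration with made-up masses:

masses = {'u:36': 28.031, 'u:374': -1.008}
for accession, mass in list(masses.items()):  # snapshot, like listitems
    if accession.startswith('u:'):
        masses[accession.replace('u:', 'UNIMOD:')] = mass
print(sorted(masses))
# ['UNIMOD:36', 'UNIMOD:374', 'u:36', 'u:374']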


# --- Define additional constants --- #
atomicMassH = 1.00782504
atomicMassProton = 1.00727646677


expasy_rules = dict()
""" The dictionary expasy_rules contains regular expressions for cleavage rules
of the most popular proteolytic enzymes. The rules were copied from
`Pyteomics <http://pythonhosted.org/pyteomics/>`_ and initially taken from
the PeptideCutter tool at `Expasy
<http://ca.expasy.org/tools/peptidecutter/peptidecutter_enzymes.html>`_.