Example #1
def print_label_on_image(frame, top_labels):
    labels = [(label_index[index], "{0:.2f}".format(prob)) for (index, prob) in top_labels]

    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    rect_color = (0, 0, 0)
    text_color = (255, 255, 255)
    font_scale = 0.45
    thickness = 1
    start_pt = (10, 10)
    extra_space = (4, 10)

    label_offset = 0
    label_num = 0
    for label, prob in labels:
        if label_num > 0:
            font_scale = .3
        rect_pt = (start_pt[0], start_pt[1] + label_offset)
        text_size = cv2.getTextSize(label, font, font_scale, thickness)[0]
        prob_size = cv2.getTextSize(prob, font, font_scale, thickness)[0]
        prob_offset = (prob_size[0] + extra_space[0], 0)
        text_top = tuple(map(sum, list(zip(rect_pt, extra_space))))
        rect_ops_pt = tuple(map(sum, list(zip(text_top, text_size, extra_space, prob_offset))))
        text_bot = (text_top[0], rect_ops_pt[1] - extra_space[1])
        prob_bot = (text_top[0] + text_size[0] + extra_space[0], text_bot[1])
        cv2.rectangle(frame, rect_pt, rect_ops_pt, rect_color, thickness=cv2.FILLED)  # cv2.cv.CV_FILLED in OpenCV 2.x
        cv2.putText(frame, label, text_bot, font, font_scale, text_color, thickness)
        cv2.putText(frame, prob, prob_bot, font, font_scale, text_color, thickness)
        label_offset += rect_ops_pt[1] - rect_pt[1]
        label_num += 1

    return frame
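
The drawing above depends on OpenCV and a module-level label_index lookup that is not shown. As a minimal sketch with made-up label_index and top_labels values, this is the label-formatting step on its own, which is enough to check the strings that end up on the frame:

label_index = ["cat", "dog", "bird"]            # hypothetical class names
top_labels = [(1, 0.87), (0, 0.09), (2, 0.04)]  # hypothetical (index, prob) pairs

labels = [(label_index[index], "{0:.2f}".format(prob)) for (index, prob) in top_labels]
for name, prob in labels:
    print(name, prob)   # e.g. "dog 0.87"
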
Example #2
 def f(node2):
     if node2 not in visited:
         visited.add(node2)
         for i in followOnEdges:
             augmentedAdjacencyList[node2].add(i)
         list(map(f, childAdjacencyList[node2]))
         list(map(f, followOnAdjacencyList[node2]))
Example #3
    def test_timezone_inheritance(self):
        parent = Organization(id=101, name='parentOrg')
        org = Organization(id=102, name='org', partOf_id=101)

        # test that with no timezones set, defaults to UTC
        with SessionScope(db):
            db.session.add(parent)
            db.session.add(org)
            db.session.commit()
        parent, org = map(db.session.merge, (parent, org))
        assert org.timezone == 'UTC'

        # test that timezone-less child org inherits from parent
        parent.timezone = 'Asia/Tokyo'
        with SessionScope(db):
            db.session.add(parent)
            db.session.commit()
        parent, org = map(db.session.merge, (parent, org))
        assert org.timezone == 'Asia/Tokyo'

        # test that child org with timezone does NOT inherit from parent
        org.timezone = 'Europe/Rome'
        with SessionScope(db):
            db.session.add(org)
            db.session.commit()
        org = db.session.merge(org)
        assert org.timezone == 'Europe/Rome'
Example #4
    def from_cg_string(self, cg_string):
        '''
        Populate this structure from the string
        representation of a graph.
        '''
        # Reading the bulge_graph-part of the file
        self.from_bg_string(cg_string)

        #Reading the part of the file responsible for 3D information
        lines = cg_string.split('\n')
        for line in lines:
            line = line.strip()
            parts = line.split()
            if len(parts) == 0:
                continue
            if parts[0] == 'coord':
                name = parts[1]
                self.coords[name] = np.array([list(map(float, parts[2:5])), list(map(float, parts[5:8]))])
            if parts[0] == 'twist':
                name = parts[1]
                self.twists[name] = np.array([list(map(float, parts[2:5])), list(map(float, parts[5:8]))])
            if parts[0] == 'longrange':
                self.longrange[parts[1]].add(parts[2])
                self.longrange[parts[2]].add(parts[1])

            if parts[0] == 'sampled':
                self.sampled[parts[1]] = [parts[2]] + list(map(int, parts[3:]))
            if parts[0] == 'project':
                self.project_from=np.array(parts[1:], dtype=float)
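
The coord/twist parsing above packs six whitespace-separated numbers into a 2x3 NumPy array. A toy, self-contained parse of one such line (the numbers are invented) looks like this:

import numpy as np

line = "coord s0 1.0 2.0 3.0 4.0 5.0 6.0"   # invented cg-string line
parts = line.split()
name = parts[1]
coord = np.array([list(map(float, parts[2:5])), list(map(float, parts[5:8]))])
print(name, coord.shape)   # s0 (2, 3)
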
Example #5
	def __init__(self, z, df, ss=(0,0), ms=(0,0), eij=0, X0=None):
		_SPM0D.__init__(self, 'F', z, df)
		self.ss        = tuple( map(float, ss) )
		self.ms        = tuple( map(float, ms) )
		self.eij       = eij
		self.X0        = X0
		self.isanova   = True
Example #6
def get_dc_params(FIRST_GET_DC, specimen, treat_type, yn):
    """
    Prompts the user for DC field data if not provided; this is just an encapsulation function for the above program and should not be used elsewhere.

    Parameters
    -----------
    FIRST_GET_DC : is this the first time you are asking for DC data?
    specimen : what specimen do you want DC data for?
    treat_type : what kind of step was it? PTM, Tail, in field, zero field?
    yn : is DC field constant or varying? (y = constant, n = varying)

    Returns
    -----------
    GET_DC_PARAMS : whether or not to rerun this function
    FIRST_GET_DC : same as above
    yn : same as above
    DC_FIELD : field strength in Tesla
    DC_PHI : field azimuth
    DC_THETA : field polar angle
    """
    if FIRST_GET_DC:
        yn = input("Is the DC field used in this IZZI study constant or does it vary between specimen or step? (y=const) [y/N]: ")
        FIRST_GET_DC = False
    if "y" == yn: DC_FIELD,DC_PHI,DC_THETA = list(map(float, eval(input("What DC field, Phi, and Theta was used for all steps? (float (in microTesla),float,float): ")))); GET_DC_PARAMS=False
    else: DC_FIELD,DC_PHI,DC_THETA = list(map(float,eval(input("What DC field, Phi, and Theta was used for specimen %s and step %s? (float (in microTesla),float,float): "%(str(specimen),str(treat_type))))))
    return GET_DC_PARAMS,FIRST_GET_DC,yn,DC_FIELD*1e-6,DC_PHI,DC_THETA
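
The eval(input(...)) calls above are a direct Python 2 raw_input conversion and will execute whatever the user types. A safer sketch of the same parsing step (a hypothetical helper, not part of the original module) reads the three comma-separated values with map(float, ...) instead:

def read_dc_triplet(prompt="DC field (uT), Phi, Theta: "):
    # hypothetical helper: parse "value,phi,theta" without eval()
    raw = input(prompt)
    dc_field, dc_phi, dc_theta = map(float, raw.split(","))
    return dc_field * 1e-6, dc_phi, dc_theta   # microtesla -> tesla
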
Example #7
def get_differing_images(source_env, target_env):
  """Return the images that differ between Kubernetes environments.

  :param Dict[KubeObject, Dict] source_env: The Kubernetes objects in the
      source environment.
  :param Dict[KubeObject, Dict] target_env: The Kubernetes objects in the
      target environment.
  :return: A dictionary mapping image names to source label and target label.
  :rtype: Dict[str, (str, str)]
  """
  source_objs = frozenset(source_env)
  target_objs = frozenset(target_env)
  # XXX: What about missing objects?
  diffs = {}
  for obj in source_objs & target_objs:
    source_images = list(map(Image.parse, sorted(iter_images(source_env[obj]))))
    target_images = list(map(Image.parse, sorted(iter_images(target_env[obj]))))
    while source_images and target_images:
      source, target = source_images[0], target_images[0]
      if source.name == target.name:
        if source.label != target.label:
          diffs[source.name] = (source.label, target.label)
        source_images, target_images = source_images[1:], target_images[1:]
      elif source.name < target.name:
        # XXX: What about images that are in the source env but not in the
        # target env?
        source_images, target_images = source_images[1:], target_images
      else:
        # XXX: What about images that are in the target env but not in the
        # source env?
        source_images, target_images = source_images, target_images[1:]
  return diffs
Example #8
    def sort(self, key=None):
        """Sort the bins in this array (in-place).

        Optional argument 'key' is one of:

            - a function that computes a sorting key from a CopyNumArray row
            - a string identifier for an existing data column
            - a list/array/iterable of precomputed keys equal in length to the
              number of rows in this CopyNumArray.

        By default, bins are sorted by chromosomal coordinates.
        """
        if key is None:
            # Sort by chrom, then by start position
            chrom_keys = list(map(core.sorter_chrom, self.data['chromosome']))
            order = numpy.lexsort((self.data['start'], chrom_keys))
        else:
            # Sort by the given key, using a stable sort algorithm
            if isinstance(key, basestring):
                keys = self.data[key]
            elif callable(key):
                keys = list(map(key, self.data))
            else:
                if not len(key) == len(self):
                    raise ValueError("Sort key, as an array, must have the "
                                     "same length as the CopyNumArray to sort "
                                     "(%d vs. %d)." % (len(key), len(self)))
                keys = key
            order = numpy.argsort(keys, kind='mergesort')
        self.data = self.data.take(order)
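
A small, self-contained illustration of the two sort paths used above, with made-up keys: numpy.lexsort orders by the last key first (chromosome, then start), and kind='mergesort' keeps ties in their original order for user-supplied keys.

import numpy as np

chrom_keys = [1, 1, 2, 1]        # stand-in for core.sorter_chrom output
starts     = [500, 100, 50, 100]
print(np.lexsort((starts, chrom_keys)))      # [1 3 0 2]: chromosome first, then start

keys = ["b", "a", "a", "c"]
print(np.argsort(keys, kind="mergesort"))    # stable: equal keys keep input order
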
Example #9
def bpseq_output(seq, pairs, header_lines=None, removed=None):
    """Return sequence and structure information in bpseq format

    seq -- string of sequence characters, no validation
    pairs -- Pairs object or list of tuples
    header_lines -- list of header lines at the top of the file
    removed -- optional Pairs object/list of removed base pairs, written in a
        separate '# REMOVED BASE PAIRS' block

    Return value is a formatted string
    """
    result = []
    if header_lines:
        result.extend(header_lines)
    partners = pairs.toPartners(len(seq))
    for idx, (seq_symbol, partner_idx) in enumerate(zip(seq, partners)):
        if partner_idx is None:
            partner_idx = -1
        result.append(
            ' '.join(map(str, [idx + 1, seq_symbol, partner_idx + 1])))

    if removed is not None:
        result.append('# REMOVED BASE PAIRS')
        partners = removed.toPartners(len(seq))
        for idx, (seq_symbol, partner_idx) in enumerate(zip(seq, partners)):
            if partner_idx is None:
                partner_idx = -1
            result.append(
                ' '.join(map(str, [idx + 1, seq_symbol, partner_idx + 1])))

    return '\n'.join(result)
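
For reference, this is what the ' '.join(map(str, ...)) line format produces on a toy sequence (invented data, no Pairs object needed); a partner of 0 means the position is unpaired:

seq = "GCAU"
partners = [3, None, None, 0]    # position 1 pairs with 4; 2 and 3 are unpaired
lines = []
for idx, (base, partner_idx) in enumerate(zip(seq, partners)):
    if partner_idx is None:
        partner_idx = -1
    lines.append(" ".join(map(str, [idx + 1, base, partner_idx + 1])))
print("\n".join(lines))
# 1 G 4
# 2 C 0
# 3 A 0
# 4 U 1
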
Example #10
    def http_get(self, series_slug, identifier):
        """Query series information.

        :param series_slug: series slug. E.g.: tvdb1234
        :param identifier:
        """
        series_identifier = SeriesIdentifier.from_slug(series_slug)
        if not series_identifier:
            return self._bad_request('Invalid series slug')

        series = Series.find_by_identifier(series_identifier)
        if not series:
            return self._not_found('Series not found')

        if identifier == 'backlogged':
            # TODO: revisit
            allowed_qualities = self._parse(self.get_argument('allowed', default=None), str)
            allowed_qualities = list(map(int, allowed_qualities.split(','))) if allowed_qualities else []
            preferred_qualities = self._parse(self.get_argument('preferred', default=None), str)
            preferred_qualities = list(map(int, preferred_qualities.split(','))) if preferred_qualities else []
            new, existing = series.get_backlogged_episodes(allowed_qualities=allowed_qualities,
                                                           preferred_qualities=preferred_qualities)
            data = {'new': new, 'existing': existing}
            return self._ok(data=data)

        return self._bad_request('Invalid request')
Example #11
def get_dc_params(FIRST_GET_DC, specimen, treat_type, yn):
    if FIRST_GET_DC:
        yn = input("Is the DC field used in this IZZI study constant or does it vary between specimen or step? (y=const) [y/N]: ")
        FIRST_GET_DC = False
    if "y" == yn: DC_FIELD,DC_PHI,DC_THETA = list(map(float, eval(input("What DC field, Phi, and Theta was used for all steps? (float (in microTesla),float,float): ")))); GET_DC_PARAMS=False
    else: DC_FIELD,DC_PHI,DC_THETA = list(map(float,eval(input("What DC field, Phi, and Theta was used for specimen %s and step %s? (float (in microTesla),float,float): "%(str(specimen),str(treat_type))))))
    return GET_DC_PARAMS,FIRST_GET_DC,yn,DC_FIELD*1e-6,DC_PHI,DC_THETA
Example #12
 def test_all_covered_once_two_iterators(self):
     img = np.zeros((80, 80))
     img2 = np.zeros((80, 80))
     for dd, norm in fph.offsets():
         dx, dy = dd
         if 40 + dx >= 0 and 40 + dy >= 0:
             try:
                 img[40 + dx, 40 + dy] = img[40 + dx, 40 + dy] + 1
             except IndexError:
                 pass
         if (dx > 40 or dx < -40) and (dy > 40 or dy < -40):
             break
     for dd, norm in fph.offsets():
         dx, dy = dd
         if 40 + dx >= 0 and 40 + dy >= 0:
             try:
                 img2[40 + dx, 40 + dy] = img2[40 + dx, 40 + dy] + 1
             except IndexError:
                 pass
         if (dx > 40 or dx < -40) and (dy > 40 or dy < -40):
             break
     self.assertEqual(np.max(img), 1)
     self.assertEqual(
         np.min(img), 1, " - ".join(map(str, np.transpose(np.where(img == 0)))))
     self.assertEqual(np.max(img2), 1)
     self.assertEqual(
         np.min(img2), 1, " - ".join(map(str, np.transpose(np.where(img2 == 0)))))
Example #13
def main(argv=None):
    """
    :param argv: the command line args
    :return: nothing
    """
    if argv is None:
        argv = sys.argv

    # data = "0 71 87 99 99 99 101 103 113 114 115 128 128 128 129 129 147 147 163 163 163 172 199 202 216 216 228 228 241 243 243 246 256 261 262 262 264 277 300 310 310 315 315 315 327 331 335 344 345 356 371 371 376 391 409 409 409 411 418 424 424 428 442 443 444 444 444 459 463 478 482 484 499 510 523 523 523 531 538 543 543 547 558 570 572 572 581 587 587 591 607 610 612 625 646 652 659 660 670 671 673 683 686 686 686 686 687 706 706 709 715 738 739 744 754 759 774 784 785 786 787 788 809 814 815 815 833 833 834 837 853 853 858 868 872 885 887 887 902 902 903 914 922 932 934 943 947 952 956 967 981 986 986 996 1000 1001 1002 1005 1014 1030 1031 1031 1050 1050 1061 1069 1070 1080 1094 1095 1097 1101 1114 1115 1115 1130 1130 1130 1133 1148 1149 1159 1165 1168 1183 1193 1196 1197 1197 1202 1224 1229 1229 1230 1233 1243 1258 1261 1267 1277 1278 1293 1296 1296 1296 1311 1311 1312 1325 1329 1331 1332 1346 1356 1357 1365 1376 1376 1395 1395 1396 1412 1421 1424 1425 1426 1430 1440 1440 1445 1459 1470 1474 1479 1483 1492 1494 1504 1512 1523 1524 1524 1539 1539 1541 1554 1558 1568 1573 1573 1589 1592 1593 1593 1611 1611 1612 1617 1638 1639 1640 1641 1642 1652 1667 1672 1682 1687 1688 1711 1717 1720 1720 1739 1740 1740 1740 1740 1743 1753 1755 1756 1766 1767 1774 1780 1801 1814 1816 1819 1835 1839 1839 1845 1854 1854 1856 1868 1879 1883 1883 1888 1895 1903 1903 1903 1916 1927 1942 1944 1948 1963 1967 1982 1982 1982 1983 1984 1998 2002 2002 2008 2015 2017 2017 2017 2035 2050 2055 2055 2070 2081 2082 2091 2095 2099 2111 2111 2111 2116 2116 2126 2149 2162 2164 2164 2165 2170 2180 2183 2183 2185 2198 2198 2210 2210 2210 2224 2227 2254 2263 2263 2263 2279 2279 2297 2297 2298 2298 2298 2311 2312 2313 2323 2325 2327 2327 2327 2339 2355 2426"
    # n = 358
    # data_as_list = list(map(int, data.split(' ')))
    #
    # result = leaderboard_sequence(data_as_list, n, _all_masses)



    data = "114 1199 245 1312 358 128 356 922 702 709 1184 1053 959 373 959 724 1087 587 603 131 0 1298 472 1184 840 595 356 1289 128 131 99 845 1058 230 1070 586 583 1426 455 114 1068 700 817 484 1062 472 344 368 686 1167 227 1295 99 1327 475 341 364 1198 823 1295 1181 831 726 1070 1181 467 504 1186 598 228 839 345 259 240 1196 828 495 1312 954 843 712 1190 840 242 823 1085 114 1327 942 717 358 609 695 245 482 823 603 1068 1050 967 586 1298 472 581 242 1298 944 740 231 951 931 376 1199 596 128 1195 103 954 714 467 830 1082 137 236 339 1312 971 731 954 459 603 1323 227 1081"
    m = 16
    n = 330

    start = time.clock()

    data_as_list = list(map(int, data.split(' ')))
    result = convolution_cyclopeptide_sequencing(data_as_list, m, n)

    end = time.clock()

    print(end - start)

    print ("-".join(map(str, result)))
Example #14
    def find_horizontal_guides(self, item_hedges, pdy, width, excluded_items):
        view = self.view
        item = self.item
        i2v = self.view.get_matrix_i2v
        margin = self.MARGIN
        items = []
        for y in item_hedges:
            items.append(
                view.get_items_in_rectangle((0, y - margin, width, margin * 2))
            )
        try:
            guides = list(
                map(Guide, reduce(set.union, list(map(set, items))) - excluded_items)
            )
        except TypeError:
            guides = []

        # Translate edges to canvas or view coordinates
        hedges = set()
        for g in guides:
            for y in g.horizontal():
                hedges.add(i2v(g.item).transform_point(0, y)[1])

        dy, edges_y = self.find_closest(item_hedges, hedges)
        return dy, edges_y
Example #15
def create_paired_matrix(binary_1, binary_2, id_cleaning=None):
    # id_cleaning is a function object that turns a read ID that contains pair member information (like XYZ_1234:5678/1) into an
    # ID that identifies the pair itself (like XYZ_1234:5678) so we can match the two DF's IDs. In the above case a suitable
    # id_cleaning function would be lambda x: x[:-2] (gets rid of /1 and /2 from the end). Sometimes it's trickier as pair member
    # information can be somewhere in the middle, and sometimes it's not even required at all as both pair members have the same ID
    # just in two different files (1000 genomes).

    if id_cleaning is not None:
        binary_1.index = list(map(id_cleaning, binary_1.index))
        binary_2.index = list(map(id_cleaning, binary_2.index))

    common_read_ids = binary_1.index.intersection(binary_2.index)
    only_1 = binary_1.index.difference(binary_2.index)
    only_2 = binary_2.index.difference(binary_1.index)

    b_1 = binary_1.loc[common_read_ids]
    b_2 = binary_2.loc[common_read_ids]
    b_12 = b_1 * b_2  # elementwise AND
    b_ispaired = b_12.any(axis=1)  # reads with at least one allele w/ paired hits
    b_paired = b_12.loc[b_ispaired]
    b_mispaired = b_1.loc[~b_ispaired] + b_2.loc[~b_ispaired]  # elementwise AND where two ends only hit different alleles
    b_unpaired = pd.concat([binary_1.loc[only_1], binary_2.loc[only_2]])  # concatenation for reads w/ just one end mapping anywhere

    if VERBOSE:
        print(now(), ('Alignment pairing completed. %d paired, %d unpaired, %d discordant ' %
            (b_paired.shape[0], b_unpaired.shape[0], b_mispaired.shape[0])))

    return b_paired, b_mispaired, b_unpaired
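
A toy illustration of the id_cleaning argument described above, with invented read IDs: stripping the trailing /1 or /2 makes the two mates of a pair share one ID, so the index intersection can find them.

ids_1 = ["XYZ_1234:5678/1", "XYZ_1234:9999/1"]   # invented read IDs, file 1
ids_2 = ["XYZ_1234:5678/2", "XYZ_1234:0000/2"]   # invented read IDs, file 2
id_cleaning = lambda x: x[:-2]                   # drop "/1" or "/2"

clean_1 = list(map(id_cleaning, ids_1))
clean_2 = list(map(id_cleaning, ids_2))
print(set(clean_1) & set(clean_2))               # {'XYZ_1234:5678'}
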
Example #16
    def update_occupancies_tkn(self, tfile, sorted_nodes):
        """
          Update the occupancy in the Graph and the total simulation time
        """
        # http://www.regular-expressions.info/floatingpoint.html
        reg_flt = re.compile(rb'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?.')

        lastlines = s.check_output(['tail', '-2', tfile]).strip().split(b'\n')
        if not reg_flt.match(lastlines[0]):
            raise TrafoAlgoError('Cannot parse simulation output', tfile)
        else:
            if reg_flt.match(lastlines[1]):
                time = float(lastlines[1].split()[0])
                iterations = None
                tot_occ = sum(map(float, lastlines[1].split()[1:]))
                for e, occu in enumerate(lastlines[1].split()[1:]):
                    ss = sorted_nodes[e][0]
                    self.node[ss]['occupancy'] = float(occu)/tot_occ
            else :
                time = float(lastlines[0].split()[0])
                iterations = int(lastlines[-1].split()[-1])
                tot_occ = sum(map(float, lastlines[0].split()[1:]))
                for e, occu in enumerate(lastlines[0].split()[1:]):
                    ss = sorted_nodes[e][0]
                    self.node[ss]['occupancy'] = float(occu)/tot_occ

        return time, iterations
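
The occupancy update above boils down to this: the relevant simulation output line is "time occ1 occ2 ...", and each occupancy is normalised by their sum. A toy version with an invented line:

lastline = b"1000.0 0.25 0.50 0.25"      # invented simulation output line
fields = lastline.split()
time = float(fields[0])
tot_occ = sum(map(float, fields[1:]))
occupancies = [float(o) / tot_occ for o in fields[1:]]
print(time, occupancies)                 # 1000.0 [0.25, 0.5, 0.25]
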
Example #17
File: pymr.py  Project: kn45/Pymr
 def run(self):
     self._setup()
     stdin_strip = map(lambda l: l.rstrip('\r\n'), sys.stdin)
     for key, kvalues in groupby(stdin_strip, self._get_kv(0)):
         values = map(self._get_kv(1), kvalues)
         self._reduce(key, values)
     self._cleanup()
Example #18
 def __pow__(self, other):
     if self.offset != 0:
         raise TypeError("cannot exponentiate units with non-zero offset")
     if isinstance(other, int):
         return PhysicalUnit(other*self.names, pow(self.factor, other),
                             list(map(lambda x,p=other: x*p, self.powers)))
     if isinstance(other, float):
         inv_exp = 1./other
         rounded = int(N.floor(inv_exp+0.5))
         if abs(inv_exp-rounded) < 1.e-10:
             if reduce(lambda a, b: a and b,
                       list(map(lambda x, e=rounded: x%e == 0, self.powers))):
                 f = pow(self.factor, other)
                 p = list(map(lambda x,p=rounded: x/p, self.powers))
                 if reduce(lambda a, b: a and b,
                           list(map(lambda x, e=rounded: x%e == 0,
                               list(self.names.values())))):
                     names = self.names/rounded
                 else:
                     names = NumberDict()
                     if f != 1.:
                         names[str(f)] = 1
                     for i in range(len(p)):
                         names[_base_names[i]] = p[i]
                 return PhysicalUnit(names, f, p)
             else:
                 raise TypeError('Illegal exponent')
     raise TypeError('Only integer and inverse integer exponents allowed')
Example #19
def parse_ratefile(rfile, binary=False):
    """Return the content of a barriers rates-file.

    Args:
      rfile (str): Filename of a barriers rates file.
      binary (bool, optional): Set to True if the rates file is in binary format.
        Defaults to False.

    Returns:
      [[flt],[flt]]: A rate matrix.
    """
    if binary:
        with open(rfile, 'rb') as rf:
            dim, = unpack('i', rf.read(4))
            rm = []
            for e in range(dim):
                col = []
                for e in range(dim):
                    r, = unpack('d', rf.read(8))
                    col.append(r)
                rm.append(col)
            RM = list(map(list, list(zip(*rm))))
    else:
        RM = []
        with open(rfile) as rates:
            for line in rates:
                RM.append((list(map(float, line.strip().split()))))

    return RM
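
The text branch above expects one whitespace-separated row of floats per line. A self-contained stand-in (using io.StringIO instead of a real rates file) shows the map(float, ...) parse:

import io

fake_rates = io.StringIO("0.0 1.5 0.2\n0.3 0.0 0.7\n0.1 0.4 0.0\n")   # invented rates
RM = [list(map(float, line.strip().split())) for line in fake_rates]
print(RM[0])   # [0.0, 1.5, 0.2]
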
Example #20
def count(delta, file_path):
    """
    Increments counter file and returns the max number of times the file
    has been modified. Counter data must be in the form:
    concurrent tasks, max concurrent tasks (counter should be initialized to 0,0)

    :param int delta: increment value
    :param str file_path: path to shared counter file
    :return int max concurrent tasks:
    """
    fd = os.open(file_path, os.O_RDWR)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX)
        try:
            s = os.read(fd, 10)
            value, maxValue = list(map(int, s.split(b',')))  # os.read returns bytes
            value += delta
            if value > maxValue: maxValue = value
            os.lseek(fd, 0, 0)
            os.ftruncate(fd, 0)
            os.write(fd, ','.join(map(str, (value, maxValue))).encode())
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)
    finally:
        os.close(fd)
    return maxValue
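
The counter file holds "current,maximum". This sketch replays just the parse/update step from above on an in-memory string, without the fcntl locking or file descriptors:

def bump(counter_text, delta):
    value, max_value = map(int, counter_text.split(","))
    value += delta
    max_value = max(max_value, value)
    return ",".join(map(str, (value, max_value)))

state = "0,0"
for d in (+1, +1, -1, +1):
    state = bump(state, d)
print(state)   # "2,2": two tasks ran concurrently at the peak
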
Example #21
 def 下載指令(self):
     下載xml網址 = r'curl "{}?d={{{}}}&l={{{}}}&c={{{}}}"  --create-dirs -o "資料/九階教材/#1/#2_#3.xml"'.format(
         self.網站網址,
         ','.join(網站方言編號()),
         ','.join(map(str, range(1, 10))),
         ','.join(map(str, range(1, 11))),
     )
     return 下載xml網址
Example #22
 def _unicode_to_str(self, data):
     if isinstance(data, basestring):
         return str(data)
     elif isinstance(data, collections.Mapping):
         return dict(list(map(self._unicode_to_str, iter(data.items()))))
     elif isinstance(data, collections.Iterable):
         return type(data)(list(map(self._unicode_to_str, data)))
     else:
         return data
Example #23
File: osx.py  Project: bhyvex/grr
  def Run(self, unused_args):
    """Enumerate all MAC addresses."""
    libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    ifa = Ifaddrs()
    p_ifa = ctypes.pointer(ifa)
    libc.getifaddrs(ctypes.pointer(p_ifa))

    addresses = {}
    macs = {}
    ifs = set()

    m = p_ifa
    while m:
      ifname = ctypes.string_at(m.contents.ifa_name)
      ifs.add(ifname)
      try:
        iffamily = ord(m.contents.ifa_addr[1])
        if iffamily == 0x2:  # AF_INET
          data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrin))
          ip4 = "".join(map(chr, data.contents.sin_addr))
          address_type = rdf_client.NetworkAddress.Family.INET
          address = rdf_client.NetworkAddress(
              address_type=address_type, packed_bytes=ip4)
          addresses.setdefault(ifname, []).append(address)

        if iffamily == 0x12:  # AF_LINK
          data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrdl))
          iflen = data.contents.sdl_nlen
          addlen = data.contents.sdl_alen
          macs[ifname] = "".join(
              map(chr, data.contents.sdl_data[iflen:iflen + addlen]))

        if iffamily == 0x1E:  # AF_INET6
          data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrin6))
          ip6 = "".join(map(chr, data.contents.sin6_addr))
          address_type = rdf_client.NetworkAddress.Family.INET6
          address = rdf_client.NetworkAddress(
              address_type=address_type, packed_bytes=ip6)
          addresses.setdefault(ifname, []).append(address)
      except ValueError:
        # Some interfaces don't have an iffamily and will raise a null pointer
        # exception. We still want to send back the name.
        pass

      m = m.contents.ifa_next

    libc.freeifaddrs(p_ifa)

    for interface in ifs:
      mac = macs.setdefault(interface, "")
      address_list = addresses.setdefault(interface, "")
      args = {"ifname": interface}
      if mac:
        args["mac_address"] = mac
      if address_list:
        args["addresses"] = address_list
      self.SendReply(rdf_client.Interface(**args))
Example #24
File: linux.py  Project: bhyvex/grr
  def Run(self, unused_args):
    """Enumerate all interfaces and collect their MAC addresses."""
    libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
    ifa = Ifaddrs()
    p_ifa = ctypes.pointer(ifa)
    libc.getifaddrs(ctypes.pointer(p_ifa))

    addresses = {}
    macs = {}
    ifs = set()

    m = p_ifa
    while m:
      ifname = ctypes.string_at(m.contents.ifa_name)
      ifs.add(ifname)
      try:
        iffamily = ord(m.contents.ifa_addr[0])
        # TODO(hanuszczak): There are some Python 3-incompatible `chr` usages
        # here, they should be fixed.
        if iffamily == 0x2:  # AF_INET
          data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrin))
          ip4 = "".join(map(chr, data.contents.sin_addr))
          address_type = rdf_client.NetworkAddress.Family.INET
          address = rdf_client.NetworkAddress(
              address_type=address_type, packed_bytes=ip4)
          addresses.setdefault(ifname, []).append(address)

        if iffamily == 0x11:  # AF_PACKET
          data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrll))
          addlen = data.contents.sll_halen
          macs[ifname] = "".join(map(chr, data.contents.sll_addr[:addlen]))

        if iffamily == 0xA:  # AF_INET6
          data = ctypes.cast(m.contents.ifa_addr, ctypes.POINTER(Sockaddrin6))
          ip6 = "".join(map(chr, data.contents.sin6_addr))
          address_type = rdf_client.NetworkAddress.Family.INET6
          address = rdf_client.NetworkAddress(
              address_type=address_type, packed_bytes=ip6)
          addresses.setdefault(ifname, []).append(address)
      except ValueError:
        # Some interfaces don't have an iffamily and will raise a null pointer
        # exception. We still want to send back the name.
        pass

      m = m.contents.ifa_next

    libc.freeifaddrs(p_ifa)

    for interface in ifs:
      mac = macs.setdefault(interface, "")
      address_list = addresses.setdefault(interface, "")
      args = {"ifname": interface}
      if mac:
        args["mac_address"] = mac
      if addresses:
        args["addresses"] = address_list
      self.SendReply(rdf_client.Interface(**args))
Example #25
def corpus(path, encoding="utf-8"):
    """ Yields sentences of (word, tag)-tuples from the given corpus,
        which is a .txt file with a sentence on each line, 
        with slash-encoded tokens (e.g., the/DT cat/NN).
    """
    for s in open(path, encoding=encoding):
        s = list(map(lambda w: w.split("/"), s.strip().split(" ")))
        s = list(map(lambda w: (w[0].replace("&slash;", "/"), w[1]), s))
        yield s
Example #26
	def get_all_sample_rates(self):
		'''Get available sample rate combinations for the current performance level and channel combination.'''
		rates = self._cmd('GET_ALL_SAMPLE_RATES')
		self.sample_rates = []
		for line in rates.split('\n'):
			if len(line):
				digital, analog = list(map(int, map(str.strip, line.split(','))))
				self.sample_rates.append((digital, analog))
		return self.sample_rates
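
The response parsing above assumes one "digital, analog" pair per line. A toy parse with an invented response string:

rates = "500000000, 50000000\n100000000, 10000000\n"   # invented device response
sample_rates = []
for line in rates.split("\n"):
    if len(line):
        digital, analog = map(int, map(str.strip, line.split(",")))
        sample_rates.append((digital, analog))
print(sample_rates)   # [(500000000, 50000000), (100000000, 10000000)]
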
Example #27
def pblock_044(content):
    stage_number = -1

    cfs = sxml.Coefficients(
        cf_transfer_function_type=pcftype(get1(content, b'05')),
        input_units=sxml.Units(name=punit(get1(content, b'06'))),
        output_units=sxml.Units(name=punit(get1(content, b'07'))),
        numerator_list=list(map(pcfu, getn(content, b'09-10'))),
        denominator_list=list(map(pcfu, getn(content, b'12-13'))))

    return stage_number, cfs
Example #28
def map_inplace(function, list, depth=0, slice=None):
  if depth <= 0:
    if slice is None:
      list[:] = map(function, list)
    else:
      list[slice] = map(function,
        itertools.islice(list, slice.start, slice.stop, slice.step))
  else:
    for item in list:
      map_inplace(function, item, depth - 1, slice)
  return list
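
The in-place behaviour above rests on list slice assignment: writing to lst[:] (or a sub-slice) rewrites the existing list object, so other references see the mapped values. A minimal demonstration:

data = [1, 2, 3, 4]
alias = data
data[:] = map(lambda x: x * 10, data)
print(alias)          # [10, 20, 30, 40] -- the same object was updated

nested = [[1, 2], [3, 4]]
for row in nested:    # what depth=1 does: recurse one level, then map in place
    row[:] = map(str, row)
print(nested)         # [['1', '2'], ['3', '4']]
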
Example #29
	def get_connected_devices(self):
		devices = self._cmd('GET_CONNECTED_DEVICES')
		self.connected_devices = []
		for dev in devices.split('\n')[:-1]:
			active = False
			try:
				index, name, type, id, active = list(map(str.strip, dev.split(',')))
			except ValueError:
				index, name, type, id = list(map(str.strip, dev.split(',')))
			self.connected_devices.append(ConnectedDevice(type, name, id, index, active))
		return self.connected_devices
Example #30
  def LookupClients(self, keywords):
    """Returns a list of client URNs associated with keywords.

    Args:
      keywords: The list of keywords to search by.

    Returns:
      A list of client URNs.

    Raises:
      ValueError: A string (single keyword) was passed instead of an iterable.
    """
    if isinstance(keywords, basestring):
      raise ValueError(
          "Keywords should be an iterable, not a string (got %s)." % keywords)

    start_time, end_time, filtered_keywords, unversioned_keywords = (
        self._AnalyzeKeywords(keywords))

    last_seen_map = None
    if unversioned_keywords:
      last_seen_map = {}

    # TODO(user): Make keyword index datetime aware so that
    # AsMicrosecondsSinceEpoch is unnecessary.

    raw_results = self.Lookup(
        list(map(self._NormalizeKeyword, filtered_keywords)),
        start_time=start_time.AsMicrosecondsSinceEpoch(),
        end_time=end_time.AsMicrosecondsSinceEpoch(),
        last_seen_map=last_seen_map)
    if not raw_results:
      return []

    if unversioned_keywords:
      universal_last_seen_raw = {}
      self.ReadPostingLists(
          list(map(self._NormalizeKeyword, raw_results)),
          start_time=start_time.AsMicrosecondsSinceEpoch(),
          end_time=end_time.AsMicrosecondsSinceEpoch(),
          last_seen_map=universal_last_seen_raw)

      universal_last_seen = {}
      for (_, client_id), ts in iteritems(universal_last_seen_raw):
        universal_last_seen[client_id] = ts

      old_results = set()
      for keyword in unversioned_keywords:
        for result in raw_results:
          if last_seen_map[(keyword, result)] < universal_last_seen[result]:
            old_results.add(result)
      raw_results -= old_results

    return [rdf_client.ClientURN(result) for result in raw_results]
Example #31
 def __init__(self, iterable):
     self._stream = list(map(Token.make, iterable))
     self.i = 0
Example #32
 def setDomainsAndRestrictions(self, lyr, lyrName, domainDict,
                               multiColumnsDict, domLayerDict):
     """
     Adjusts the domain restriction to all attributes in the layer
     :param lyr:
     :param lyrName:
     :param domainDict:
     :param multiColumnsDict:
     :param domLayerDict:
     :return:
     """
     lyrAttributes = [i for i in lyr.fields()]
     pkIdxList = lyr.primaryKeyAttributes()
     for i in range(len(lyrAttributes)):
         attrName = lyrAttributes[i].name()
         if attrName == 'id' or 'id_' in lyrAttributes[i].name(
         ) or i in pkIdxList:
             lyr.editFormConfig().setReadOnly(i, True)
         else:
             if lyrName in domainDict.keys():
                 if attrName in list(domainDict[lyrName]['columns'].keys()):
                     refTable = domainDict[lyrName]['columns'][attrName][
                         'references']
                     refPk = domainDict[lyrName]['columns'][attrName][
                         'refPk']
                     otherKey = domainDict[lyrName]['columns'][attrName][
                         'otherKey']
                     valueDict = domainDict[lyrName]['columns'][attrName][
                         'values']
                     isMulti = self.checkMulti(lyrName, attrName,
                                               multiColumnsDict)
                     if isMulti:
                         #make filter
                         if 'constraintList' in list(
                                 domainDict[lyrName]['columns']
                             [attrName].keys()):
                             #make editDict
                             if lyrName in domLayerDict:
                                 if attrName in domLayerDict[lyrName]:
                                     filter = '{0} in ({1})'.format(
                                         refPk, ','.join(
                                             map(
                                                 str, domainDict[lyrName]
                                                 ['columns'][attrName]
                                                 ['constraintList'])))
                                     allowNull = domainDict[lyrName][
                                         'columns'][attrName]['nullable']
                                     dom = domLayerDict[lyrName][attrName]
                                     editDict = {
                                         'Layer': dom.id(),
                                         'Key': refPk,
                                         'Value': otherKey,
                                         'AllowMulti': True,
                                         'AllowNull': allowNull,
                                         'FilterExpression': filter
                                     }
                                     widgetSetup = QgsEditorWidgetSetup(
                                         'ValueRelation', editDict)
                                     lyr.setEditorWidgetSetup(
                                         i, widgetSetup)
                     else:
                         #filter value dict
                         constraintList = domainDict[lyrName]['columns'][
                             attrName]['constraintList']
                         valueRelationDict = dict()
                         for key in list(valueDict.keys()):
                             if len(constraintList) > 0:
                                 if key in constraintList:
                                     valueRelationDict[
                                         valueDict[key]] = str(key)
                             else:
                                 valueRelationDict[valueDict[key]] = str(
                                     key)
                         widgetSetup = QgsEditorWidgetSetup(
                             'ValueMap', {'map': valueRelationDict})
                         lyr.setEditorWidgetSetup(i, widgetSetup)
     return lyr
Example #33
def save_memmap_each(fnames,
                     dview=None,
                     base_name=None,
                     resize_fact=(1, 1, 1),
                     remove_init=0,
                     idx_xy=None,
                     xy_shifts=None,
                     add_to_movie=0,
                     border_to_0=0):
    """
    Create several memory mapped files using parallel processing

    Parameters:
    -----------
    fnames: list of str
        list of path to the filenames

    dview: ipyparallel dview
        used to perform computation in parallel. If None it will be single thread

    base_name: str
        Base name for the files to be created. If not given the file name itself is used

    resize_fact: tuple
        resampling factors for each dimension x,y,time. .1 = downsample 10X

    remove_init: int
        number of samples to remove from the beginning of each chunk

    idx_xy: slice operator
        used to perform slicing of the movie (to select a subportion of the movie)

    xy_shifts: list
        x and y shifts computed by a motion correction algorithm to be applied before memory mapping

    add_to_movie: float
        if movie too negative will make it positive

    border_to_0: int
        number of pixels on the border to set to the minimum of the movie

    Returns:
    --------
    fnames_tot: list
        paths to the created memory map files

    """
    order = 'C'
    pars = []
    if xy_shifts is None:
        xy_shifts = [None] * len(fnames)

    if type(resize_fact) is not list:
        resize_fact = [resize_fact] * len(fnames)

    for idx, f in enumerate(fnames):
        if base_name is not None:
            pars.append([
                f, base_name + '{:04d}'.format(idx), resize_fact[idx],
                remove_init, idx_xy, order, xy_shifts[idx], add_to_movie,
                border_to_0
            ])
        else:
            pars.append([
                f,
                os.path.splitext(f)[0], resize_fact[idx], remove_init, idx_xy,
                order, xy_shifts[idx], add_to_movie, border_to_0
            ])

    if dview is not None:
        if 'multiprocessing' in str(type(dview)):
            fnames_new = dview.map_async(save_place_holder, pars).get(4294967)
        else:
            fnames_new = dview.map_sync(save_place_holder, pars)
    else:
        fnames_new = list(map(save_place_holder, pars))

    return fnames_new
Example #34
def extract_patch_coordinates(dims, rf, stride, border_pix=0):
    """
    Partition the FOV in patches

    and return the indices in 2D and 1D (flattened, order='F') formats

    Parameters:
    ----------
    dims: tuple of int
        dimensions of the original matrix that will be divided in patches

    rf: tuple of int
        radius of receptive field, corresponds to half the size of the square patch

    stride: tuple of int
        degree of overlap of the patches
    """
    dims_large = dims
    dims = np.array(dims) - border_pix * 2

    coords_flat = []
    shapes = []
    iters = [
        list(range(rf[i], dims[i] - rf[i], 2 * rf[i] - stride[i])) +
        [dims[i] - rf[i]] for i in range(len(dims))
    ]

    coords = np.empty(list(map(len, iters)) + [len(dims)], dtype=object)  # np.object is removed in NumPy >= 1.24
    for count_0, xx in enumerate(iters[0]):
        coords_x = np.arange(xx - rf[0], xx + rf[0] + 1)
        coords_x = coords_x[(coords_x >= 0) & (coords_x < dims[0])]
        coords_x += border_pix

        for count_1, yy in enumerate(iters[1]):
            coords_y = np.arange(yy - rf[1], yy + rf[1] + 1)
            coords_y = coords_y[(coords_y >= 0) & (coords_y < dims[1])]
            coords_y += border_pix

            if len(dims) == 2:
                idxs = np.meshgrid(coords_x, coords_y)

                coords[count_0, count_1] = idxs
                shapes.append(idxs[0].shape[::-1])

                coords_ = np.ravel_multi_index(idxs, dims_large, order='F')
                coords_flat.append(coords_.flatten())
            else:  # 3D data

                if border_pix > 0:
                    raise Exception(
                        'The parameter border pix must be set to 0 for 3D data since border removal is not implemented'
                    )

                for count_2, zz in enumerate(iters[2]):
                    coords_z = np.arange(zz - rf[2], zz + rf[2] + 1)
                    coords_z = coords_z[(coords_z >= 0) & (coords_z < dims[2])]
                    idxs = np.meshgrid(coords_x, coords_y, coords_z)
                    shps = idxs[0].shape
                    shapes.append([shps[1], shps[0], shps[2]])
                    coords[count_0, count_1, count_2] = idxs
                    coords_ = np.ravel_multi_index(idxs, dims, order='F')
                    coords_flat.append(coords_.flatten())

    for i, c in enumerate(coords_flat):
        assert len(c) == np.prod(shapes[i])

    return map(np.sort, coords_flat), shapes
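
A small, self-contained illustration of the indexing used above (patch bounds invented): meshgrid builds the 2D coordinate grids for a patch, and ravel_multi_index flattens them in Fortran ('F') order.

import numpy as np

dims = (6, 6)                      # invented FOV size
coords_x = np.arange(1, 4)         # invented patch rows
coords_y = np.arange(2, 5)         # invented patch columns
idxs = np.meshgrid(coords_x, coords_y)
flat = np.ravel_multi_index(idxs, dims, order="F").flatten()
print(flat)                        # 9 flat indices in column-major order
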
Example #35
num_cols = 0
while True:
    line = sys.stdin.readline().strip('\n').split()
    if line == []:
        break
    if line == ['[']:  # deal with the case that the first row only contains "["
        continue
    if line[0] == '[':  # drop the "[" in the first row
        line = line[1:]
    if line[-1] == ']':  # drop the "]" in the last row
        line = line[:-1]
    if num_cols == 0:
        num_cols = len(line)  # initialize
    if len(line) != num_cols:
        raise Exception("All rows should be of the same length")
    line = list(map(float, line))  # string to float
    if max(line) > 1:
        raise Exception("Element value in the matrix should be normalized and no larger than 1")
    line = [int(x * 255) for x in line]  # float to integer ranging from 0 to 255
    matrix.append(line)
    num_rows += 1

if args.color == 3:
    if num_cols % 3 != 0:
        raise Exception("Number of columns should be a multiple of 3 in the color mode")
    width = num_rows
    height = old_div(num_cols, 3)
    # reform the image matrix
    image_array = [[0 for i in range(width * 3)] for j in range(height)]
    for i in range(height):
        for j in range(width):
Example #36
def parse_args(argv=None):
    prog = autoblue("py") + autoyellow("Load")
    desc = autored(pyload.core.info().description)
    epilog = autogreen(
        "*** Please refer to the included `README.md` for further details ***")

    ap = argparse.ArgumentParser(prog=prog,
                                 description=desc,
                                 epilog=epilog,
                                 add_help=False)
    pg = ap.add_argument_group(autogreen("Optional arguments"))
    sp = ap.add_subparsers(title=autogreen("Commands"),
                           dest='command',
                           help=''.join((autored("Available sub-commands ("),
                                         autoyellow("`COMMAND --help`"),
                                         autored(" for detailed help)"))))

    sc = (('start', "Start process instance"), ('quit',
                                                "Terminate process instance"),
          ('restart', "Restart process instance"), ('setup', "Setup package"),
          ('status', "Show process PID"), ('version', "Show package version"),
          ('info', "Show package info"))
    for prog, desc in sc:
        desc = autored(desc)
        prsr = sp.add_parser(prog,
                             description=desc,
                             epilog=epilog,
                             help=desc,
                             add_help=False)
        globals()['sp_' + prog] = prsr

    for prsr in pg, sp_start, sp_stop, sp_restart, sp_status, sp_setup, sp_version:
        prsr.add_argument('-h',
                          '--help',
                          action='help',
                          help=autored("Show this help message and exit"))

    for prsr in pg, sp_start, sp_stop, sp_restart, sp_status, sp_setup:
        profile_help = ''.join((autored("Config profile to use ("),
                                autoyellow("`default`"),
                                autored(" if missing)")))
        configdir_help = autored("Change path of config directory")
        prsr.add_argument('-p', '--profile', help=profile_help)
        prsr.add_argument('-c', '--configdir', help=configdir_help)

    for prsr in pg, sp_start, sp_restart:
        debug_help = ''.join((autored("Enable debug mode ("),
                              autoyellow("`-dd`"),
                              autored(" for extended debug)")))
        # webdebug_help = autored("Enable webserver debugging")
        refresh_help = ''.join((
            autored("Remove compiled files and temp folder ("),
            autoyellow("`-rr`"),
            autored(" to restore default login credentials "),
            autoyellow("`admin|pyload`"), autored(")")))
        webui_help = ''.join((autored("Enable webui interface at entered "),
                              autoyellow("`IP address:Port number`"),
                              autored(" (use defaults if missing)")))
        remote_help = ''.join((
            autored("Enable remote api interface at entered "),
            autoyellow("`IP address:Port number`"),
            autored(" (use defaults if missing)")))
        daemon_help = autored("Run as daemon")
        prsr.add_argument('-d', '--debug', action='count', help=debug_help)
        # prsr.add_argument('-w', '--webdebug', action='count', help=webdebug_help)
        prsr.add_argument('-r',
                          '--refresh',
                          '--restore',
                          action='count',
                          help=refresh_help)
        prsr.add_argument('-u', '--webui', help=webui_help)
        prsr.add_argument('-a', '--rpc', help=remote_help)
        prsr.add_argument('-D',
                          '--daemon',
                          action='store_true',
                          help=daemon_help)

    wait_help = autored("Timeout for graceful exit (in seconds)")
    sp_stop.add_argument('--wait', help=wait_help)

    # NOTE: Workaround to `required subparsers` issue in Python 2
    if not set(map(operator.itemgetter(0), sc)).intersection(argv):
        argv.append('start')

    print(logo + '\n')
    return ap.parse_args(argv)
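
The "required subparsers" workaround near the end reduces to this check: if none of the known sub-command names appear in argv, append the default 'start'. A self-contained sketch with made-up values:

import operator

sc = (("start", "..."), ("quit", "..."), ("restart", "..."))   # trimmed command table
argv = ["-d"]                                                  # hypothetical command line
if not set(map(operator.itemgetter(0), sc)).intersection(argv):
    argv.append("start")
print(argv)   # ['-d', 'start']
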
Example #37
def workerScript(jobStore,
                 config,
                 jobName,
                 jobStoreID,
                 redirectOutputToLogFile=True):
    """
    Worker process script, runs a job. 
    
    :param str jobName: The "job name" (a user friendly name) of the job to be run
    :param jobStore: The job store to use
    :param str jobStoreID: The job store ID of the job to be run
    
    :return int: 1 if a job failed, or 0 if all jobs succeeded
    """

    configureRootLogger()
    setLogLevel(config.logLevel)

    ##########################################
    #Create the worker killer, if requested
    ##########################################

    logFileByteReportLimit = config.maxLogFileSize

    if config.badWorker > 0 and random.random() < config.badWorker:
        # We need to kill the process we are currently in, to simulate worker
        # failure. We don't want to just send SIGKILL, because we can't tell
        # that from a legitimate OOM on our CI runner. We're going to send
        # SIGUSR1 so our terminations are distinctive, and then SIGKILL if that
        # didn't stick. We definitely don't want to do this from *within* the
        # process we are trying to kill, so we fork off. TODO: We can still
        # leave the killing code running after the main Toil flow is done, but
        # since it's now in a process instead of a thread, the main Python
        # process won't wait around for its timeout to expire. I think this is
        # better than the old thread-based way where all of Toil would wait
        # around to be killed.

        killTarget = os.getpid()
        sleepTime = config.badWorkerFailInterval * random.random()
        if os.fork() == 0:
            # We are the child
            # Let the parent run some amount of time
            time.sleep(sleepTime)
            # Kill it gently
            os.kill(killTarget, signal.SIGUSR1)
            # Wait for that to stick
            time.sleep(0.01)
            try:
                # Kill it harder. Hope the PID hasn't already been reused.
                # If we succeeded the first time, this will OSError
                os.kill(killTarget, signal.SIGKILL)
            except OSError:
                pass
            # Exit without doing any of Toil's cleanup
            os._exit(0)

        # We don't need to reap the child. Either it kills us, or we finish
        # before it does. Either way, init will have to clean it up for us.

    ##########################################
    #Load the environment for the jobGraph
    ##########################################

    #First load the environment for the jobGraph.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = safeUnpickleFromStream(fileHandle)
    env_blacklist = {
        "TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE", "HOME", "LOGNAME", "USER",
        "DISPLAY", "JAVA_HOME"
    }
    for i in environment:
        if i == "PATH":
            # Handle path specially. Sometimes e.g. leader may not include
            # /bin, but the Toil appliance needs it.
            if i in os.environ and os.environ[i] != '':
                # Use the provided PATH and then the local system's PATH
                os.environ[i] = environment[i] + ':' + os.environ[i]
            else:
                # Use the provided PATH only
                os.environ[i] = environment[i]
        elif i not in env_blacklist:
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    toilWorkflowDir = Toil.getLocalWorkflowDir(config.workflowID,
                                               config.workDir)

    ##########################################
    #Setup the temporary directories.
    ##########################################

    # Dir to put all this worker's temp files in.
    localWorkerTempDir = tempfile.mkdtemp(dir=toilWorkflowDir)
    os.chmod(localWorkerTempDir, 0o755)

    ##########################################
    #Setup the logging
    ##########################################

    #This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>

    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    # Do we even want to redirect output? Let the config make us not do it.
    redirectOutputToLogFile = redirectOutputToLogFile and not config.disableWorkerOutputCapture

    #What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")

    if redirectOutputToLogFile:
        # Announce that we are redirecting logging, and where it will now go.
        # This is important if we are trying to manually trace a faulty worker invocation.
        logger.info("Redirecting logging to %s", tempWorkerLogPath)
        sys.stdout.flush()
        sys.stderr.flush()

        # Save the original stdout and stderr (by opening new file descriptors
        # to the same files)
        origStdOut = os.dup(1)
        origStdErr = os.dup(2)

        # Open the file to send stdout/stderr to.
        logFh = os.open(tempWorkerLogPath,
                        os.O_WRONLY | os.O_CREAT | os.O_APPEND)

        # Replace standard output with a descriptor for the log file
        os.dup2(logFh, 1)

        # Replace standard error with a descriptor for the log file
        os.dup2(logFh, 2)

        # Since we only opened the file once, all the descriptors duped from
        # the original will share offset information, and won't clobber each
        # others' writes. See <http://stackoverflow.com/a/5284108/402891>. This
        # shouldn't matter, since O_APPEND seeks to the end of the file before
        # every write, but maybe there's something odd going on...

        # Close the descriptor we used to open the file
        os.close(logFh)

    debugging = logging.getLogger().isEnabledFor(logging.DEBUG)
    ##########################################
    #Worker log file trapped from here on in
    ##########################################

    jobAttemptFailed = False
    statsDict = MagicExpando()
    statsDict.jobs = []
    statsDict.workers.logsToMaster = []
    blockFn = lambda: True
    listOfJobs = [jobName]
    job = None
    try:

        #Put a message at the top of the log, just to make sure it's working.
        logger.info("---TOIL WORKER OUTPUT LOG---")
        sys.stdout.flush()

        logProcessContext(config)

        ##########################################
        #Connect to the deferred function system
        ##########################################
        deferredFunctionManager = DeferredFunctionManager(toilWorkflowDir)

        ##########################################
        #Load the jobGraph
        ##########################################

        jobGraph = jobStore.load(jobStoreID)
        listOfJobs[0] = str(jobGraph)
        logger.debug("Parsed job wrapper")

        ##########################################
        #Cleanup from any earlier invocation of the jobGraph
        ##########################################

        if jobGraph.command == None:
            logger.debug("Wrapper has no user job to run.")
            # Cleanup jobs already finished
            f = lambda jobs: [
                z for z in [[y for y in x if jobStore.exists(y.jobStoreID)]
                            for x in jobs] if len(z) > 0
            ]
            jobGraph.stack = f(jobGraph.stack)
            jobGraph.services = f(jobGraph.services)
            logger.debug(
                "Cleaned up any references to completed successor jobs")

        #This cleans the old log file which may
        #have been left if the job is being retried after a job failure.
        oldLogFile = jobGraph.logJobStoreFileID
        if oldLogFile != None:
            jobGraph.logJobStoreFileID = None
            jobStore.update(jobGraph)  #Update first, before deleting any files
            jobStore.deleteFile(oldLogFile)

        ##########################################
        # If a checkpoint exists, restart from the checkpoint
        ##########################################

        # The job is a checkpoint, and is being restarted after previously completing
        if jobGraph.checkpoint != None:
            logger.debug("Job is a checkpoint")
            # If the checkpoint still has extant jobs in its
            # (flattened) stack and services, its subtree didn't
            # complete properly. We handle the restart of the
            # checkpoint here, removing its previous subtree.
            if len([i for l in jobGraph.stack
                    for i in l]) > 0 or len(jobGraph.services) > 0:
                logger.debug("Checkpoint has failed.")
                # Reduce the retry count
                assert jobGraph.remainingRetryCount >= 0
                jobGraph.remainingRetryCount = max(
                    0, jobGraph.remainingRetryCount - 1)
                jobGraph.restartCheckpoint(jobStore)
            # Otherwise, the job and successors are done, and we can cleanup stuff we couldn't clean
            # because of the job being a checkpoint
            else:
                logger.debug(
                    "The checkpoint job seems to have completed okay, removing any checkpoint files to delete."
                )
                #Delete any remnant files
                list(
                    map(
                        jobStore.deleteFile,
                        list(
                            filter(jobStore.fileExists,
                                   jobGraph.checkpointFilesToDelete))))

        ##########################################
        #Setup the stats, if requested
        ##########################################

        if config.stats:
            startClock = getTotalCpuTime()

        startTime = time.time()
        while True:
            ##########################################
            #Run the jobGraph, if there is one
            ##########################################

            if jobGraph.command is not None:
                assert jobGraph.command.startswith("_toil ")
                logger.debug("Got a command to run: %s" % jobGraph.command)
                #Load the job
                job = Job._loadJob(jobGraph.command, jobStore)
                # If it is a checkpoint job, save the command
                if job.checkpoint:
                    jobGraph.checkpoint = jobGraph.command

                # Create a fileStore object for the job
                fileStore = AbstractFileStore.createFileStore(
                    jobStore,
                    jobGraph,
                    localWorkerTempDir,
                    blockFn,
                    caching=not config.disableCaching)
                with job._executor(jobGraph=jobGraph,
                                   stats=statsDict if config.stats else None,
                                   fileStore=fileStore):
                    with deferredFunctionManager.open() as defer:
                        with fileStore.open(job):
                            # Get the next block function to wait on committing this job
                            blockFn = fileStore.waitForCommit

                            job._runner(jobGraph=jobGraph,
                                        jobStore=jobStore,
                                        fileStore=fileStore,
                                        defer=defer)

                            # When the job succeeds, start committing files immediately.
                            fileStore.startCommit(jobState=False)

                # Accumulate messages from this job & any subsequent chained jobs
                statsDict.workers.logsToMaster += fileStore.loggingMessages

            else:
                #The command may be None, in which case the jobGraph is either
                #a shell ready to be deleted or has been scheduled after a
                #failure in order to clean up
                logger.debug("No user job to run, so finishing")
                break

            if AbstractFileStore._terminateEvent.isSet():
                raise RuntimeError("The termination flag is set")

            ##########################################
            #Establish if we can run another jobGraph within the worker
            ##########################################
            successorJobGraph = nextChainableJobGraph(jobGraph, jobStore)
            if successorJobGraph is None or config.disableChaining:
                # Can't chain any more jobs.
                # TODO: why don't we commit the last job's file store? Its
                # async uploads might otherwise never finish.
                # If we do call startCommit here it messes with the job
                # itself and Toil thinks the job needs to run again.
                break

            ##########################################
            #We have a single successor job that is not a checkpoint job.
            #We transplant the successor jobGraph command and stack
            #into the current jobGraph object so that it can be run
            #as if it were a command that was part of the current jobGraph.
            #We can then delete the successor jobGraph in the jobStore, as it is
            #wholly incorporated into the current jobGraph.
            ##########################################

            # add the successor to the list of jobs run
            listOfJobs.append(str(successorJobGraph))

            #Clone the jobGraph and its stack
            jobGraph = copy.deepcopy(jobGraph)

            #Remove the successor jobGraph
            jobGraph.stack.pop()

            #Transplant the command and stack to the current jobGraph
            jobGraph.command = successorJobGraph.command
            jobGraph.stack += successorJobGraph.stack
            # include some attributes for better identification of chained jobs in
            # logging output
            jobGraph.unitName = successorJobGraph.unitName
            jobGraph.jobName = successorJobGraph.jobName
            assert jobGraph.memory >= successorJobGraph.memory
            assert jobGraph.cores >= successorJobGraph.cores

            #Build a fileStore to update the job
            fileStore = AbstractFileStore.createFileStore(
                jobStore,
                jobGraph,
                localWorkerTempDir,
                blockFn,
                caching=not config.disableCaching)

            #Update blockFn
            blockFn = fileStore.waitForCommit

            #Add successorJobGraph to those to be deleted
            fileStore.jobsToDelete.add(successorJobGraph.jobStoreID)

            #This will update the job once the previous job is done
            fileStore.startCommit(jobState=True)

            #Clone the jobGraph and its stack again, so that updates to it do
            #not interfere with this update
            jobGraph = copy.deepcopy(jobGraph)

            logger.debug("Starting the next job")

        ##########################################
        #Finish up the stats
        ##########################################
        if config.stats:
            totalCPUTime, totalMemoryUsage = getTotalCpuTimeAndMemoryUsage()
            statsDict.workers.time = str(time.time() - startTime)
            statsDict.workers.clock = str(totalCPUTime - startClock)
            statsDict.workers.memory = str(totalMemoryUsage)

        # log the worker log path here so that if the file is truncated the path can still be found
        if redirectOutputToLogFile:
            logger.info(
                "Worker log can be found at %s. Set --cleanWorkDir to retain this log",
                localWorkerTempDir)

        logger.info(
            "Finished running the chain of jobs on this node, we ran for a total of %f seconds",
            time.time() - startTime)

    ##########################################
    #Trapping where worker goes wrong
    ##########################################
    except:  #Case that something goes wrong in worker
        traceback.print_exc()
        logger.error("Exiting the worker because of a failed job on host %s",
                     socket.gethostname())
        AbstractFileStore._terminateEvent.set()

    ##########################################
    #Wait for the asynchronous chain of writes/updates to finish
    ##########################################

    blockFn()

    ##########################################
    #All the asynchronous worker/update threads must be finished now,
    #so safe to test if they completed okay
    ##########################################

    if AbstractFileStore._terminateEvent.isSet():
        jobGraph = jobStore.load(jobStoreID)
        jobAttemptFailed = True

    ##########################################
    #Cleanup
    ##########################################

    # Close the worker logging
    # Flush at the Python level
    sys.stdout.flush()
    sys.stderr.flush()
    if redirectOutputToLogFile:
        # Flush at the OS level
        os.fsync(1)
        os.fsync(2)

        # Close redirected stdout and replace with the original standard output.
        os.dup2(origStdOut, 1)

        # Close redirected stderr and replace with the original standard error.
        os.dup2(origStdErr, 2)

        # sys.stdout and sys.stderr don't need to be modified at all. We don't
        # need to call redirectLoggerStreamHandlers since they still log to
        # sys.stderr

        # Close our extra handles to the original standard output and standard
        # error streams, so we don't leak file handles.
        os.close(origStdOut)
        os.close(origStdErr)

    # Now our file handles are in exactly the state they were in before.

    # Copy back the log file to the global dir, if needed.
    # Note that we work with bytes instead of characters so we can seek
    # relative to the end (since Python won't decode Unicode backward, or even
    # interpret seek offsets in characters for us). TODO: We may get invalid or
    # just different Unicode by breaking up a character at the boundary!
    if jobAttemptFailed and redirectOutputToLogFile:
        jobGraph.logJobStoreFileID = jobStore.getEmptyFileStoreID(
            jobGraph.jobStoreID, cleanup=True)
        jobGraph.chainedJobs = listOfJobs
        with jobStore.updateFileStream(jobGraph.logJobStoreFileID) as w:
            with open(tempWorkerLogPath, 'rb') as f:
                if os.path.getsize(
                        tempWorkerLogPath) > logFileByteReportLimit != 0:
                    if logFileByteReportLimit > 0:
                        f.seek(-logFileByteReportLimit,
                               2)  # seek to last tooBig bytes of file
                    elif logFileByteReportLimit < 0:
                        f.seek(logFileByteReportLimit,
                               0)  # seek to first tooBig bytes of file
                # Dump the possibly-invalid-Unicode bytes into the log file
                w.write(f.read())  # TODO load file using a buffer
        jobStore.update(jobGraph)

    elif ((debugging or (config.writeLogsFromAllJobs
                         and not jobName.startswith(CWL_INTERNAL_JOBS)))
          and redirectOutputToLogFile):  # write log messages
        with open(tempWorkerLogPath, 'rb') as logFile:
            if os.path.getsize(
                    tempWorkerLogPath) > logFileByteReportLimit != 0:
                if logFileByteReportLimit > 0:
                    logFile.seek(-logFileByteReportLimit,
                                 2)  # seek to last tooBig bytes of file
                elif logFileByteReportLimit < 0:
                    logFile.seek(logFileByteReportLimit,
                                 0)  # seek to first tooBig bytes of file
            # Make sure lines are Unicode so they can be JSON serialized as part of the dict.
            # We may have damaged the Unicode text by cutting it at an arbitrary byte so we drop bad characters.
            logMessages = [
                line.decode('utf-8', 'ignore')
                for line in logFile.read().splitlines()
            ]
        statsDict.logs.names = listOfJobs
        statsDict.logs.messages = logMessages

    if (debugging or config.stats or statsDict.workers.logsToMaster
        ) and not jobAttemptFailed:  # We have stats/logging to report back
        if USING_PYTHON2:
            jobStore.writeStatsAndLogging(
                json.dumps(statsDict, ensure_ascii=True))
        else:
            jobStore.writeStatsAndLogging(
                json.dumps(statsDict, ensure_ascii=True).encode())

    #Remove the temp dir
    cleanUp = config.cleanWorkDir
    if cleanUp == 'always' or (cleanUp == 'onSuccess' and
                               not jobAttemptFailed) or (cleanUp == 'onError'
                                                         and jobAttemptFailed):
        shutil.rmtree(localWorkerTempDir)

    #This must happen after the log file is done with, else there is no place to put the log
    if (not jobAttemptFailed) and jobGraph.command is None and len(
            jobGraph.stack) == 0 and len(jobGraph.services) == 0:
        # We can now safely get rid of the jobGraph
        jobStore.delete(jobGraph.jobStoreID)

    if jobAttemptFailed:
        return 1
    else:
        return 0
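A standalone sketch of the log-truncation rule used above when the worker log is copied back (plain file I/O only, not Toil's API; the helper name is made up): a positive byte limit keeps only the tail of the file, a negative limit keeps only the head, and zero disables truncation.

import os

def read_truncated_log(path, byte_limit):
    # byte_limit > 0: keep only the last byte_limit bytes (the tail)
    # byte_limit < 0: keep only the first abs(byte_limit) bytes (the head)
    # byte_limit == 0: no truncation at all
    with open(path, 'rb') as f:
        if byte_limit == 0 or os.path.getsize(path) <= abs(byte_limit):
            return f.read()
        if byte_limit > 0:
            f.seek(-byte_limit, os.SEEK_END)  # seek relative to the end, in bytes
            return f.read()
        return f.read(-byte_limit)            # just the head of the file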
Exemplo n.º 38
0
def main(args):
    files = args.cgfiles

    #Uncomment the following line to display the files in a random order.
    #random.shuffle(files)

    #Prepare the pyplot figure
    totalFigures = len(files)
    figuresPerLine = int(math.ceil(math.sqrt(totalFigures)))
    fig, ax = plt.subplots(int(math.ceil(totalFigures / figuresPerLine)),
                           figuresPerLine,
                           squeeze=False,
                           figsize=(8, 8))

    #Background color of figure (not plot)
    if args.style == "WOB":
        fig.patch.set_facecolor('black')

    #Plot one projection per file.
    for i, file_ in enumerate(files):
        #get the subplot axes (Note: axes != axis in matplotlib)
        current_axes = ax[i // figuresPerLine, i % figuresPerLine]

        #Parse the file
        cg = ftmc.CoarseGrainRNA(file_)

        # Random projection direction, if no direction present in the file
        if args.proj_direction:
            direction = list(map(float, args.proj_direction.split(",")))
        elif cg.project_from is not None:
            direction = cg.project_from
        else:
            direction = ftuv.get_random_vector()

        #Generate the projection object
        proj = ftmp.Projection2D(cg,
                                 direction,
                                 rotation=180,
                                 project_virtual_atoms=args.virtual_atoms)

        #Simulate a reduced resolution of the image.
        if args.condense:
            proj.condense(args.condense)

        target_elems = []
        if args.show_distances:

            try:
                num_elems = int(args.show_distances)
            except ValueError:
                target_elems = args.show_distances.split(",")
            else:
                if num_elems > len(proj._coords.keys()):
                    raise ValueError(
                        "--show-distances must not be greater than {} for the current projection ({}:'{}')"
                        .format(len(proj._coords.keys()), i, file_))
                elems = list(proj._coords.keys())
                random.shuffle(elems)
                while len(target_elems) < num_elems:
                    r = random.random()
                    if r < 0.4:
                        hairpins = [
                            x for x in elems
                            if x[0] == "h" and x not in target_elems
                        ]
                        if hairpins:
                            target_elems.append(hairpins[0])
                            continue
                    if r < 0.6:
                        multiloops = [
                            x for x in elems
                            if x[0] == "m" and x not in target_elems
                        ]
                        if multiloops:
                            target_elems.append(multiloops[0])
                            continue
                    others = [x for x in elems if x not in target_elems]
                    target_elems.append(others[0])
        comb = list(it.combinations(target_elems, 2))
        #print(comb, target_elems)
        if args.label_elements:
            target_elems = list(proj._coords.keys())
        line2dproperties = {}
        if args.style == "BOW":
            line2dproperties["color"] = "black"
        elif args.style == "WOB":
            line2dproperties["color"] = "white"

        #Plot the projection #
        proj.plot(current_axes,
                  margin=15,
                  linewidth=3,
                  add_labels=set(target_elems),
                  line2dproperties=line2dproperties,
                  show_distances=comb,
                  print_distances=args.print_distances)

        #Uncomment to set a substring of the filename as a title
        #current_axes.set_title(file_[-15:])

        #Hide the x- and y axis.
        current_axes.get_xaxis().set_visible(False)
        current_axes.get_yaxis().set_visible(False)

        #Print the projection direction and the filename in the plot.
        if args.show_direction or args.p:
            current_axes.text(0.01,
                              0.01,
                              "Projection direction: ({},{},{})".format(
                                  round(direction[0],
                                        3), round(direction[1], 3),
                                  round(direction[2], 3)),
                              transform=current_axes.transAxes)
        if args.show_filename or args.p:
            current_axes.text(
                0.01,
                0.99,
                "File: {}".format(file_),
                transform=current_axes.transAxes,
                verticalalignment='top',
            )

        #Change the background color of the plot area.
        if args.style == "WOB":
            current_axes.set_facecolor('black')

    #Hide additional subplots with no projection on them.
    for i in range(
            len(files),
            int(math.ceil(totalFigures / figuresPerLine)) * figuresPerLine):
        ax[i // figuresPerLine, i % figuresPerLine].axis('off')

    # Reduce the space outside of the plots and between the subplots.
    plt.subplots_adjust(left=0.025,
                        right=0.975,
                        bottom=0.025,
                        top=0.975,
                        wspace=0.05,
                        hspace=0.05)

    if args.out:
        for ofname in args.out:
            if args.out_path:
                ofname = os.path.join(args.out_path, ofname)
            ofname = os.path.expanduser(ofname)
            plt.savefig(ofname, format=ofname[-3:])
    if not args.out or args.show:
        #Show the plot and clear it from the internal memory of matplotlib.
        plt.show()
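The plotting code above packs one projection per file into a near-square grid of subplots and blanks the unused cells; a minimal sketch of just that layout arithmetic (hypothetical helper, matplotlib only):

import math
import matplotlib.pyplot as plt

def near_square_grid(n_plots):
    # ceil(sqrt(n)) columns, as many rows as needed -- the same rule as above.
    cols = int(math.ceil(math.sqrt(n_plots)))
    rows = int(math.ceil(n_plots / cols))
    return rows, cols

rows, cols = near_square_grid(7)           # 7 plots -> a 3x3 grid
fig, ax = plt.subplots(rows, cols, squeeze=False)
for i in range(7, rows * cols):            # hide the two unused subplots
    ax[i // cols, i % cols].axis('off')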
def blockwise_view(a, blockshape, aslist=False, require_aligned_blocks=True):
    """
    Return a 2N-D view of the given N-D array, rearranged so each ND block (tile)
    of the original array is indexed by its block address using the first N
    indexes of the output array.

    Note: This function is nearly identical to ``skimage.util.view_as_blocks()``, except:
          - "imperfect" block shapes are permitted (via require_aligned_blocks=False)
          - only contiguous arrays are accepted.  (This function will NOT silently copy your array.)
            As a result, the return value is *always* a view of the input.

    Args:
        a: The ND array

        blockshape: The tile shape

        aslist: If True, return all blocks as a list of ND blocks
                instead of a 2D array indexed by ND block coordinate.

        require_aligned_blocks: If True, check to make sure no data is "left over"
                                in each row/column/etc. of the output view.
                                That is, the blockshape must divide evenly into the full array shape.
                                If False, "leftover" items that cannot be made into complete blocks
                                will be discarded from the output view.

    Here's a 2D example (this function also works for ND):

    >>> a = numpy.arange(1,21).reshape(4,5)
    >>> print(a)
    [[ 1  2  3  4  5]
     [ 6  7  8  9 10]
     [11 12 13 14 15]
     [16 17 18 19 20]]

    >>> view = blockwise_view(a, (2,2), require_aligned_blocks=False)
    >>> print(view)
    [[[[ 1  2]
       [ 6  7]]
    <BLANKLINE>
      [[ 3  4]
       [ 8  9]]]
    <BLANKLINE>
    <BLANKLINE>
     [[[11 12]
       [16 17]]
    <BLANKLINE>
      [[13 14]
       [18 19]]]]

    Inspired by the 2D example shown here: http://stackoverflow.com/a/8070716/162094
    """
    assert a.flags[
        "C_CONTIGUOUS"], "This function relies on the memory layout of the array."
    blockshape = tuple(blockshape)
    outershape = tuple(numpy.array(a.shape) // blockshape)
    view_shape = outershape + blockshape

    if require_aligned_blocks:
        assert (numpy.mod(a.shape, blockshape) == 0).all(
        ), "blockshape {} must divide evenly into array shape {}".format(
            blockshape, a.shape)

    # inner strides: strides within each block (same as original array)
    intra_block_strides = a.strides

    # outer strides: strides from one block to another
    inter_block_strides = tuple(a.strides * numpy.array(blockshape))

    # This is where the magic happens.
    # Generate a view with our new strides (outer+inner).
    view = numpy.lib.stride_tricks.as_strided(a,
                                              shape=view_shape,
                                              strides=(inter_block_strides +
                                                       intra_block_strides))

    # Special handling for VigraArrays
    if _vigra_available and isinstance(a, vigra.VigraArray) and hasattr(
            a, "axistags"):
        view_axistags = vigra.AxisTags([vigra.AxisInfo() for _ in blockshape] +
                                       list(a.axistags))
        view = vigra.taggedView(view, view_axistags)

    if aslist:
        return list(map(view.__getitem__, numpy.ndindex(outershape)))
    return view
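A brief usage sketch of the block addressing and the aslist option described in the docstring above (assumes numpy and that blockwise_view above is importable together with its module-level _vigra_available flag):

import numpy

a = numpy.arange(36).reshape(6, 6)
view = blockwise_view(a, (3, 3))                # 4-D view: block address + intra-block index
assert view.shape == (2, 2, 3, 3)
assert (view[1, 0] == a[3:6, 0:3]).all()        # the block at address (1, 0)

blocks = blockwise_view(a, (3, 3), aslist=True) # same blocks, as a flat list of 2-D views
assert len(blocks) == 4 and (blocks[2] == view[1, 0]).all()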
Exemplo n.º 40
0
    def _GreedyHierByLv(self, fullQtQ, n, offset, depth=0, withRoot=False):
        """Compute the weight distribution of one node of the tree by minimzing
        error locally.

        fullQtQ - the same matrix as QtQ in the Run method
        n - the size of the submatrix that is corresponding
            to current node
        offset - the location of the submatrix in fullQtQ that
                 is corresponding to current node
        depth - the depth of current node in the tree
        withRoot - whether the accurate root count is given

        Returns: error, inv, weights, queries
        error - the variance of query on current node with epsilon=1
        inv - for the query strategy (the actual weighted queries to be asked)
              matrix A, inv is the inverse matrix of A^TA
        weights - the weights of queries to be asked
        queries - the list of queries to be asked (all with weight 1)
        """
        if n == 1:
            return np.linalg.norm(fullQtQ[:, offset], 2)**2, \
                np.array([[1.0]]), \
                np.array([1.0]), [[offset, offset]]

        QtQ = fullQtQ[:, offset:offset + n]
        if (np.min(QtQ, axis=1) == np.max(QtQ, axis=1)).all():
            mat = np.zeros([n, n])
            mat.fill(util.old_div(1.0, n**2))
            return np.linalg.norm(QtQ[:, 0], 2)**2, \
                mat, np.array([1.0]), [[offset, offset + n - 1]]

        if n <= self._branch:
            bound = list(zip(list(range(n)), list(range(1, n + 1))))
        else:
            rem = n % self._branch
            step = util.old_div((n - rem), self._branch)
            swi = (self._branch - rem) * step
            sep = list(range(0, swi, step)) + list(range(swi, n,
                                                         step + 1)) + [n]
            bound = list(zip(sep[:-1], sep[1:]))

        serr, sinv, sdist, sq = list(
            zip(*[
                self._GreedyHierByLv(
                    fullQtQ, c[1] - c[0], offset + c[0], depth=depth + 1)
                for c in bound
            ]))
        invAuList = [c.sum(axis=0) for c in sinv]
        invAu = np.hstack(invAuList)
        k = invAu.sum()
        m1 = sum(
            map(
                lambda rng, v: np.linalg.norm(np.dot(QtQ[:, rng[0]:rng[1]], v),
                                              2)**2, bound, invAuList))
        m = np.linalg.norm(np.dot(QtQ, invAu), 2)**2
        sumerr = sum(serr)

        if withRoot:
            return sumerr, block_diag(*sinv), \
                np.hstack([[0], np.hstack(sdist)]), \
                [[offset, offset + n - 1]] + list(itertools.chain(*sq))

        decay = util.old_div(1.0, (self._branch**(util.old_div(depth, 2.0))))
        err1 = np.array(list(range(self._granu, 0, -1)))**2
        err2 = np.array(list(range(self._granu)))**2 * decay
        toterr = 1.0 / err1 * \
            (sumerr - ((m - m1) * decay + m1) * err2 / (err1 + err2 * k))

        err = toterr.min() * self._granu**2
        perc = 1 - util.old_div(np.argmin(toterr), float(self._granu))
        inv = (util.old_div(1.0, perc))**2 * (
            block_diag(*sinv) - (1 - perc)**2 / (perc**2 + k * (1 - perc)**2) *
            np.dot(invAu.reshape([n, 1]), invAu.reshape([1, n])))
        dist = np.hstack([[1 - perc], perc * np.hstack(sdist)])
        return err, inv, dist, \
            [[offset, offset + n - 1]] + list(itertools.chain(*sq))
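A standalone sketch of how the method above splits a node of size n into at most branch-many contiguous children (the sep/bound computation), with no dependence on the class (illustrative helper name):

def partition_bounds(n, branch):
    # Split range(n) into `branch` contiguous chunks whose sizes differ by at
    # most one: the first (branch - rem) chunks get `step` items, the
    # remaining `rem` chunks get `step + 1`.
    if n <= branch:
        return list(zip(range(n), range(1, n + 1)))
    rem = n % branch
    step = (n - rem) // branch
    swi = (branch - rem) * step
    sep = list(range(0, swi, step)) + list(range(swi, n, step + 1)) + [n]
    return list(zip(sep[:-1], sep[1:]))

assert partition_bounds(10, 3) == [(0, 3), (3, 6), (6, 10)]
assert partition_bounds(2, 3) == [(0, 1), (1, 2)]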
Exemplo n.º 41
0
    def __init__(self,
                 store_dir,
                 step,
                 shared,
                 block_size=None,
                 tmp=None,
                 force=False):

        self.store = gf.store.Store(store_dir, 'w')

        storeconf = self.store.config

        if step == 0:
            block_size = (1, 1, storeconf.ndistances)
        else:
            if block_size is None:
                block_size = (1, 1, 1)  # QSeisR only allows one receiver

        if len(storeconf.ns) == 2:
            block_size = block_size[1:]

        gf.builder.Builder.__init__(self,
                                    storeconf,
                                    step,
                                    block_size=block_size)

        baseconf = self.store.get_extra('qseis2d')

        conf_s = QSeisSConfigFull(**baseconf.qseis_s_config.items())
        conf_r = QSeisRConfigFull(**baseconf.qseis_r_config.items())

        conf_s.earthmodel_1d = storeconf.earthmodel_1d
        if storeconf.earthmodel_receiver_1d is not None:
            conf_r.earthmodel_receiver_1d = \
                storeconf.earthmodel_receiver_1d

        else:
            conf_r.earthmodel_receiver_1d = \
                storeconf.earthmodel_1d.extract(
                    depth_max='moho')
            # depth_max=conf_s.receiver_basement_depth*km)

        deltat = 1.0 / self.gf_config.sample_rate

        if 'time_window_min' not in shared:
            d = self.store.make_timing_params(baseconf.time_region[0],
                                              baseconf.time_region[1],
                                              force=force)

            shared['time_window_min'] = float(
                num.ceil(d['tlenmax'] / self.gf_config.sample_rate) *
                self.gf_config.sample_rate)
            shared['time_reduction'] = d['tmin_vred']

        time_window_min = shared['time_window_min']

        conf_s.nsamples = nextpow2(int(round(time_window_min / deltat)) + 1)
        conf_s.time_window = (conf_s.nsamples - 1) * deltat
        conf_r.time_reduction = shared['time_reduction']

        if step == 0:
            if 'slowness_window' not in shared:
                if conf_s.calc_slowness_window:
                    phases = [
                        storeconf.tabulated_phases[i].phases
                        for i in range(len(storeconf.tabulated_phases))
                    ]

                    all_phases = []
                    for phase_group in phases:
                        all_phases.extend(phase_group)

                    mean_source_depth = num.mean((storeconf.source_depth_min,
                                                  storeconf.source_depth_max))

                    arrivals = conf_s.earthmodel_1d.arrivals(
                        phases=all_phases,
                        distances=num.linspace(conf_s.receiver_min_distance,
                                               conf_s.receiver_max_distance,
                                               100) * cake.m2d,
                        zstart=mean_source_depth)

                    ps = num.array(
                        [arrivals[i].p for i in range(len(arrivals))])

                    slownesses = ps / (cake.r2d * cake.d2m / km)

                    shared['slowness_window'] = (0., 0.,
                                                 1.1 * float(slownesses.max()),
                                                 1.3 * float(slownesses.max()))

                else:
                    shared['slowness_window'] = conf_s.slowness_window

            conf_s.slowness_window = shared['slowness_window']

        self.qseis_s_config = conf_s
        self.qseis_r_config = conf_r
        self.qseis_baseconf = baseconf

        self.tmp = tmp
        if self.tmp is not None:
            util.ensuredir(self.tmp)

        util.ensuredir(baseconf.gf_directory)
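The loop over phase groups above (rather than a bare map call) sidesteps a Python 3 pitfall worth spelling out: map() returns a lazy iterator, so mapping a side-effecting function such as list.extend does nothing until the iterator is consumed. A minimal, self-contained illustration:

phases = [['P', 'p'], ['S', 's'], ['PP']]

lazy_result = []
map(lazy_result.extend, phases)          # lazy: never consumed, so nothing happens
assert lazy_result == []

eager_result = []
list(map(eager_result.extend, phases))   # forcing evaluation (or an explicit loop) does the work
assert eager_result == ['P', 'p', 'S', 's', 'PP']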
Exemplo n.º 42
0
    def test_cpeservice(self):
        cpelist = ['cpe:/a:openbsd:openssh:5.9p1', 'cpe:/o:linux:linux_kernel']
        rep = NmapParser.parse_fromfile(self.flist_os['fullscan']['file'])
        h1 = rep.hosts.pop()
        s = h1.services[0]
        self.assertEqual(s.cpelist[0].cpestring, cpelist[0])
        self.assertEqual(s.cpelist[1].cpestring, cpelist[1])

    def test_os_class_probabilities(self):
        p = NmapParser.parse_fromfile(self.fos_class_probabilities)
        h = p.hosts.pop()
        osc = h.os_class_probabilities().pop()
        self.assertEqual(osc.type, "general purpose")
        self.assertEqual(osc.vendor, "Linux")
        self.assertEqual(osc.osfamily, "Linux")
        self.assertEqual(osc.osgen, "3.X")
        self.assertEqual(osc.accuracy, 100)

        #<osclass type="general purpose" vendor="Linux" osfamily="Linux" osgen="3.X" accuracy="100"><cpe>cpe:/o:linux:linux_kernel:3</cpe></osclass>


if __name__ == '__main__':
    test_suite = [
        'test_fp', 'test_fpv6', 'test_osmatches_new', 'test_osclasses_new',
        'test_fpv5', 'test_osmatches_old', 'test_cpeservice',
        'test_os_class_probabilities'
    ]
    suite = unittest.TestSuite(list(map(TestNmapFP, test_suite)))
    test_result = unittest.TextTestRunner(verbosity=2).run(suite)
Exemplo n.º 43
0
def optimalShrinkage(X, return_covariance=False, method='rie'):
    """This function computes a cleaned, optimal shrinkage, 
       rotationally-invariant estimator (RIE) of the true correlation 
       matrix C underlying the noisy, in-sample estimate 
       E = 1/T X * transpose(X)
       associated to a design matrix X of shape (T, N) (T measurements 
       and N features).

       One approach to getting a cleaned estimator that predates the
       optimal shrinkage, RIE estimator consists in inverting the 
       Marcenko-Pastur equation so as to replace the eigenvalues
       from the spectrum of E by an estimation of the true ones.

       This approach is known to be numerically-unstable, in addition
       to failing to account for the overlap between the sample eigenvectors
       and the true eigenvectors. How to compute such overlaps was first
       explained by Ledoit and Peche (cf. reference below). Their procedure
       was extended by Bun, Bouchaud and Potters, who also correct
       for a systematic downward bias in small eigenvalues.
       
       It is this debiased, optimal shrinkage, rotationally-invariant
       estimator that the function at hand implements.
       
       In addition to the above method, this function also provides access to:
       - The finite-N regularization of the optimal RIE for small eigenvalues
         as provided in section 8.1 of [3], a.k.a. the inverse Wishart (IW) regularization.
       - The direct kernel method of O. Ledoit and M. Wolf in their 2017 paper [4].
         This is a direct port of their Matlab code.
        
         
       Parameters
       ----------
       X: design matrix, of shape (T, N), where T denotes the number
           of samples (think measurements in a time series), while N
           stands for the number of features (think of stock tickers).
           
       return_covariance: type bool (default: False)
           If set to True, compute the standard deviations of each individual
           feature across observations, clean the underlying matrix
           of pairwise correlations, then re-apply the standard
           deviations and return a cleaned variance-covariance matrix.
       
       method: type string, optional (default="rie")
           - If "rie" : optimal shrinkage in the manner of Bun & al.
            with no regularisation  
           - If "iw" : optimal shrinkage in the manner of Bun & al.
            with the so called Inverse Wishart regularization
           - If 'kernel': Direct kernel method of Ledoit  Wolf.

       Returns
       -------
       E_RIE: type numpy.ndarray, shape (N, N)
           Cleaned estimator of the true correlation matrix C. A sample
           estimator of C is the empirical covariance matrix E 
           estimated from X. E is corrupted by in-sample noise.
           E_RIE is the optimal shrinkage, rotationally-invariant estimator 
           (RIE) of C computed following the procedure of Joel Bun 
           and colleagues (cf. references below).
           
           If return_covariance=True, E_RIE corresponds to a cleaned
           variance-covariance matrix.

       References
       ----------
       1 "Eigenvectors of some large sample covariance matrix ensembles",
         O. Ledoit and S. Peche
         Probability Theory and Related Fields, Vol. 151 (1), pp 233-264
       2 "Rotational invariant estimator for general noisy matrices",
         J. Bun, R. Allez, J.-P. Bouchaud and M. Potters
         arXiv: 1502.06736 [cond-mat.stat-mech]
       3 "Cleaning large Correlation Matrices: tools from Random Matrix Theory",
         J. Bun, J.-P. Bouchaud and M. Potters
         arXiv: 1610.08104 [cond-mat.stat-mech]
       4 "Direct Nonlinear Shrinkage Estimation of Large-Dimensional Covariance Matrices (September 2017)", 
         O. Ledoit and M. Wolf https://ssrn.com/abstract=3047302 or http://dx.doi.org/10.2139/ssrn.3047302
    """
    
    assert isinstance(return_covariance, bool), \
        "return_covariance must be a boolean"

    T, N, transpose_flag = checkDesignMatrix(X)
    if transpose_flag:
        X = X.T
        
    if not return_covariance:
        X = StandardScaler(with_mean=False,
                           with_std=True).fit_transform(X)

    ec = EmpiricalCovariance(store_precision=False,
                             assume_centered=True)
    ec.fit(X)
    E = ec.covariance_
    
    if return_covariance:
        inverse_std = 1./np.sqrt(np.diag(E))
        E *= inverse_std
        E *= inverse_std.reshape(-1, 1)

    eigvals, eigvecs = np.linalg.eigh(E)
    eigvecs = eigvecs.T

    q = N / float(T)
    lambda_N = eigvals[0]  # The smallest empirical eigenvalue,
                           # given that the function used to compute
                           # the spectrum of a Hermitian or symmetric
                           # matrix - namely np.linalg.eigh - returns
                           # the eigenvalues in ascending order.
    lambda_hats = None
    
    if method != 'kernel':
        use_inverse_wishart = (method == 'iw')
        xis = map(lambda x: xiHelper(x, q, E), eigvals)
        Gammas = map(lambda x: gammaHelper(x, q, N, lambda_N, inverse_wishart=use_inverse_wishart), eigvals)
        xi_hats = map(lambda a, b: a * b if b > 1 else a, xis, Gammas)
        lambda_hats = xi_hats
    else:
        lambda_hats = directKernel(q, T, N, eigvals)
        
    E_RIE = np.zeros((N, N), dtype=float)
    for lambda_hat, eigvec in zip(lambda_hats, eigvecs):
        eigvec = eigvec.reshape(-1, 1)
        E_RIE += lambda_hat * eigvec.dot(eigvec.T)
        
    tmp = 1./np.sqrt(np.diag(E_RIE))
    E_RIE *= tmp
    E_RIE *= tmp.reshape(-1, 1)
    
    if return_covariance:
        std = 1./inverse_std
        E_RIE *= std
        E_RIE *= std.reshape(-1, 1)

    return E_RIE
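A usage sketch for the estimator above on synthetic data, assuming the module-level helpers it calls (checkDesignMatrix, xiHelper, gammaHelper, directKernel) and its imports (numpy, scikit-learn) are available; T observations of N weakly correlated features:

import numpy as np

rng = np.random.RandomState(0)
T, N = 500, 50

common = rng.randn(T, 1)                   # shared factor inducing cross-correlation
X = 0.3 * common + rng.randn(T, N)

E_clean = optimalShrinkage(X)              # cleaned correlation matrix, method="rie"
assert E_clean.shape == (N, N)
assert np.allclose(np.diag(E_clean), 1.0)  # unit diagonal after the final rescaling

C_clean = optimalShrinkage(X, return_covariance=True)   # cleaned covariance instead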
Exemplo n.º 44
0
def test_energy_time_fit():

    # Let's generate our dataset of 4 spectra with a normalization that follows
    # a powerlaw in time

    def generate_one(K):
        # Let's generate some data with y = Powerlaw(x)

        gen_function = Powerlaw()
        gen_function.K = K

        # Generate a dataset using the power law, and a
        # constant 30% error

        x = np.logspace(0, 2, 50)

        xyl_generator = XYLike.from_function("sim_data",
                                             function=gen_function,
                                             x=x,
                                             yerr=0.3 * gen_function(x))

        y = xyl_generator.y
        y_err = xyl_generator.yerr

        # xyl = XYLike("data", x, y, y_err)

        # xyl.plot(x_scale='log', y_scale='log')

        return x, y, y_err

    time_tags = np.array([1.0, 2.0, 5.0, 10.0])

    # This is the power law that defines the normalization as a function of time

    normalizations = 0.23 * time_tags**(-1.2)

    datasets = list(map(generate_one, normalizations))

    # Now set up the fit and fit it

    time = IndependentVariable("time", 1.0, u.s)

    plugins = []

    for i, dataset in enumerate(datasets):
        x, y, y_err = dataset

        xyl = XYLike("data%i" % i, x, y, y_err)

        xyl.tag = (time, time_tags[i])

        assert xyl.tag == (time, time_tags[i], None)

        plugins.append(xyl)

    data = DataList(*plugins)

    spectrum = Powerlaw()
    spectrum.K.bounds = (0.01, 1000.0)

    src = PointSource("test", 0.0, 0.0, spectrum)

    model = Model(src)

    model.add_independent_variable(time)

    time_po = Powerlaw()
    time_po.K.bounds = (0.01, 1000)
    time_po.K.value = 2.0
    time_po.index = -1.5

    model.link(spectrum.K, time, time_po)

    jl = JointLikelihood(model, data)

    jl.set_minimizer("minuit")

    best_fit_parameters, likelihood_values = jl.fit()

    # Make sure we are within 10% of the expected result

    assert np.allclose(best_fit_parameters['value'].values,
                       [0.25496115, -1.2282951, -2.01508341],
                       rtol=0.1)
Exemplo n.º 45
0
    def __init__(self, refstd, scale=1.5, fig=None, rect=111, label='_'):
        """Set up Taylor diagram axes, i.e. single quadrant polar
        plot, using mpl_toolkits.axisartist.floating_axes. refstd is
        the reference standard deviation to be compared to.
        """

        from matplotlib.projections import PolarAxes
        import mpl_toolkits.axisartist.floating_axes as FA
        import mpl_toolkits.axisartist.grid_finder as GF

        self.refstd = refstd  # Reference standard deviation

        tr = PolarAxes.PolarTransform()

        # Correlation labels
        rlocs = NP.concatenate((old_div(NP.arange(10), 10.), [0.95, 0.99]))
        tlocs = NP.arccos(rlocs)  # Conversion to polar angles
        gl1 = GF.FixedLocator(tlocs)  # Positions
        tf1 = GF.DictFormatter(dict(list(zip(tlocs, list(map(str, rlocs))))))

        # Standard deviation axis extent
        self.smin = 0
        self.smax = scale * self.refstd
        ghelper = FA.GridHelperCurveLinear(
            tr,
            extremes=(
                0,
                old_div(NP.pi, 2),  # 1st quadrant
                self.smin,
                self.smax),
            grid_locator1=gl1,
            tick_formatter1=tf1,
        )

        if fig is None:
            fig = PLT.figure()

        ax = FA.FloatingSubplot(fig, rect, grid_helper=ghelper)
        fig.add_subplot(ax)

        # Adjust axes
        ax.axis["top"].set_axis_direction("bottom")  # "Angle axis"
        ax.axis["top"].toggle(ticklabels=True, label=True)
        ax.axis["top"].major_ticklabels.set_axis_direction("top")
        ax.axis["top"].label.set_axis_direction("top")
        ax.axis["top"].label.set_text("Correlation")

        ax.axis["left"].set_axis_direction("bottom")  # "X axis"
        ax.axis["left"].label.set_text("Standard deviation")

        ax.axis["right"].set_axis_direction("top")  # "Y axis"
        ax.axis["right"].toggle(ticklabels=True)
        ax.axis["right"].major_ticklabels.set_axis_direction("left")

        ax.axis["bottom"].set_visible(False)  # Useless

        # Contours along standard deviations
        ax.grid(False)

        self._ax = ax  # Graphical axes
        self.ax = ax.get_aux_axes(tr)  # Polar coordinates

        # Add reference point and stddev contour
        print("Reference std:", self.refstd)
        l, = self.ax.plot(
            [0], self.refstd, 'r*', ls='', ms=14, label=label, zorder=10)
        t = NP.linspace(0, old_div(NP.pi, 2))
        r = NP.zeros_like(t) + self.refstd
        self.ax.plot(t, r, 'k--', label='_')

        # Collect sample points for later use (e.g. legend)
        self.samplePoints = [l]
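The coordinate trick behind the Taylor diagram above is that a correlation r is drawn at polar angle arccos(r) with the standard deviation as the radius, which is also why the correlation tick positions are passed through NP.arccos before being handed to the locator. A tiny sketch of placing one sample point (dia is a hypothetical instance of the class above):

import numpy as NP

def taylor_coords(correlation, stddev):
    # angle = arccos(r), radius = sigma
    return NP.arccos(correlation), stddev

theta, r = taylor_coords(0.9, 1.3)
# dia.ax.plot(theta, r, 'bo', label='model')   # plot on the auxiliary polar axes (self.ax above)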
Exemplo n.º 46
0
 def getJobIds(self):
     """Returns a list of the IDs of all jobs being monitored."""
     return list(map(opencue.id, self.jobMonitor.getJobProxies()))
Exemplo n.º 47
0
def filter_params(params, props):
    """
    Return list of values from dictionary for list of keys
    """
    return list(map(params.get, props))
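A quick usage sketch (hypothetical dict and keys); note that keys missing from the dict come back as None, since dict.get is used:

params = {'host': 'localhost', 'port': 5432, 'user': 'admin'}
assert filter_params(params, ['host', 'port']) == ['localhost', 5432]
assert filter_params(params, ['missing']) == [None]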
Exemplo n.º 48
0
 def topLevelItems(self):
     return list(map(self.topLevelItem, range(self.topLevelItemCount())))
Exemplo n.º 49
0
def get_a_field_val(
    self,
    field_name,
    field_attr,
    needdata,
    row,
    sheet,
    check_file,
    sheet_of_copy_wb,
    merge_tuple_list,
    model_name,
    instance_not_dict,
    search_dict,
    update_dict,
    collection_dict,
    setting,
    sheet_of_copy_wb_para,
    f_name_call=None,
):
    #     print ('**field_name**', field_name)
    skip_this_field = field_attr.get('skip_this_field', False)
    if callable(skip_this_field):
        skip_this_field = skip_this_field(self)
    if skip_this_field:
        return True
    col_index = field_attr.get('col_index')
    func = field_attr.get('func')
    obj = False
    set_val = field_attr.get('set_val')
    if set_val is not None:
        val = set_val
    elif col_index is not None:  # read the value from the Excel file
        xl_val = read_excel_cho_field(sheet, row, col_index, merge_tuple_list)
        xl_val = empty_string_to_False(xl_val)
        field_attr['excel_val'] = xl_val
        val = empty_string_to_False(xl_val)
        if field_attr.get('partern_empty_val'):
            val = empty_string_to_False(val,
                                        pt=field_attr.get('partern_empty_val'))

    elif field_attr.get('fields'):
        fields_noti_dict = instance_not_dict.setdefault('fields', {})
        obj, val = create_instance(self,
                                   field_attr,
                                   sheet,
                                   row,
                                   merge_tuple_list,
                                   needdata,
                                   fields_noti_dict,
                                   check_file=check_file,
                                   sheet_of_copy_wb=sheet_of_copy_wb,
                                   setting=setting,
                                   sheet_of_copy_wb_para=sheet_of_copy_wb_para,
                                   f_name_call=field_name)
    else:
        val = False
    try:
        field_attr['before_func_val'] = val
    except Exception as e:
        raise UserError(u'%s-%s-%s' % (field_name, field_attr, row))
    # func
    karg = field_attr.get('karg', {})
    if karg is None:
        karg = {}
    func_pre_func = field_attr.get('func_pre_func')
    required_pre = field_attr.get('required_pre')
    #     if required_pre and val == False:
    #         return 'break_loop_fields_because_one_required_field'

    if func_pre_func:
        val = func_pre_func(val, needdata, self)
    if func:
        try:
            val = func(val, needdata, **karg)
        except TypeError:
            try:
                val = func(val, needdata, self, **karg)
            except TypeError:
                try:
                    val = func(val, needdata, self, obj, **karg)
                except TypeError:
                    val = func(val, **karg)
        if isinstance(val, tuple):
            obj, val = val

    func_after_func = field_attr.get('func_after_func')
    if func_after_func:
        val = func_after_func(val, needdata, self, obj)
        if isinstance(val, tuple):
            obj, val = val


#         print ('func read model_name:%s field_name:%s'%(model_name,field_name),'val',val)

    val = replace_val_for_ci(field_attr, val, needdata)
    field_attr['val_goc'] = val

    if val == False:
        default_val = field_attr.get('default_val')
        if default_val is not None:
            val = default_val  # default

    if field_attr.get('field_type') == 'float':
        try:
            val = float_round(val, precision_rounding=0.01)
        except:
            raise UserError(u'%s-%s' % (val, type(val)))

    if val != False and field_attr.get('st_is_x2m_field'):
        if isinstance(val, str):
            val = val.split(',')
            val = list(map(lambda i: empty_string_to_False(i.strip()), val))
        else:
            val = [val]
        if False in val:
            raise UserError(u'No element in the list may be False')

    if val == False:
        make_sign_use_old_if_False = field_attr.get(
            'make_sign_use_old_if_False', False)
        if make_sign_use_old_if_False:
            needdata['sign_use_old_if_False'] = True
        if needdata['sign_use_old_if_False']:
            val = field_attr.get('old_val', val)
    field_attr['val'] = val

    field_attr['obj'] = obj

    field_attr['old_val'] = val
    key_or_not = field_attr.get('key')
    a_field_code = True

    required = field_attr.get('required', False)
    if check_file:
        required_when_normal = field_attr.get('required', False)
        required = False
        #         if field_attr.get('instance_none_when_check_file') and key_or_not :
        if val is None and key_or_not:
            one_field_equal_none = collection_dict.setdefault(
                'one_field_equal_none', [])
            one_field_equal_none.append(field_name)
        else:
            if required_when_normal and val == False and required == False:
                collection_dict[
                    'instance_is_None_in_check_file_mode_becaused_a_required_field_in_imported_mode'] = True
                break_field = collection_dict.setdefault('break_field', [])
                break_field.append(field_name)
    if required and (
            val == False):  # note: val == False also matches val == 0 here
        a_field_code = 'break_loop_fields_because_one_required_field'
        break_field = collection_dict.setdefault('break_field', [])
        break_field.append(field_name)
        return a_field_code  # fix 5

    if not field_attr.get('for_excel_readonly'):
        if '2many' in field_attr.get(
                'field_type', ''
        ) and val == False:  # do not add a 2many field when its value is False
            return a_field_code
        if key_or_not == True:
            search_dict[field_name] = val
        elif key_or_not == 'Both':
            search_dict[field_name] = val
            update_dict[field_name] = val
        else:
            update_dict[field_name] = val
    valid_field_func = field_attr.get('valid_field_func')
    if valid_field_func:
        kargs_valid_field_func = field_attr.get('kargs_valid_field_func', {})
        notice_demo = {'field_name': field_name, 'f_name_call': f_name_call}
        needdata['notice_demo'] = notice_demo
        if kargs_valid_field_func is None:
            kargs_valid_field_func = {}
        print('f_name_call', f_name_call, '***kargs_valid_field_func',
              kargs_valid_field_func, 'field_attr', field_attr)
        valid_field_func(val, obj, needdata, self, **kargs_valid_field_func)

    print("row:", row, 'f_name_call', f_name_call, 'model_name: ', model_name,
          '-field: ', field_name, '-val: ', val)
    #     check_trust_type_of_value_after_all(field_attr, val, field_name, model_name)
    return a_field_code
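A compact sketch of just the key/'Both' routing convention used near the end of the function above, decoupled from the Odoo machinery (all names here are illustrative):

def route_value(field_name, value, key_or_not, search_dict, update_dict):
    # key=True: the value identifies the record (search only);
    # key='Both': used both to find and to update the record;
    # anything else: the value is written on update only.
    if key_or_not is True:
        search_dict[field_name] = value
    elif key_or_not == 'Both':
        search_dict[field_name] = value
        update_dict[field_name] = value
    else:
        update_dict[field_name] = value

search, update = {}, {}
route_value('code', 'SO001', True, search, update)
route_value('name', 'Order 1', None, search, update)
assert search == {'code': 'SO001'} and update == {'name': 'Order 1'}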
 def _active_note_regions(self):
     all_active_regions = list(
         map(lambda e: e.active_note_regions, self._editors))
     return list(set(chain.from_iterable(all_active_regions)))
Exemplo n.º 51
0
def apply_to_patch(mmap_file, shape, dview, rf, stride, function, *args,
                   **kwargs):
    """
    apply function to patches in parallel or not

    Parameters:
    ----------
    mmap_file: memory-mapped movie object
        its .filename attribute (a memory-mapped file, 2D, pixels x time) is passed to the workers

    shape: tuple of three elements
        dimensions of the original movie across y, x, and time

    rf: int
        half-size of the square patch in pixels

    stride: int
        amount of overlap between patches

    dview: ipyparallel view on client
        if None, patches are processed sequentially in the current process

    function: callable
        function applied to each patch; any extra *args and **kwargs are forwarded to it

    Returns:
    -------
    results

    Raise:
    -----
    Exception('Something went wrong')

    """
    (_, d1, d2) = shape

    if not np.isscalar(rf):
        rf1, rf2 = rf
    else:
        rf1 = rf
        rf2 = rf

    if not np.isscalar(stride):
        stride1, stride2 = stride
    else:
        stride1 = stride
        stride2 = stride

    idx_flat, idx_2d = extract_patch_coordinates(d1,
                                                 d2,
                                                 rf=(rf1, rf2),
                                                 stride=(stride1, stride2))

    shape_grid = tuple(
        np.ceil((d1 * 1. / (rf1 * 2 - stride1),
                 d2 * 1. / (rf2 * 2 - stride2))).astype(int))
    if d1 <= rf1 * 2:
        shape_grid = (1, shape_grid[1])
    if d2 <= rf2 * 2:
        shape_grid = (shape_grid[0], 1)

    print(shape_grid)

    args_in = []

    for id_f, id_2d in zip(idx_flat[:], idx_2d[:]):

        args_in.append(
            (mmap_file.filename, id_f, id_2d, function, args, kwargs))
    print((len(idx_flat)))
    if dview is not None:
        try:
            file_res = dview.map_sync(function_place_holder, args_in)
            dview.results.clear()

        except Exception as e:
            raise Exception('Something went wrong') from e
        finally:
            print('You may think that it went well but reality is harsh')
    else:

        file_res = list(map(function_place_holder, args_in))
    return file_res, idx_flat, shape_grid
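A standalone sketch of the overlapping-patch grid computed above: with half-size rf and overlap stride, patch centres are laid out every (2*rf - stride) pixels, so the grid has roughly dimension / (2*rf - stride) patches per axis, collapsing to a single patch when the dimension fits inside one patch (illustrative helper, numpy only):

import numpy as np

def patch_grid_shape(d1, d2, rf, stride):
    # Same arithmetic as apply_to_patch, for the number of patches per axis.
    rf1, rf2 = (rf, rf) if np.isscalar(rf) else rf
    s1, s2 = (stride, stride) if np.isscalar(stride) else stride
    grid = tuple(np.ceil((d1 / (rf1 * 2 - s1), d2 / (rf2 * 2 - s2))).astype(int))
    if d1 <= rf1 * 2:
        grid = (1, grid[1])
    if d2 <= rf2 * 2:
        grid = (grid[0], 1)
    return grid

assert patch_grid_shape(512, 512, rf=25, stride=10) == (13, 13)
assert patch_grid_shape(40, 512, rf=25, stride=10) == (1, 13)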
Exemplo n.º 52
0
    def find_search_results(self, series, episodes, search_mode, forced_search=False, download_current_quality=False,
                            manual_search=False, manual_search_type='episode'):
        """Search episodes based on param."""
        self._check_auth()
        self.series = series

        results = {}
        items_list = []
        season_search = (len(episodes) > 1 or manual_search_type == 'season') and search_mode == 'sponly'

        for episode in episodes:
            if not manual_search:
                cache_results = self.cache.find_needed_episodes(
                    episode, forced_search=forced_search, down_cur_quality=download_current_quality
                )
                if cache_results:
                    for episode_no in cache_results:
                        if episode_no not in results:
                            results[episode_no] = cache_results[episode_no]
                        else:
                            results[episode_no] += cache_results[episode_no]
                    continue

            search_strings = []
            if season_search:
                search_strings = self._get_season_search_strings(episode)
            elif search_mode == 'eponly':
                search_strings = self._get_episode_search_strings(episode)

            for search_string in search_strings:
                # Find results from the provider
                items_list += self.search(
                    search_string, ep_obj=episode, manual_search=manual_search
                )

            # In season search, we can't loop in episodes lists as we
            # only need one episode to get the season string
            if search_mode == 'sponly':
                break

        # Remove duplicate items
        unique_items = self.remove_duplicate_mappings(items_list)
        log.debug('Found {0} unique items', len(unique_items))

        # categorize the items into lists by quality
        categorized_items = defaultdict(list)
        for item in unique_items:
            quality = self.get_quality(item, anime=series.is_anime)
            categorized_items[quality].append(item)

        # sort qualities in descending order
        sorted_qualities = sorted(categorized_items, reverse=True)
        log.debug('Found qualities: {0}', sorted_qualities)

        # chain items sorted by quality
        sorted_items = chain.from_iterable(
            categorized_items[quality]
            for quality in sorted_qualities
        )

        # unpack all of the quality lists into a single sorted list
        items_list = list(sorted_items)

        # Move through each item and parse it into a quality
        search_results = []
        for item in items_list:

            # Make sure we start with a TorrentSearchResult, NZBDataSearchResult or NZBSearchResult search result obj.
            search_result = self.get_result()
            search_results.append(search_result)
            search_result.item = item
            search_result.download_current_quality = download_current_quality
            # FIXME: Should be changed to search_result.search_type
            search_result.forced_search = forced_search

            (search_result.name, search_result.url) = self._get_title_and_url(item)
            (search_result.seeders, search_result.leechers) = self._get_result_info(item)

            search_result.size = self._get_size(item)
            search_result.pubdate = self._get_pubdate(item)

            search_result.result_wanted = True

            try:
                search_result.parsed_result = NameParser(parse_method=('normal', 'anime')[series.is_anime]
                                                         ).parse(search_result.name)
            except (InvalidNameException, InvalidShowException) as error:
                log.debug('Error during parsing of release name: {release_name}, with error: {error}',
                          {'release_name': search_result.name, 'error': error})
                search_result.add_cache_entry = False
                search_result.result_wanted = False
                continue

            # I don't know why I'm doing this. Maybe remove it altogether later on, now that I've added the
            # parsed_result to the search_result.
            search_result.series = search_result.parsed_result.series
            search_result.quality = search_result.parsed_result.quality
            search_result.release_group = search_result.parsed_result.release_group
            search_result.version = search_result.parsed_result.version
            search_result.actual_season = search_result.parsed_result.season_number
            search_result.actual_episodes = search_result.parsed_result.episode_numbers

            if not manual_search:
                if not (search_result.series.air_by_date or search_result.series.sports):
                    if search_mode == 'sponly':
                        if search_result.parsed_result.episode_numbers:
                            log.debug(
                                'This is supposed to be a season pack search but the result {0} is not a valid '
                                'season pack, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue
                        elif not [ep for ep in episodes if
                                  search_result.parsed_result.season_number == (ep.season, ep.scene_season)
                                  [ep.series.is_scene]]:
                            log.debug(
                                'This season result {0} is for a season we are not searching for, '
                                'skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue
                    else:
                        # I'm going to split these up for better readability
                        # Check if at least got a season parsed.
                        if search_result.parsed_result.season_number is None:
                            log.debug(
                                "The result {0} doesn't seem to have a valid season that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # Check if we at least got some episode numbers parsed.
                        if not search_result.parsed_result.episode_numbers:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # Compare the episodes and season from the result with what was searched.
                        if not [searched_episode for searched_episode in episodes
                                if searched_episode.season == search_result.parsed_result.season_number and
                                (searched_episode.episode, searched_episode.scene_episode)[
                                    searched_episode.series.is_scene] in
                                search_result.parsed_result.episode_numbers]:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                    # We've performed some checks to decide if we want to continue with this result.
                    # If we've hit this point, the show is not air_by_date and not sports, and the result seems
                    # valid. Let's store the parsed season and episode numbers and continue.
                    search_result.actual_season = search_result.parsed_result.season_number
                    search_result.actual_episodes = search_result.parsed_result.episode_numbers
                else:
                    # air_by_date or sports show.
                    search_result.same_day_special = False

                    if not search_result.parsed_result.is_air_by_date:
                        log.debug(
                            "This is supposed to be a date search but the result {0} didn't parse as one, "
                            'skipping it', search_result.name
                        )
                        search_result.result_wanted = False
                        continue
                    else:
                        # Use a query against the tv_episodes table, to match the parsed air_date against.
                        air_date = search_result.parsed_result.air_date.toordinal()
                        db = DBConnection()
                        sql_results = db.select(
                            'SELECT season, episode FROM tv_episodes WHERE indexer = ? AND showid = ? AND airdate = ?',
                            [search_result.series.indexer, search_result.series.series_id, air_date]
                        )

                        if len(sql_results) == 2:
                            if int(sql_results[0]['season']) == 0 and int(sql_results[1]['season']) != 0:
                                search_result.actual_season = int(sql_results[1]['season'])
                                search_result.actual_episodes = [int(sql_results[1]['episode'])]
                                search_result.same_day_special = True
                            elif int(sql_results[1]['season']) == 0 and int(sql_results[0]['season']) != 0:
                                search_result.actual_season = int(sql_results[0]['season'])
                                search_result.actual_episodes = [int(sql_results[0]['episode'])]
                                search_result.same_day_special = True
                        elif len(sql_results) != 1:
                            log.warning(
                                "Tried to look up the date for the episode {0} but the database didn't return proper "
                                'results, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # @TODO: Need to verify and test this.
                        if search_result.result_wanted and not search_result.same_day_special:
                            search_result.actual_season = int(sql_results[0]['season'])
                            search_result.actual_episodes = [int(sql_results[0]['episode'])]

        cl = []
        # Iterate again over the search results, and see if there is anything we want.
        for search_result in search_results:

            # Try to cache the item if we want to.
            cache_result = search_result.add_result_to_cache(self.cache)
            if cache_result is not None:
                cl.append(cache_result)

            if not search_result.result_wanted:
                log.debug("We aren't interested in this result: {0} with url: {1}",
                          search_result.name, search_result.url)
                continue

            log.debug('Found result {0} at {1}', search_result.name, search_result.url)

            search_result.create_episode_object()
            # result = self.get_result(episode_object, search_result)
            search_result.finish_search_result(self)

            if not search_result.actual_episodes:
                episode_number = SEASON_RESULT
                log.debug('Found season pack result {0} at {1}', search_result.name, search_result.url)
            elif len(search_result.actual_episodes) == 1:
                episode_number = search_result.actual_episode
                log.debug('Found single episode result {0} at {1}', search_result.name, search_result.url)
            else:
                episode_number = MULTI_EP_RESULT
                log.debug('Found multi-episode ({0}) result {1} at {2}',
                          ', '.join(map(str, search_result.parsed_result.episode_numbers)),
                          search_result.name,
                          search_result.url)
            if episode_number not in results:
                results[episode_number] = [search_result]
            else:
                results[episode_number].append(search_result)

        if cl:
            # Access to a protected member of a client class
            db = self.cache._get_db()
            db.mass_action(cl)

        return results
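# A minimal sketch (values made up) of the (a, b)[flag] idiom used in the
# season and episode comparisons above: indexing a two-tuple with a boolean
# picks the scene numbering when is_scene is True, the indexer numbering
# otherwise.
season, scene_season, is_scene = 4, 5, True
assert (season, scene_season)[is_scene] == 5
assert (season, scene_season)[False] == 4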
Exemplo n.º 53
0
                        order='C')

    step = np.int(old_div(d, n_chunks))
    pars = []
    for ref in range(0, d - step + 1, step):
        pars.append([fname_tot, d, tot_frames, mmap_fnames, ref, ref + step])
    # last batch should include the leftover pixels
    pars[-1][-1] = d

    if dview is not None:
        if 'multiprocessing' in str(type(dview)):
            dview.map_async(save_portion, pars).get(4294967)
        else:
            dview.map_sync(save_portion, pars)
    else:
        list(map(save_portion, pars))

    np.savez(base_name + '.npz', mmap_fnames=mmap_fnames, fname_tot=fname_tot)

    print('Deleting big mov')
    del big_mov

    return fname_tot
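
# A minimal sketch (hypothetical sizes) of the chunking scheme above: d pixels
# are split into n_chunks contiguous ranges and the last range is widened to
# absorb the leftover pixels, mirroring pars[-1][-1] = d.
d, n_chunks = 10, 3
step = d // n_chunks
chunks = [[ref, ref + step] for ref in range(0, d - step + 1, step)]
chunks[-1][-1] = d  # last batch takes the remainder
assert chunks == [[0, 3], [3, 6], [6, 10]]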


def save_portion(pars):
    # todo: todocument

    big_mov, d, tot_frames, fnames, idx_start, idx_end = pars
    big_mov = np.memmap(big_mov,
                        mode='r+',
Exemplo n.º 54
0
def get_coco_score(pred_list, verbose, extra_vars, split):
    """
    COCO challenge metrics
    :param pred_list: dictionary of hypothesis sentences (id, sentence)
    :param verbose: if greater than 0 the metric measures are printed out
    :param extra_vars: extra variables, here are:
            extra_vars['references'] - dict mapping sample indices to list with all valid captions (id, [sentences])
            extra_vars['tokenize_f'] - tokenization function used during model training (used again for validation)
            extra_vars['detokenize_f'] - detokenization function used during model training (used again for validation)
            extra_vars['tokenize_hypotheses'] - Whether tokenize or not the hypotheses during evaluation
    :param split: split on which we are evaluating
    :return: Dictionary with the coco scores
    """
    from pycocoevalcap.bleu.bleu import Bleu
    from pycocoevalcap.meteor.meteor import Meteor
    from pycocoevalcap.meteor import accepted_langs
    from pycocoevalcap.cider.cider import Cider
    from pycocoevalcap.rouge.rouge import Rouge
    from pycocoevalcap.ter.ter import Ter

    gts = extra_vars[split]['references']
    if extra_vars.get('tokenize_hypotheses', False):
        hypo = {
            idx: list(map(extra_vars['tokenize_f'], [lines.strip()]))
            for (idx, lines) in list(enumerate(pred_list))
        }
    else:
        hypo = {
            idx: [lines.strip()]
            for (idx, lines) in list(enumerate(pred_list))
        }

    # Tokenize references if needed.
    if extra_vars.get('tokenize_references', False):
        refs = {
            idx: list(map(extra_vars['tokenize_f'], gts[idx]))
            for idx in list(gts)
        }
    else:
        refs = gts

    # Detokenize references if needed.
    if extra_vars.get('apply_detokenization', False):
        refs = {
            idx: list(map(extra_vars['detokenize_f'], refs[idx]))
            for idx in refs
        }

    scorers = [(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
               (Ter(), "TER"), (Rouge(), "ROUGE_L"), (Cider(), "CIDEr")]
    if extra_vars.get('language', 'en') in accepted_langs:
        scorers.append((Meteor(language=extra_vars['language']), "METEOR"))

    final_scores = {}
    for scorer, method in scorers:
        score, _ = scorer.compute_score(refs, hypo)
        if type(score) == list:
            for m, s in list(zip(method, score)):
                final_scores[m] = s
        else:
            final_scores[method] = score

    if verbose > 0:
        logging.info('Computing coco scores on the %s split...' % split)
    for metric in sorted(final_scores):
        value = final_scores[metric]
        logging.info(metric + ': ' + str(value))

    return final_scores
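# A minimal sketch of the score-aggregation loop above, with stub scorers
# standing in for the pycocoevalcap objects (class names here are hypothetical).
class _StubBleu(object):
    def compute_score(self, refs, hypo):
        return [0.7, 0.5, 0.4, 0.3], None  # one value per n-gram order

class _StubRouge(object):
    def compute_score(self, refs, hypo):
        return 0.6, None  # single-valued metrics return a float

stub_scorers = [(_StubBleu(), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
                (_StubRouge(), "ROUGE_L")]
stub_scores = {}
for scorer, method in stub_scorers:
    score, _ = scorer.compute_score({}, {})
    if isinstance(score, list):
        stub_scores.update(zip(method, score))
    else:
        stub_scores[method] = score
assert stub_scores['Bleu_2'] == 0.5 and stub_scores['ROUGE_L'] == 0.6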
Exemplo n.º 55
0
def _kwargs_to_qs(**kwargs):
    """Converts kwargs given to GPF to a querystring.

    :returns: the querystring.
    """
    # start with defaults
    inpOptDef = inputs_options_defaults()
    opts = {name: dct['value'] for name, dct in inpOptDef.items()}

    # clean up keys and values
    for k, v in kwargs.items():
        # pID, playerID => player_id
        if k.lower() in ('pid', 'playerid'):
            del kwargs[k]
            kwargs['player_id'] = v
        # player_id can accept rel URLs
        if k == 'player_id':
            if v.startswith('/players/'):
                kwargs[k] = utils.rel_url_to_id(v)
        # bool => 'Y'|'N'
        if isinstance(v, bool):
            kwargs[k] = 'Y' if v else 'N'
        # tm, team => team_id
        if k.lower() in ('tm', 'team'):
            del kwargs[k]
            kwargs['team_id'] = v
        # yr_min, yr_max => year_min, year_max
        if k.lower() in ('yr_min', 'yr_max'):
            del kwargs[k]
            if k.lower() == 'yr_min':
                kwargs['year_min'] = int(v)
            else:
                kwargs['year_max'] = int(v)
        # wk_min, wk_max => week_num_min, week_num_max
        if k.lower() in ('wk_min', 'wk_max'):
            del kwargs[k]
            if k.lower() == 'wk_min':
                kwargs['week_num_min'] = int(v)
            else:
                kwargs['week_num_max'] = int(v)
        # yr, year, yrs, years => year_min, year_max
        if k.lower() in ('yr', 'year', 'yrs', 'years'):
            del kwargs[k]
            if isinstance(v, collections.Iterable):
                lst = list(v)
                kwargs['year_min'] = min(lst)
                kwargs['year_max'] = max(lst)
            elif isinstance(v, basestring):
                v = list(map(int, v.split(',')))
                kwargs['year_min'] = min(v)
                kwargs['year_max'] = max(v)
            else:
                kwargs['year_min'] = v
                kwargs['year_max'] = v
        # wk, week, wks, weeks => week_num_min, week_num_max
        if k.lower() in ('wk', 'week', 'wks', 'weeks'):
            del kwargs[k]
            if isinstance(v, collections.Iterable):
                lst = list(v)
                kwargs['week_num_min'] = min(lst)
                kwargs['week_num_max'] = max(lst)
            elif isinstance(v, basestring):
                v = list(map(int, v.split(',')))
                kwargs['week_num_min'] = min(v)
                kwargs['week_num_max'] = max(v)
            else:
                kwargs['week_num_min'] = v
                kwargs['week_num_max'] = v
        # if playoff_round defined, then turn on playoff flag
        if k == 'playoff_round':
            kwargs['game_type'] = 'P'
        if isinstance(v, basestring):
            v = v.split(',')
        if not isinstance(v, collections.Iterable):
            v = [v]

    # reset values to blank for defined kwargs
    for k in kwargs:
        if k in opts:
            opts[k] = []

    # update based on kwargs
    for k, v in kwargs.items():
        # if overwriting a default, overwrite it
        if k in opts:
            # if multiple values separated by commas, split em
            if isinstance(v, basestring):
                v = v.split(',')
            elif not isinstance(v, collections.Iterable):
                v = [v]
            for val in v:
                opts[k].append(val)

    opts['request'] = [1]

    qs = '&'.join('{}={}'.format(name, val)
                  for name, vals in sorted(opts.items()) for val in vals)

    return qs
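# A minimal sketch of the querystring join used above, on a hypothetical
# options dict mapping each parameter name to a list of values.
opts = {'year_min': [2010], 'year_max': [2012], 'game_type': ['P', 'R']}
qs = '&'.join('{}={}'.format(name, val)
              for name, vals in sorted(opts.items()) for val in vals)
assert qs == 'game_type=P&game_type=R&year_max=2012&year_min=2010'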
Exemplo n.º 56
0
 def parse_qstat_record(record):
     name, state_code = list(map(str.strip, record.split('|')))
     return name, Pbspro._job_states[state_code]
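# A minimal usage sketch, assuming a hypothetical state map in place of
# Pbspro._job_states; qstat records are "name | state_code" pairs.
job_states = {'R': 'running', 'Q': 'queued'}
name, state_code = list(map(str.strip, 'job_42 | R'.split('|')))
assert (name, job_states[state_code]) == ('job_42', 'running')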
Exemplo n.º 57
0
def init(rate=44100, stereo=True, buffer=128):
    """setup the pyo (sound) server
    """
    global pyoSndServer, Sound, audioDriver, duplex, maxChnls
    Sound = SoundPyo
    global pyo
    try:
        assert pyo
    except NameError:  # pragma: no cover
        import pyo
        # can be needed for microphone.switchOn(), which calls init even
        # if audioLib is something else

    # subclass the pyo.Server so that we can insert a __del__ function that
    # shuts it down; skip coverage since the class is never used if we have
    # a recent version of pyo

    class _Server(pyo.Server):  # pragma: no cover
        # make libs class variables so they don't get deleted first
        core = core
        logging = logging

        def __del__(self):
            self.stop()
            # make sure enough time passes for the server to shutdown
            self.core.wait(0.5)
            self.shutdown()
            # make sure enough time passes for the server to shutdown
            self.core.wait(0.5)
            # this may never get printed
            self.logging.debug('pyo sound server shutdown')

    # Compare version components numerically; plain string comparison breaks at 0.10+.
    if tuple(map(int, pyo.getVersion())) < (0, 6, 4):
        Server = _Server
    else:
        Server = pyo.Server

    # if we already have a server, just re-initialize it
    if 'pyoSndServer' in globals() and hasattr(pyoSndServer, 'shutdown'):
        pyoSndServer.stop()
        # make sure enough time passes for the server to shutdown
        core.wait(0.5)
        pyoSndServer.shutdown()
        core.wait(0.5)
        pyoSndServer.reinit(sr=rate,
                            nchnls=maxChnls,
                            buffersize=buffer,
                            audio=audioDriver)
        pyoSndServer.boot()
    else:
        if sys.platform == 'win32':
            # check for output device/driver
            devNames, devIDs = get_output_devices()
            audioDriver, outputID = _bestDriver(devNames, devIDs)
            if outputID is None:
                # using the default output because we didn't find the one(s)
                # requested
                audioDriver = 'Windows Default Output'
                outputID = pyo.pa_get_default_output()
            if outputID is not None:
                logging.info(u'Using sound driver: %s (ID=%i)' %
                             (audioDriver, outputID))
                maxOutputChnls = pyo.pa_get_output_max_channels(outputID)
            else:
                logging.warning(
                    'No audio outputs found (no speakers connected?)')
                return -1
            # check for valid input (mic)
            # If no input device is available, devNames and devIDs are empty
            # lists.
            devNames, devIDs = get_input_devices()
            audioInputName, inputID = _bestDriver(devNames, devIDs)
            # Input devices were found, but requested devices were not found
            if len(devIDs) > 0 and inputID is None:
                defaultID = pyo.pa_get_default_input()
                if defaultID is not None and defaultID != -1:
                    # default input is found
                    # use the default input because we didn't find the one(s)
                    # requested
                    audioInputName = 'Windows Default Input'
                    inputID = defaultID
                else:
                    # default input is not available
                    inputID = None
            if inputID is not None:
                msg = u'Using sound-input driver: %s (ID=%i)'
                logging.info(msg % (audioInputName, inputID))
                maxInputChnls = pyo.pa_get_input_max_channels(inputID)
                duplex = bool(maxInputChnls > 0)
            else:
                maxInputChnls = 0
                duplex = False
        # for other platforms set duplex to True (if microphone is available)
        else:
            audioDriver = prefs.hardware['audioDriver'][0]
            maxInputChnls = pyo.pa_get_input_max_channels(
                pyo.pa_get_default_input())
            maxOutputChnls = pyo.pa_get_output_max_channels(
                pyo.pa_get_default_output())
            duplex = bool(maxInputChnls > 0)

        maxChnls = min(maxInputChnls, maxOutputChnls)
        if maxInputChnls < 1:  # pragma: no cover
            msg = (u'%s.init could not find microphone hardware; '
                   u'recording not available')
            logging.warning(msg % __name__)
            maxChnls = maxOutputChnls
        if maxOutputChnls < 1:  # pragma: no cover
            msg = (u'%s.init could not find speaker hardware; '
                   u'sound not available')
            logging.error(msg % __name__)
            return -1

        # create the instance of the server:
        if sys.platform == 'darwin' or sys.platform.startswith('linux'):
            # for mac/linux we set the backend using the server audio param
            pyoSndServer = Server(sr=rate,
                                  nchnls=maxChnls,
                                  buffersize=buffer,
                                  audio=audioDriver)
        else:
            # with others we just use portaudio and then set the OutputDevice
            # below
            pyoSndServer = Server(sr=rate, nchnls=maxChnls, buffersize=buffer)

        pyoSndServer.setVerbosity(1)
        if sys.platform == 'win32':
            pyoSndServer.setOutputDevice(outputID)
            if inputID is not None:
                pyoSndServer.setInputDevice(inputID)
        # do other config here as needed (setDuplex? setOutputDevice?)
        pyoSndServer.setDuplex(duplex)
        pyoSndServer.boot()
    core.wait(0.5)  # wait for server to boot before starting the sound stream
    pyoSndServer.start()

    # atexit is FILO; it will call stop, then shutdown, upon closing
    atexit.register(pyoSndServer.shutdown)
    atexit.register(pyoSndServer.stop)
    try:
        Sound()  # test creation, no play
    except pyo.PyoServerStateException:
        msg = "Failed to start pyo sound Server"
        if sys.platform == 'darwin' and audioDriver != 'portaudio':
            msg += "; maybe try prefs.general.audioDriver 'portaudio'?"
        logging.error(msg)
        core.quit()
    logging.debug('pyo sound server started')
    logging.flush()
Exemplo n.º 58
0
 def is_touched(self):
     return any(
         map(
             lambda e: e and e.is_touched,
             filter(lambda e: self._can_notify_is_touched(e),
                    self.encoders)))
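# A minimal sketch of the any/map/filter chain above, with hypothetical encoder
# stand-ins; the _can_notify_is_touched filter is replaced here by a simple
# None check for illustration.
class _Encoder(object):
    def __init__(self, touched):
        self.is_touched = touched

encoders = [_Encoder(False), None, _Encoder(True)]
assert any(e.is_touched for e in encoders if e is not None)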
Exemplo n.º 59
0
 def _versionTuple(v):
     # for proper sorting: _versionTuple('10.8') < _versionTuple('10.10')
     return tuple(map(int, v.split('.')))
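# Quick check of the sorting behaviour noted in the comment above; plain string
# comparison gets '10.8' vs '10.10' wrong, the integer tuples do not.
assert tuple(map(int, '10.8'.split('.'))) < tuple(map(int, '10.10'.split('.')))
assert not ('10.8' < '10.10')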
Exemplo n.º 60
0
def compound_electroneg(verbose=False, elements=None, stoichs=None,
                                                source='Mulliken'):

    """Estimate electronegativity of compound from elemental data.

    Uses Mulliken electronegativity by default, which uses elemental
    ionisation potentials and electron affinities. Alternatively, can
    use Pauling electronegativity, re-scaled by factor 2.86 to achieve
    same scale as Mulliken method (Nethercot, 1974)
    DOI:10.1103/PhysRevLett.33.1088 .

    Geometric mean is used (n-th root of product of components), e.g.:

    X_Cu2S = (X_Cu * X_Cu * X_S)^(1/3)

    Args:
        elements (list) : Elements given as standard elemental symbols.
        stoichs (list) : Stoichiometries, given as integers or floats.
        verbose (bool) : An optional True/False flag. If True, additional information
            is printed to the standard output. [Default: False]
        source: String 'Mulliken' or 'Pauling'; type of Electronegativity to
            use. Note that in SMACT, Pauling electronegativities are
            rescaled to a Mulliken-like scale.

    Returns:
        Electronegativity (float) : Estimated electronegativity (no units).

    """
    if type(elements[0]) == str:
        elementlist = [smact.Element(i) for i in elements]
    elif type(elements[0]) == smact.Element:
        elementlist = elements
    else:
        raise Exception("Please supply a list of element symbols or SMACT Element objects")

    stoichslist = stoichs
    # Ensure stoichiometries are floats
    stoichslist = list(map(float, stoichslist))

    # Get electronegativity values for each element

    if source == 'Mulliken':
        elementlist = [old_div((el.ionpot+el.e_affinity),2.0)
                       for el in elementlist]

    elif source == 'Pauling':
        elementlist = [(2.86 * el.pauling_eneg)
                       for el in elementlist]
    else:
        raise Exception("Electronegativity type '{0}'".format(source),
                        "is not recognised")

    # Print optional list of element electronegativities.
    # This may be a useful sanity check in case of a suspicious result.
    if verbose:
        print("Electronegativities of elements=", elementlist)

    # Raise each electronegativity to its appropriate power
    # to account for stoichiometry.
    for i in range(0, len(elementlist)):
        elementlist[i] = [elementlist[i]**stoichslist[i]]

    # Calculate geometric mean (n-th root of product)
    prod = product(elementlist)
    compelectroneg = (prod)**(old_div(1.0,(sum(stoichslist))))

    if verbose:
        print("Geometric mean = Compound 'electronegativity'=", compelectroneg)

    return compelectroneg
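# A minimal numeric sketch of the geometric mean above, using made-up
# electronegativities (2.0 and 3.0) for a hypothetical A2B compound:
# X = (2.0**2 * 3.0**1) ** (1/3) ~= 2.289.
x_a, x_b = 2.0, 3.0
stoichs = [2, 1]
prod = x_a ** stoichs[0] * x_b ** stoichs[1]
x_compound = prod ** (1.0 / sum(stoichs))
assert abs(x_compound - 12.0 ** (1.0 / 3.0)) < 1e-12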