Example #1
    def _analyze_result(self, mutant, mutant_response):
        """
        Analyze results of the _send_mutant method.

        In this case, check if the file was uploaded to any of the known
        directories, or one of the "default" ones like "upload" or "files".
        """
        if self._has_bug(mutant):
            return

        # Gen expr for directories where I can search for the uploaded file
        domain_path_list = set(u.get_domain_path() for u in
                               kb.kb.get_all_known_urls())

        # FIXME: Note that in all cases where I'm using kb's url_object info
        # I'll be making a mistake if the audit plugin runs before all
        # crawl plugins have finished, since they won't have discovered
        # every directory yet, which leaves this plugin running with
        # less information.
        url_generator = self._generate_urls(domain_path_list,
                                            mutant.uploaded_file_name)
        mutant_repeater = repeat(mutant)
        http_response_repeater = repeat(mutant_response)
        args = izip(url_generator, mutant_repeater, http_response_repeater)

        self.worker_pool.map_multi_args(self._confirm_file_upload, args)
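
The last three lines pair one varying iterable (the candidate URLs) with constant arguments by zipping it against repeat() streams, so every _confirm_file_upload call sees the same mutant and response next to a different URL. A minimal, standalone sketch of the same izip/repeat fan-out idiom (names and values below are illustrative, not part of w3af):

    from itertools import izip, repeat

    def check_upload(url, file_name, response_len):
        # stand-in for the worker: one varying and two fixed arguments
        return (url, file_name, response_len)

    urls = ['http://target/upload/test.txt', 'http://target/files/test.txt']
    args = izip(urls, repeat('test.txt'), repeat(1024))
    print [check_upload(*a) for a in args]
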
Example #2
    def test_nonall_item_key_value_lists(self):
        for init in self.inits:
            dic = odict(init.items())
            omd = omdict(init.items())

            # Testing items(), keys(), values(), lists(), and listitems().
            assert omd.items() == dic.items()
            assert omd.keys() == dic.keys()
            assert omd.values() == dic.values()
            iterator = izip(omd.keys(), omd.lists(), omd.listitems())
            for key, valuelist, listitem in iterator:
                assert omd.values(key) == omd.getlist(key) == valuelist
                assert omd.items(key) == [i for i in init.items() if i[0] == key]
                assert listitem == (key, valuelist)

            # Testing iteritems(), iterkeys(), itervalues(), and iterlists().
            for key1, key2 in izip(omd.iterkeys(), dic.iterkeys()):
                assert key1 == key2
            for val1, val2 in izip(omd.itervalues(), dic.itervalues()):
                assert val1 == val2
            for item1, item2 in izip(omd.iteritems(), dic.iteritems()):
                assert item1 == item2
            for key, values in izip(omd.iterkeys(), omd.iterlists()):
                assert omd.getlist(key) == values
            iterator = izip(omd.iterkeys(), omd.iterlists(), omd.iterlistitems())
            for key, valuelist, listitem in iterator:
                assert listitem == (key, valuelist)

            # Test iteritems() and itervalues() with a key.
            for key in omd.iterkeys():
                assert list(omd.iteritems(key)) == zip(repeat(key), omd.getlist(key))
                assert list(omd.iterallitems(key)) == zip(repeat(key), omd.getlist(key))
            for nonkey in self.nonkeys:
                self.assertRaises(KeyError, omd.iteritems, nonkey)
                self.assertRaises(KeyError, omd.itervalues, nonkey)
Example #3
def _WaitJob(
    apitools_client, messages_module, job_reference, progress_reporter,
    status='DONE', wait=sys.maxint):
  """Poll for a job to run until it reaches the requested status.

  Arguments:
    apitools_client: the client to be used for polling
    messages_module: The module defining messages used in apitools calls.
    job_reference: JobReference to poll.
    progress_reporter: a job_progress.ProgressReporter
      that will be called after each job poll.
    status: (optional, default 'DONE') Desired job status.
    wait: (optional, default maxint) Max wait time.

  Returns:
    The job object returned by the final status call.

  Raises:
    StopIteration: If polling does not reach the desired state before
      timing out.
    ValueError: If given an invalid wait value.
  """
  start_time = time.time()
  job = None

  # This is a first pass at wait logic: we ping at 1s intervals a few
  # times, then back off gradually up to 30s, and keep polling every
  # 30s until we've run out of time.
  waits = itertools.chain(
      itertools.repeat(1, 8),
      xrange(2, 30, 3),
      itertools.repeat(30))
  current_wait = 0
  current_status = 'UNKNOWN'
  while current_wait <= wait:
    try:
      done, job = _PollJob(
          apitools_client, messages_module, job_reference, status=status,
          wait=wait)
      current_status = job.status.state
      if done:
        progress_reporter.Print(
            job_reference.jobId, current_wait, current_status)
        break
    except bigquery.CommunicationError as e:
      # Communication errors while waiting on a job are okay.
      logging.warning('Transient error during job status check: %s', e)
    except bigquery.BackendError as e:
      # Temporary server errors while waiting on a job are okay.
      logging.warning('Transient error during job status check: %s', e)
    for _ in xrange(waits.next()):
      current_wait = time.time() - start_time
      progress_reporter.Print(job_reference.jobId, current_wait, current_status)
      time.sleep(1)
  else:
    raise StopIteration(
        'Wait timed out. Operation not finished, in state {0}'.format(
            current_status))
  progress_reporter.Done()
  return job
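
The waits iterator above encodes the polling schedule declaratively: eight one-second pings, intervals growing from 2s in steps of 3s, then a constant 30s. A small sketch (Python 2, matching the xrange/next() usage above) that just prints the first fifteen intervals:

    import itertools

    waits = itertools.chain(
        itertools.repeat(1, 8),
        xrange(2, 30, 3),
        itertools.repeat(30))
    print list(itertools.islice(waits, 15))
    # -> [1, 1, 1, 1, 1, 1, 1, 1, 2, 5, 8, 11, 14, 17, 20]
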
Example #4
def setup_weights():
    Tns.w0_array = []
    Tns.w0 = Weights(Tns.w0_array, 1)
    Tns.w1_array = [[j / 63.0 for j in range(i * 8, 8 + i * 8)] for i in range(8)]
    Tns.w1 = Weights(Tns.w1_array, 1)
    Tns.w2_array = [
        [j / 63.0 for j in list(itertools.chain(*zip(itertools.repeat(NAN), range(i * 8, 4 + i * 8))))]
        for i in range(8)
    ]
    Tns.w2 = Weights(Tns.w2_array, 1)
    Tns.w3_array = (
        numpy.array(
            splice(
                [
                    (
                        list(itertools.chain(*zip(range(i * 8, 4 + i * 8), itertools.repeat(NAN)))),
                        list(itertools.chain(*zip(itertools.repeat(NAN), range(i * 8, 4 + i * 8)))),
                    )
                    for i in range(4)
                ]
            ),
            dtype=float,
        )
        / 63.0
    )
    Tns.w3 = Weights(Tns.w3_array, 1)
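
The chain(*zip(itertools.repeat(NAN), seq)) construction used for w2_array and w3_array interleaves a constant NAN before every element of seq. A tiny standalone illustration of just that interleaving (using float('nan') for NAN):

    import itertools

    NAN = float('nan')
    seq = range(3)
    print(list(itertools.chain(*zip(itertools.repeat(NAN), seq))))
    # -> [nan, 0, nan, 1, nan, 2]
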
Example #5
    def test_addlist(self):
        for init in self.inits:
            omd = omdict(init)
            for nonkey in self.nonkeys:
                assert (nonkey, self.valuelist) not in omd.allitems()
                assert omd.addlist(nonkey, self.valuelist) == omd
                assert omd.getlist(nonkey) == self.valuelist
                assert (omd.allitems()[-1 * len(self.valuelist):] ==
                        zip(repeat(nonkey), self.valuelist))

            # Repeat the addlist() calls with the same items and make sure the old
            # items aren't replaced.
            oldomd = omd.copy()
            for nonkey in self.nonkeys:
                for value in self.valuelist:
                    assert (nonkey, value) in omd.allitems()
                assert omd.addlist(nonkey, self.valuelist) == omd
                assert len(omd.getlist(nonkey)) == (len(oldomd.getlist(nonkey)) +
                                                    len(self.valuelist))
                assert omd.getlist(nonkey) == oldomd.getlist(nonkey) + self.valuelist
                assert (omd.allitems()[-1 * len(self.valuelist):] ==
                        zip(repeat(nonkey), self.valuelist))

            # If an empty list is provided to addlist(), nothing is added.
            omd = omdict(init)
            for nonkey in self.nonkeys:
                assert omd.addlist(nonkey) == omd and nonkey not in omd
                assert omd.addlist(nonkey, []) == omd and nonkey not in omd
Example #6
  def WorkHorse(cls, tasks):
    """Runs the workhorse for the command.

    Args:
      tasks: OrderedDict {int, set(string)}: Dict from priority to set of tasks to execute at the
          priority. Note: the dict is ordered by priority.

    Return:
      (list, list): Returns a tuple of list in the form
          (successful_tasks, failed_tasks) specifying tasks that succeeded and
          ones that failed.
    """
    all_tasks = []
    dirs_to_import = {}
    dir_to_task_map = {}
    for set_tasks in tasks.itervalues():
      for task in set_tasks:
        all_tasks += [task]
        out_dir = PipelineUtils.GetOutDirForTask(task)
        publish_dir = PipelineUtils.GetPublishCurrentDirForTask(task)
        if not out_dir or not publish_dir: continue
        dirs_to_import[publish_dir] = out_dir
        dir_to_task_map[publish_dir] = (dir_to_task_map.get(publish_dir, []) + [task])

    # Check if there are any directories to publish.
    if not dirs_to_import:
      TermColor.Error('Did not find any dirs to import. Do not forget to specify publish root '
                      'using --publish_root')
      return ([], all_tasks)

    # Create all the target dirs to import to.
    for dir in dirs_to_import.itervalues():
      FileUtils.MakeDirs(dir)

    # Run all the copy tasks.
    successful_dirs = []; failed_dirs = []
    args = itertools.izip(itertools.repeat(cls), itertools.repeat('_RunSingeTask'),
                          dirs_to_import.keys(), dirs_to_import.values())
    dir_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
    if not dir_res:
      TermColor.Error('Could not process: %s' % all_tasks)
      return ([], all_tasks)

    for (res, dir) in dir_res:
      if res == Importer.EXITCODE['SUCCESS']:
        successful_dirs += [dir]
      elif res == Importer.EXITCODE['FAILURE']:
        failed_dirs += [dir]
      else:
        TermColor.Fatal('Invalid return %d code for %s' % (res, dir))

    # Get the reverse mapping from dirs to tasks.
    successful_tasks = []; failed_tasks = []
    for i in successful_dirs:
      successful_tasks += dir_to_task_map.get(i, [])

    for i in failed_dirs:
      failed_tasks += dir_to_task_map.get(i, [])

    return (successful_tasks, failed_tasks)
Example #7
    def define(self, *names, **kwargs):
        """Define variable in the problem

        Variables must be defined before they can be accessed by var() or set().
        This function takes keyword arguments lower and upper to define the
        bounds of the variable (default: -inf to inf). The keyword argument types
        can be used to select the type of the variable (only Continuous is supported).
        """

        names = tuple(names)
        lower = kwargs.get('lower', None)
        upper = kwargs.get('upper', None)
        vartype = kwargs.get('types', None)

        # Repeat values if a scalar is given
        if lower is None or isinstance(lower, numbers.Number):
            lower = repeat(lower, len(names))
        if upper is None or isinstance(upper, numbers.Number):
            upper = repeat(upper, len(names))
        if vartype is None or vartype in (VariableType.Continuous, VariableType.Binary,
                                            VariableType.Integer):
            vartype = repeat(vartype, len(names))

        lp_names = tuple(next(self._var_names) for name in names)

        # Assign default values
        vartype = (VariableType.Continuous if value is None else value for value in vartype)

        self._variables.update(izip(names, lp_names))
        for name, lower, upper, t in izip(lp_names, lower, upper, vartype):
            if t != VariableType.Continuous:
                raise ValueError('Solver does not support non-continuous types')
            self._p.add_variable(0, lower, upper, name)
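
repeat() serves here to broadcast a scalar bound across all variable names so that a single izip loop handles both the scalar case and the one-bound-per-variable case. A minimal standalone illustration of that broadcasting step (Python 2 izip, made-up values, not tied to the solver class above):

    from itertools import izip, repeat
    import numbers

    names = ('x', 'y', 'z')
    lower = 0             # scalar: applies to every variable
    upper = [10, 20, 30]  # sequence: one bound per variable

    if lower is None or isinstance(lower, numbers.Number):
        lower = repeat(lower, len(names))
    if upper is None or isinstance(upper, numbers.Number):
        upper = repeat(upper, len(names))

    print [(n, lo, up) for n, lo, up in izip(names, lower, upper)]
    # -> [('x', 0, 10), ('y', 0, 20), ('z', 0, 30)]
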
Example #8
def _grb_add_compound_drain(self, compound, lb=None, ub=None):
    if lb is None:
        lb = options.lower_bound
    if ub is None:
        ub = options.upper_bound
    if hasattr(compound, "__iter__"):
        # we really add multiple compounds
        if hasattr(lb, "__iter__"):
            lb_iter = lb
        else:
            lb_iter = itertools.repeat(lb)
        if hasattr(ub, "__iter__"):
            ub_iter = ub
        else:
            ub_iter = itertools.repeat(ub)
        changes = [self._add_drain(cmpd, lb, ub) for (cmpd, lb, ub)\
                in itertools.izip(compound, lb_iter, ub_iter)]
        if any(changes):
            self._model.update()
        # we allow for lazy updating of the model here (better not be a bug)
        for cmpd in compound:
            var = self._drains[cmpd]
            self._add_transport(cmpd, var, -1.0)
    else:
        if self._add_drain(compound, lb, ub):
            self._model.update()
            var = self._drains[compound]
            self._add_transport(compound, var, -1.0)
Example #9
def _grb_set_medium(self, compound, lb=None, ub=None):
    # we allow for lazy updating of the model here (better not be a bug)
    if lb is None:
        lb = options.lower_bound
    if ub is None:
        ub = options.upper_bound
    # constrain all sources first
    for source in self._sources.itervalues():
        source.lb = 0.0
        source.ub = 0.0
    if hasattr(compound, "__iter__"):
        # we really add multiple compounds
        if hasattr(lb, "__iter__"):
            lb_iter = lb
        else:
            lb_iter = itertools.repeat(lb)
        if hasattr(ub, "__iter__"):
            ub_iter = ub
        else:
            ub_iter = itertools.repeat(ub)
        for (cmpd, lb, ub) in itertools.izip(compound, lb_iter, ub_iter):
            var = self._sources[cmpd]
            var.lb = lb
            var.ub = ub
    else:
        var = self._sources[compound]
        var.lb = lb
        var.ub = ub
Example #10
 def nthSuperUglyNumber(self, n, primes):
     """
     :type n: int
     :type primes: List[int]
     :rtype: int
     """
      if n <= 0:
         return 1
     ret = []
     ret.append(1)
     curvalue = list(itertools.repeat(0, len(primes)))
     indexs = list(itertools.repeat(0, len(primes)))
     
     while len(ret) < n:
         for i in range(len(primes)):
             curvalue[i] = ret[indexs[i]] * primes[i]
         temp = curvalue[0]
         for i in range(1, len(primes)):
             temp = min(temp, curvalue[i])
         for i in range(len(primes)):
             if temp == curvalue[i]:
                 indexs[i] += 1
         ret.append(temp)
     
     return ret.pop()
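
A standalone check of the same algorithm (a module-level copy of the logic above, so it runs without the surrounding class; the expected value follows the usual super ugly number definition):

    import itertools

    def nth_super_ugly_number(n, primes):
        # module-level copy of the method above, used only for a quick check
        ret = [1]
        curvalue = list(itertools.repeat(0, len(primes)))
        indexs = list(itertools.repeat(0, len(primes)))
        while len(ret) < n:
            for i in range(len(primes)):
                curvalue[i] = ret[indexs[i]] * primes[i]
            temp = min(curvalue)
            for i in range(len(primes)):
                if temp == curvalue[i]:
                    indexs[i] += 1
            ret.append(temp)
        return ret[-1]

    print(nth_super_ugly_number(12, [2, 7, 13, 19]))  # -> 32
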
Example #11
    def __init__(self, vertices, faces, colors, name='unknown', line_mode=False):

        # sanity checks
        len_verts = len(vertices)
        self.name = name
        for face in faces:
            assert len(face) >= 3 or line_mode
            for index in face:
                assert 0 <= index < len_verts

        # convert vertices from tuple to Vector if required
        if len(vertices) > 0 and not isinstance(vertices[0], Vector):
            #print vertices
            vertices = [Vector(*v) for v in vertices]

        # if colors is a single Color, convert it to an endless sequence of
        # identical colors, one for each face; fall back to red when no
        # colors are given
        if isinstance(colors, Color):
            colors = repeat(colors)
        elif colors is None:
            colors = repeat(Color.Red)

        self.vertices = vertices
        self.faces = [
            Face(face, color, self, source=name)
            for face, color in zip(faces, colors)
        ]
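
zip() stops at the shorter of its inputs, so pairing the finite faces list with the endless repeat(colors) stream yields exactly one color per face. A tiny illustration of that pairing (with a plain string standing in for a Color instance):

    from itertools import repeat

    faces = [(0, 1, 2), (2, 3, 0)]
    print(list(zip(faces, repeat('red'))))
    # -> [((0, 1, 2), 'red'), ((2, 3, 0), 'red')]
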
Example #12
def test_body_shutoff_on_deadman(test_controller):
    """verify that _body() behaves appropriately when the dead timer expires"""
    DEAD_INTERVAL = 10
    t = time.time()
    with mock.patch('pi_pwm.controllers.time.time', mock.Mock()) as time_time:
        with mock.patch('pi_pwm.controllers.time.sleep', mock.Mock()) as time_sleep:
            test_controller.dead_interval = DEAD_INTERVAL
            time_time.side_effect = itertools.repeat(t)
            test_controller.duty = 1
            assert not test_controller.is_on
            test_controller._body()
            assert test_controller.is_on
            # half-way to dead time
            time_time.side_effect = itertools.repeat(t+(DEAD_INTERVAL/2))
            test_controller._body()
            assert test_controller.is_on
            # dead time has expired
            time_time.side_effect = itertools.repeat(t+DEAD_INTERVAL)
            test_controller._body()
            assert test_controller.dead_timer == 0
            assert not test_controller.is_on
            # reset it
            test_controller.ping()
            test_controller._body()
            assert test_controller.dead_timer == 10
            assert test_controller.is_on
Example #13
    def crawl(self, fuzzable_request):
        """
        Searches for new URLs by adding and subtracting numbers to the file
        and the parameters.

        :param fuzzable_request: A fuzzable_request instance that contains
                                     (among other things) the URL to test.
        """
        url = fuzzable_request.get_url()
        headers = Headers([("Referer", url.url_string)])

        original_response = self._uri_opener.GET(fuzzable_request.get_uri(), cache=True, headers=headers)

        if original_response.is_text_or_html() or self._fuzz_images:

            fr_generator = self._mangle_digits(fuzzable_request)
            response_repeater = repeat(original_response)
            header_repeater = repeat(headers)

            args = izip(fr_generator, response_repeater, header_repeater)

            self.worker_pool.map_multi_args(self._do_request, args)

            # I add myself so the next call to this plugin won't find me ...
            # Example: index1.html ---> index2.html --!!--> index1.html
            self._already_visited.add(fuzzable_request.get_uri())
Example #14
    def results(self):
        p = self.params
        pn = list(p.keys())
        results = []
        param_space = [
            dict(list(zip(pn, pset))) for pset in
            itertools.product(
                *[numpy.arange(p[k][0], p[k][1]+.000001, p[k][2]) for k in pn]
            )
        ]

        args_gen = list(zip(param_space,
                            itertools.repeat(self.strategy_fn),
                            itertools.repeat(self.ohlc),
                            itertools.repeat(self.metrics)))

        if self.processes != 1:
            pool = multiprocessing.Pool(self.processes)
            try:
                results = pool.map(_embedded_backtest, args_gen)
            except KeyboardInterrupt:
                pool.close()
                pool.join()
        else:
            results = []
            for a in args_gen:
                results.append(_embedded_backtest(a))

        return pandas.DataFrame(results)
Example #15
def main():

    script_dir, out_dir = get_paths()

    test_files = []
    inner_html_files = []

    if len(sys.argv) > 2:
        test_iterator = itertools.izip(
            itertools.repeat(False),
            sorted(os.path.abspath(item) for item in
                   glob.glob(os.path.join(sys.argv[2], "*.dat"))))
    else:
        test_iterator = itertools.chain(
            itertools.izip(itertools.repeat(False),
                           sorted(support.get_data_files("tree-construction"))),
            itertools.izip(itertools.repeat(True),
                           sorted(support.get_data_files(
                        os.path.join("tree-construction", "scripted")))))

    for (scripted, test_file) in test_iterator:
        input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
        if scripted:
            input_file_name = "scripted_" + input_file_name
        test_data = support.TestData(test_file)
        test_filename, inner_html_file_name = make_tests(script_dir, out_dir,
                                                         input_file_name, test_data)
        if test_filename is not None:
            test_files.append(test_filename)
        if inner_html_file_name is not None:
            inner_html_files.append(inner_html_file_name)
Example #16
def applyGrid(_geo_crimes, _n, _grid, _column):
    if _n > 128:
        _n = 128
        print("n was too big. Set to 128.")
    print("Splitting crimes into smaller frames to leverage parallelization.")
    _l = len(_geo_crimes.index)
    _crimes_args = []
    _covered = 0
    for _i in range(_n-1):
        _a, _b = int(round(_i*(_l/_n))), int(round((_i+1)*(_l/_n)))
        _crimes_args.append(_geo_crimes[_a:_b])
        _covered = _covered + (_b - _a)
    _crimes_args.append(_geo_crimes[_covered:len(_geo_crimes.index)])
    print("{} data-chunks created.".format(len(_crimes_args)))
    
    
    print("Trying to start {} parallel processes.".format(_n))
    _pool = Pool(processes=_n)
    print("{} parallel process started.".format(_n))
    _result = _pool.starmap(_para_crimes_in_cell, zip(_crimes_args, 
                                          repeat(_grid), repeat(_column)))
    _pool.terminate()
    print("Process terminated.")
    _df = _result.pop(0)
    for _frame in _result:
        _df = _df.append(_frame)
    print("{} crimes where spatialised to their cell.".format(len(_df.index)))
    return _df
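
The starmap call above pairs each chunk of crimes with the same grid and column by zipping against repeat(); it is the Python 3 counterpart of the izip/repeat pattern seen in earlier examples. A self-contained sketch of the same call shape, with a toy worker and made-up values (not the original _para_crimes_in_cell):

    from itertools import repeat
    from multiprocessing import Pool

    def count_above(chunk, threshold, label):
        # toy stand-in for the real per-chunk worker
        return label, sum(1 for v in chunk if v > threshold)

    chunks = [[1, 5, 9], [2, 8], [7, 7, 7]]

    if __name__ == '__main__':
        with Pool(processes=3) as pool:
            print(pool.starmap(count_above, zip(chunks, repeat(4), repeat('crimes'))))
            # -> [('crimes', 2), ('crimes', 1), ('crimes', 3)]
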
Example #17
    def __init__(self, pololu_joint_names=None, dyn_joint_names=None):
        super(JointStatePublisher, self).__init__()
        self.pololu_joint_names = pololu_joint_names
        self.dyn_joint_names = dyn_joint_names
        self.rate = rospy.Rate(rospy.get_param('~sensor_rate', 15.0))
        self.base_frame_id = rospy.get_param('~base_frame_id', "base_link")

        # Initialize publisher
        self.joint_state_pub = rospy.Publisher("joint_states", JointState, queue_size=10)
        self.joint_state = JointState()
        self.joint_state.header.frame_id = self.base_frame_id


        self.joint_states_lock = RLock()

        # Subscribe to pololu servos
        if self.pololu_joint_names is not None:
            self.joint_state.name = self.pololu_joint_names
            num_pololu = len(self.pololu_joint_names)
            self.pololu_joint_positions = list(repeat(0.0, num_pololu))
            rospy.Subscriber("pololu/motor_states", MotorStateList, self.update_pololu_joint_states)

        if self.dyn_joint_names is not None:
            self.joint_state.name += self.dyn_joint_names
            num_dyn = len(self.dyn_joint_names)
            self.dyn_joint_positions = list(repeat(0.0, num_dyn))

            # Subscribe to dynamixels
            for name in self.dyn_joint_names:
                controller = name.replace('_joint', '') + '_controller/state'
                rospy.loginfo('Subscribing to dynamixel controller: {0}'.format(controller))
                rospy.Subscriber(controller, DynamixelJointState, self.update_dyn_joint_state)
Example #18
def merge(A, p, q, r):
    n1 = q - p + 1
    n2 = r - q

    L = list(repeat(None, n1))
    R = list(repeat(None, n2))

    for i in range(n1):
        L[i] = A[p + i]

    for j in range(n2):
        R[j] = A[q + j + 1]

    i = 0
    j = 0
    for k in range(p, r + 1):
        if i == n1:
            A[k] = R[j]
            j += 1
        elif j == n2:
            A[k] = L[i]
            i += 1
        elif L[i] <= R[j]:
            A[k] = L[i]
            i += 1
        else:
            A[k] = R[j]
            j += 1
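
A quick check of merge() on a list whose two halves A[p..q] and A[q+1..r] are already sorted, which is the precondition the merge step relies on (assumes the merge() above and its repeat import are in scope):

    A = [2, 5, 7, 1, 3, 6]
    merge(A, 0, 2, 5)   # merge the sorted runs A[0..2] and A[3..5] in place
    print(A)            # -> [1, 2, 3, 5, 6, 7]
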
Example #19
    def __init__(self, min=1, max=1, value=None):

        if value:
            self.generator = repeat(value)
        else:
            min, max = safe_range(min, max)
            self.generator = map(randrange, repeat(min), repeat(max))
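
The map(randrange, repeat(min), repeat(max)) line builds an endless stream of random values in [min, max); note that on Python 2 this would need itertools.imap to stay lazy, since both repeat() streams are infinite. A minimal standalone version of the same idea (Python 3, where map is lazy):

    from itertools import islice, repeat
    from random import randrange

    rolls = map(randrange, repeat(1), repeat(7))   # endless values in 1..6
    print(list(islice(rolls, 5)))                  # e.g. [3, 1, 6, 2, 5]
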
Example #20
    def initializeWorld(self):
        headColor = {border:DarkBlue,fill:LightGreen}
        self.colors = list(chain(repeat(defaultColor,2),
                            repeat(headColor,1),
                            repeat(defaultColor,1)
                            ))
        self.servoMode = False
        self.world.setGravity((0,0,0))
        mass = repeat((2500,0.05))
        positions = [ (0,0,0), (1,0,0) , (1,1,0), (0,1,0)]
        self.spheres = self.createSphericalBodies(positions)
        joint_pairs = zip(self.spheres,shift(self.spheres))
        self.joint_pairs = joint_pairs
        self.joints = [self.prJoint(*x) for x in joint_pairs] 
        self.motors = self.joints[1::2]
        for joint in self.joints:
            joint.setParam(ode.ParamFMax,9)
            joint.setParam(ode.ParamFMax2,9)
            joint.setParam(ode.ParamHiStop2,3.14/2-.1)
            joint.setParam(ode.ParamLoStop2,-3.14/2+.1)
            joint.setParam(ode.ParamHiStop,1)
            joint.setParam(ode.ParamLoStop,-.5)

        distances = [jointDistance(x) for x in self.joints]
        angles = self.getAngles()
        self.startingPosition = zip(distances,angles)
Example #21
def _array_parallel(fn, cls, genelist, chunksize=250, processes=1, **kwargs):
    """
    Returns an array of genes in `genelist`, using `bins` bins.

    `genelist` is a list of pybedtools.Interval objects

    Splits `genelist` into pieces of size `chunksize`, creating an array
    for each chunk and merging the results.

    A chunksize of 25-100 seems to work well on 8 cores.
    """
    pool = multiprocessing.Pool(processes)
    chunks = list(chunker(genelist, chunksize))
    # pool.map can only pass a single argument to the mapped function, so you
    # need this trick for passing multiple arguments; idea from
    # http://stackoverflow.com/questions/5442910/
    #               python-multiprocessing-pool-map-for-multiple-arguments
    #
    results = pool.map(
        func=_array_star,
        iterable=itertools.izip(
            itertools.repeat(fn),
            itertools.repeat(cls),
            chunks,
            itertools.repeat(kwargs)))
    pool.close()
    pool.join()
    return results
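
pool.map only passes a single argument to the worker, which is why the constant arguments are packed into tuples with izip/repeat and unpacked again inside _array_star (not shown above). A self-contained Python 3 sketch of that wrapper trick, with toy names:

    import itertools
    import multiprocessing

    def tag(prefix, items, suffix):
        return [prefix + i + suffix for i in items]

    def tag_star(args):
        # pool.map hands over one tuple per call; unpack it for the real worker
        return tag(*args)

    if __name__ == '__main__':
        pool = multiprocessing.Pool(2)
        chunks = [['a', 'b'], ['c', 'd']]
        packed = zip(itertools.repeat('<'), chunks, itertools.repeat('>'))
        print(pool.map(tag_star, packed))
        # -> [['<a>', '<b>'], ['<c>', '<d>']]
        pool.close()
        pool.join()
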
Example #22
def _padImpurityMatrix(matrix, preChannels, postChannels):
    """Align the values of an isotope impurity matrix and fill up with 0.

    NOTE:
        The length of the rows in the "matrix" must be the sum of "preChannels"
        and "postChannels" + 1.

    :params matrix: a matrix (2d nested list) containing numbers, each isobaric
        channel must be present as a row.
    :params preChannels: number of matrix columns with a nominal mass shift < 0
        (-1, -2,..) in respect to the reporter ion mz value.
    :params postChannels: number of matrix columns with a nominal mass shift > 0
        (+1, +2,..) in respect to the reporter ion mz value.

    :returns: extended matrix, where the number of rows is unchanged but the
        length of each row is extended to the number of rows.
    """
    extendedMatrix = list()
    lastMatrixI = len(matrix)-1
    for i, line in enumerate(matrix):
        prePadding = itertools.repeat(0., i)
        postPadding = itertools.repeat(0., lastMatrixI-i)
        newLine = list(itertools.chain(prePadding, line, postPadding))
        extendedMatrix.append(newLine[preChannels:-postChannels])

    return extendedMatrix
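
A worked illustration of the padding with three hypothetical channels and one impurity column on each side (preChannels=1, postChannels=1, so each row has 1 + 1 + 1 = 3 entries; the impurity values are made up):

    matrix = [[0.0, 0.9, 0.1],
              [0.1, 0.8, 0.1],
              [0.2, 0.7, 0.0]]
    print(_padImpurityMatrix(matrix, 1, 1))
    # -> [[0.9, 0.1, 0.0], [0.1, 0.8, 0.1], [0.0, 0.2, 0.7]]
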
Example #23
def repeatfunc(func, times=None, *args):
    """Call *func* with *args* repeatedly, returning an iterable over the
    results.

    If *times* is specified, the iterable will terminate after that many
    repetitions:

        >>> from operator import add
        >>> times = 4
        >>> args = 3, 5
        >>> list(repeatfunc(add, times, *args))
        [8, 8, 8, 8]

    If *times* is ``None`` the iterable will not terminate:

        >>> from random import randrange
        >>> times = None
        >>> args = 1, 11
        >>> take(6, repeatfunc(randrange, times, *args))  # doctest:+SKIP
        [2, 4, 8, 1, 8, 4]

    """
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
Example #24
 def _argiter(self,arg):
   """return appropriate fast iterable for arg"""
   if is_scalar(arg):
     return it.repeat(arg)
   if arg.rank == 0 or na.size(arg) == 1:
     return it.repeat(arg.flat[0])
   return iter(arg)
Example #25
 def test_no_len_for_infinite_repeat(self):
     # The repeat() object can also be infinite
     if support.check_impl_detail(pypy=True):
         # 3.4 (PEP 424) behavior
         self.assertEqual(len(repeat(None)), NotImplemented)
     else:
         self.assertRaises(TypeError, len, repeat(None))
Example #26
File: base.py Project: slucia/breze
    def _make_args(self, X, Z, imp_weight=None):
        batch_size = getattr(self, 'batch_size', None)
        if batch_size is None:
            X, Z = cast_array_to_local_type(X), cast_array_to_local_type(Z)
            if imp_weight is not None:
                imp_weight = cast_array_to_local_type(imp_weight)
                data = itertools.repeat([X, Z, imp_weight])
            else:
                data = itertools.repeat([X, Z])
        elif batch_size < 1:
            raise ValueError('need strictly positive batch size')
        else:
            if imp_weight is not None:
                data = iter_minibatches([X, Z, imp_weight], self.batch_size,
                                        list(self.sample_dim) + [self.sample_dim[0]])
                data = ((cast_array_to_local_type(x),
                         cast_array_to_local_type(z),
                         cast_array_to_local_type(w)) for x, z, w in data)
            else:
                data = iter_minibatches([X, Z], self.batch_size,
                                        self.sample_dim)

                data = ((cast_array_to_local_type(x),
                         cast_array_to_local_type(z)) for x, z in data)

        args = ((i, {}) for i in data)
        return args
Example #27
def checkExistingGPS(GPSData, session):
    GPSData['datetime'] = GPSData.apply(lambda row: np.datetime64(
        row['Date/Time']).astype(datetime), axis=1)
    GPSData['id'] = range(GPSData.shape[0])
    maxDateGPS = GPSData['datetime'].max()
    minDateGPS = GPSData['datetime'].min()

    # round lat/lon to 3 decimals for data from Files
    GPSData['lat'] = GPSData['Latitude(N)'].round(3)
    GPSData['lon'] = GPSData['Longitude(E)'].round(3)

    # Retrieve existing data from DB
    queryGPS = select([ArgosGps.pk_id,
                      ArgosGps.date,
                      ArgosGps.lat,
                      ArgosGps.lon,
                      ArgosGps.ptt]
                      ).where(ArgosGps.type_ == 'GPS')
    queryGPS = queryGPS.where(
        and_(ArgosGps.date >= minDateGPS, ArgosGps.date <= maxDateGPS))
    data = session.execute(queryGPS).fetchall()

    # Load data from DB into dataframe
    GPSrecords = pd.DataFrame.from_records(
        data,
        columns=[ArgosGps.pk_id.name,
                 ArgosGps.date.name,
                 ArgosGps.lat.name,
                 ArgosGps.lon.name,
                 ArgosGps.ptt.name],
        coerce_float=True)

    # round lat/lon to 3 decimals for data from DB
    GPSrecords['lat'] = GPSrecords['lat'].round(3)
    GPSrecords['lon'] = GPSrecords['lon'].round(3)

    # apply a merge/join between dataframes with data from Files and data
    # from DB
    merge = pd.merge(GPSData,
                     GPSrecords,
                     left_on=['datetime', 'lat', 'lon', 'ptt'],
                     right_on=['date', 'lat', 'lon', 'FK_ptt'])
    DFToInsert = GPSData[~GPSData['id'].isin(merge['id'])]

    DFToInsert = DFToInsert.drop(['id', 'datetime', 'lat', 'lon'], 1)
    DFToInsert.columns = ['date', 'lat', 'lon',
                          'speed', 'course', 'ele', 'FK_ptt']

    DFToInsert = DFToInsert.replace('2D fix', np.nan)
    DFToInsert = DFToInsert.replace('low alt', np.nan)
    DFToInsert.loc[:, ('type')] = list(
        itertools.repeat('GPS', len(DFToInsert.index)))
    DFToInsert.loc[:, ('checked')] = list(
        itertools.repeat(0, len(DFToInsert.index)))
    DFToInsert.loc[:, ('imported')] = list(
        itertools.repeat(0, len(DFToInsert.index)))
    DFToInsert.loc[:, ('creationDate')] = list(
        itertools.repeat(datetime.now(), len(DFToInsert.index)))

    return DFToInsert
Example #28
def notification_remove_cc(request, case_ids, cc_list):
    '''
    Description: Remove email addresses from the notification CC list of specific TestCases

    Params:      $case_ids - Integer/Array: one or more TestCase IDs

                 $cc_list - Array: contains the email addresses that will
                            be removed from each TestCase indicated by case_ids.

    Returns:     JSON. When it succeeds, status is 0 and message may be empty or
                 anything else, depending on the implementation. If something goes
                 wrong, status will be 1 and message will be a short description
                 of the error.
    '''

    try:
        validate_cc_list(cc_list)
    except (TypeError, ValidationError):
        raise

    try:
        tc_ids = pre_process_ids(case_ids)
        cursor = connection.writer_cursor
        ids_values = ",".join(itertools.repeat('%s', len(tc_ids)))
        email_values = ",".join(itertools.repeat('%s', len(cc_list)))
        sql = TC_REMOVE_CC % (ids_values, email_values)
        tc_ids.extend(cc_list)
        cursor.execute(sql, tc_ids)
        transaction.commit_unless_managed()
    except (TypeError, ValueError, Exception):
        raise
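
The ",".join(itertools.repeat('%s', n)) idiom builds exactly as many SQL placeholders as there are parameters, keeping the query parameterized instead of interpolating values. A tiny standalone illustration with made-up values:

    import itertools

    tc_ids = [1, 2, 3]
    cc_list = ['a@example.com', 'b@example.com']
    ids_values = ",".join(itertools.repeat('%s', len(tc_ids)))
    email_values = ",".join(itertools.repeat('%s', len(cc_list)))
    print(ids_values)     # -> %s,%s,%s
    print(email_values)   # -> %s,%s
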
Example #29
 def dump(self, dest_filepath=os.getcwd(), parallel=False, verbose=False):
     if self.file_pointer != None:
         self.header.unpack(self.file_pointer, dest_filepath, verbose)
         for directory in self.header.dir_list:
             final_path = os.path.join(dest_filepath, directory.dir_name)
             if not os.path.exists(final_path):
                 os.makedirs(final_path)
         if parallel:
             master_file_list = []
             for dir in self.header.dir_list:
                 master_file_list.extend(dir.file_list)
             p = Pool()
             m = Manager()
             q = m.Queue()
             args = izip(master_file_list, repeat(q), repeat(self.filepath)) # pack arguments in such a way that multiprocessing can take them
             result = p.map_async(unpack_helper, args)                
             
             # monitor loop
             while True:
                 if result.ready():
                     break
                 else:
                     size = q.qsize()
                     sys.stdout.write("%.0f%%\r" % (size * 100/ len(master_file_list)) ) # based on number of files dumped
                     time.sleep(0.1)
             
         else:
             for dir in self.header.dir_list:
                 for data in dir.file_list:                    
                     global PAK_bytes_unpacked
                     PAK_bytes_unpacked += float(data.unpack())        
                     sys.stdout.write("%.0f%%\r" % (PAK_bytes_unpacked * 100/ PAK_filesize) ) # based on number of bytes written
Example #30
 def __init__(self, nodes, distance=None):
     if distance is None:
         self.nodes = itertools.repeat(None)
     elif distance == 0:
         self.nodes = itertools.repeat(nodes.next())
     else:
         self.nodes = itertools.islice(nodes, 0, None, distance)
        print "chan. range start has to be smaller than range end"
        sys.exit(0)
    chan_to += 1
else:
    print "you have to specify either single chan with --chan or a range with --range"
    sys.exit(0)

num_msgs = int(options.number)
concurrency = int(options.concurrency)
# XXX: actually we can tell option parser to check that for us
if num_msgs < 0 or concurrency < 0:
    print "illegal arguments"
    sys.exit(1)

chan_generator = itertools.cycle(xrange(chan_from, chan_to))
msg_generator = itertools.repeat(options.message*int(options.repeat), num_msgs)

base_url = options.url

class ChannelStats(dict):
    """container for statistics about single push channel
    """

    def __init__(self, *args):
        dict.__init__(self)
        self['no_listener'] = 0
        self['total'] = 0
        self['errors'] = 0

    def __str__(self):
        return "total/no listener/errors: %d,%d,%d" % (self['total'], self['no_listener'], self['errors'])
Example #32
 def test_crc24(self):
     self.assertEqual(0xb704ce, crc24(bytearray(b"")))
     self.assertEqual(0x21cf02, crc24(bytearray(b"123456789")))
     self.assertEqual(0xe84567, crc24(repeat(0, 1024 * 1024)))
Example #33
# %% cell_style="center"
# chain()
for i, d in enumerate(chain(data1, data2)):
    print(f"{i}x{d}", end=" ")

# %%
# cycle() never terminates either

for i, d in enumerate(cycle(data1)):
    print(f"{i}x{d}", end=" ")
    if i >= 10:
        break

# %%
# repeat()
padding = repeat(1000, 3)

for i, d in enumerate(chain(data1, padding, data2)):
    print(f"{i}x{d}", end=" ")

# %% [markdown] slideshow={"slide_type": "slide"}
# ### `islice()`

# %% cell_style="split"
# with islice we can, for example,
# skip every other line in a file
from pathlib import Path

# create a file
with Path('islice.txt').open('w') as f:
    for i in range(6):
Example #34
def max_weight_matching(G, maxcardinality=False, weight="weight"):
    """Compute a maximum-weighted matching of G.

    A matching is a subset of edges in which no node occurs more than once.
    The weight of a matching is the sum of the weights of its edges.
    A maximal matching cannot add more edges and still be a matching.
    The cardinality of a matching is the number of matched edges.

    Parameters
    ----------
    G : NetworkX graph
      Undirected graph

    maxcardinality: bool, optional (default=False)
       If maxcardinality is True, compute the maximum-cardinality matching
       with maximum weight among all maximum-cardinality matchings.

    weight: string, optional (default='weight')
       Edge data key corresponding to the edge weight.
       If key not found, uses 1 as weight.


    Returns
    -------
    matching : set
        A maximal matching of the graph.

    Notes
    -----
    If G has edges with weight attributes the edge data are used as
    weight values; otherwise the weights are assumed to be 1.

    This function takes time O(number_of_nodes ** 3).

    If all edge weights are integers, the algorithm uses only integer
    computations.  If floating point weights are used, the algorithm
    could return a slightly suboptimal matching due to numeric
    precision errors.

    This method is based on the "blossom" method for finding augmenting
    paths and the "primal-dual" method for finding a matching of maximum
    weight, both methods invented by Jack Edmonds [1]_.

    Bipartite graphs can also be matched using the functions present in
    :mod:`networkx.algorithms.bipartite.matching`.

    References
    ----------
    .. [1] "Efficient Algorithms for Finding Maximum Matching in Graphs",
       Zvi Galil, ACM Computing Surveys, 1986.
    """

    #
    # The algorithm is taken from "Efficient Algorithms for Finding Maximum
    # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
    # It is based on the "blossom" method for finding augmenting paths and
    # the "primal-dual" method for finding a matching of maximum weight, both
    # methods invented by Jack Edmonds.
    #
    # A C program for maximum weight matching by Ed Rothberg was used
    # extensively to validate this new code.
    #
    # Many terms used in the code comments are explained in the paper
    # by Galil. You will probably need the paper to make sense of this code.
    #

    class NoNode:
        """Dummy value which is different from any node."""

        pass

    class Blossom:
        """Representation of a non-trivial blossom or sub-blossom."""

        __slots__ = ["childs", "edges", "mybestedges"]

        # b.childs is an ordered list of b's sub-blossoms, starting with
        # the base and going round the blossom.

        # b.edges is the list of b's connecting edges, such that
        # b.edges[i] = (v, w) where v is a vertex in b.childs[i]
        # and w is a vertex in b.childs[wrap(i+1)].

        # If b is a top-level S-blossom,
        # b.mybestedges is a list of least-slack edges to neighbouring
        # S-blossoms, or None if no such list has been computed yet.
        # This is used for efficient computation of delta3.

        # Generate the blossom's leaf vertices.
        def leaves(self):
            for t in self.childs:
                if isinstance(t, Blossom):
                    yield from t.leaves()
                else:
                    yield t

    # Get a list of vertices.
    gnodes = list(G)
    if not gnodes:
        return set()  # don't bother with empty graphs

    # Find the maximum edge weight.
    maxweight = 0
    allinteger = True
    for i, j, d in G.edges(data=True):
        wt = d.get(weight, 1)
        if i != j and wt > maxweight:
            maxweight = wt
        allinteger = allinteger and (str(type(wt)).split("'")[1]
                                     in ("int", "long"))

    # If v is a matched vertex, mate[v] is its partner vertex.
    # If v is a single vertex, v does not occur as a key in mate.
    # Initially all vertices are single; updated during augmentation.
    mate = {}

    # If b is a top-level blossom,
    # label.get(b) is None if b is unlabeled (free),
    #                 1 if b is an S-blossom,
    #                 2 if b is a T-blossom.
    # The label of a vertex is found by looking at the label of its top-level
    # containing blossom.
    # If v is a vertex inside a T-blossom, label[v] is 2 iff v is reachable
    # from an S-vertex outside the blossom.
    # Labels are assigned during a stage and reset after each augmentation.
    label = {}

    # If b is a labeled top-level blossom,
    # labeledge[b] = (v, w) is the edge through which b obtained its label
    # such that w is a vertex in b, or None if b's base vertex is single.
    # If w is a vertex inside a T-blossom and label[w] == 2,
    # labeledge[w] = (v, w) is an edge through which w is reachable from
    # outside the blossom.
    labeledge = {}

    # If v is a vertex, inblossom[v] is the top-level blossom to which v
    # belongs.
    # If v is a top-level vertex, inblossom[v] == v since v is itself
    # a (trivial) top-level blossom.
    # Initially all vertices are top-level trivial blossoms.
    inblossom = dict(zip(gnodes, gnodes))

    # If b is a sub-blossom,
    # blossomparent[b] is its immediate parent (sub-)blossom.
    # If b is a top-level blossom, blossomparent[b] is None.
    blossomparent = dict(zip(gnodes, repeat(None)))

    # If b is a (sub-)blossom,
    # blossombase[b] is its base VERTEX (i.e. recursive sub-blossom).
    blossombase = dict(zip(gnodes, gnodes))

    # If w is a free vertex (or an unreached vertex inside a T-blossom),
    # bestedge[w] = (v, w) is the least-slack edge from an S-vertex,
    # or None if there is no such edge.
    # If b is a (possibly trivial) top-level S-blossom,
    # bestedge[b] = (v, w) is the least-slack edge to a different S-blossom
    # (v inside b), or None if there is no such edge.
    # This is used for efficient computation of delta2 and delta3.
    bestedge = {}

    # If v is a vertex,
    # dualvar[v] = 2 * u(v) where u(v) is the v's variable in the dual
    # optimization problem (if all edge weights are integers, multiplication
    # by two ensures that all values remain integers throughout the algorithm).
    # Initially, u(v) = maxweight / 2.
    dualvar = dict(zip(gnodes, repeat(maxweight)))

    # If b is a non-trivial blossom,
    # blossomdual[b] = z(b) where z(b) is b's variable in the dual
    # optimization problem.
    blossomdual = {}

    # If (v, w) in allowedge or (w, v) in allowedge, then the edge
    # (v, w) is known to have zero slack in the optimization problem;
    # otherwise the edge may or may not have zero slack.
    allowedge = {}

    # Queue of newly discovered S-vertices.
    queue = []

    # Return 2 * slack of edge (v, w) (does not work inside blossoms).
    def slack(v, w):
        return dualvar[v] + dualvar[w] - 2 * G[v][w].get(weight, 1)

    # Assign label t to the top-level blossom containing vertex w,
    # coming through an edge from vertex v.
    def assignLabel(w, t, v):
        b = inblossom[w]
        assert label.get(w) is None and label.get(b) is None
        label[w] = label[b] = t
        if v is not None:
            labeledge[w] = labeledge[b] = (v, w)
        else:
            labeledge[w] = labeledge[b] = None
        bestedge[w] = bestedge[b] = None
        if t == 1:
            # b became an S-vertex/blossom; add it(s vertices) to the queue.
            if isinstance(b, Blossom):
                queue.extend(b.leaves())
            else:
                queue.append(b)
        elif t == 2:
            # b became a T-vertex/blossom; assign label S to its mate.
            # (If b is a non-trivial blossom, its base is the only vertex
            # with an external mate.)
            base = blossombase[b]
            assignLabel(mate[base], 1, base)

    # Trace back from vertices v and w to discover either a new blossom
    # or an augmenting path. Return the base vertex of the new blossom,
    # or NoNode if an augmenting path was found.
    def scanBlossom(v, w):
        # Trace back from v and w, placing breadcrumbs as we go.
        path = []
        base = NoNode
        while v is not NoNode:
            # Look for a breadcrumb in v's blossom or put a new breadcrumb.
            b = inblossom[v]
            if label[b] & 4:
                base = blossombase[b]
                break
            assert label[b] == 1
            path.append(b)
            label[b] = 5
            # Trace one step back.
            if labeledge[b] is None:
                # The base of blossom b is single; stop tracing this path.
                assert blossombase[b] not in mate
                v = NoNode
            else:
                assert labeledge[b][0] == mate[blossombase[b]]
                v = labeledge[b][0]
                b = inblossom[v]
                assert label[b] == 2
                # b is a T-blossom; trace one more step back.
                v = labeledge[b][0]
            # Swap v and w so that we alternate between both paths.
            if w is not NoNode:
                v, w = w, v
        # Remove breadcrumbs.
        for b in path:
            label[b] = 1
        # Return base vertex, if we found one.
        return base

    # Construct a new blossom with given base, through S-vertices v and w.
    # Label the new blossom as S; set its dual variable to zero;
    # relabel its T-vertices to S and add them to the queue.
    def addBlossom(base, v, w):
        bb = inblossom[base]
        bv = inblossom[v]
        bw = inblossom[w]
        # Create blossom.
        b = Blossom()
        blossombase[b] = base
        blossomparent[b] = None
        blossomparent[bb] = b
        # Make list of sub-blossoms and their interconnecting edge endpoints.
        b.childs = path = []
        b.edges = edgs = [(v, w)]
        # Trace back from v to base.
        while bv != bb:
            # Add bv to the new blossom.
            blossomparent[bv] = b
            path.append(bv)
            edgs.append(labeledge[bv])
            assert label[bv] == 2 or (label[bv] == 1 and labeledge[bv][0]
                                      == mate[blossombase[bv]])
            # Trace one step back.
            v = labeledge[bv][0]
            bv = inblossom[v]
        # Add base sub-blossom; reverse lists.
        path.append(bb)
        path.reverse()
        edgs.reverse()
        # Trace back from w to base.
        while bw != bb:
            # Add bw to the new blossom.
            blossomparent[bw] = b
            path.append(bw)
            edgs.append((labeledge[bw][1], labeledge[bw][0]))
            assert label[bw] == 2 or (label[bw] == 1 and labeledge[bw][0]
                                      == mate[blossombase[bw]])
            # Trace one step back.
            w = labeledge[bw][0]
            bw = inblossom[w]
        # Set label to S.
        assert label[bb] == 1
        label[b] = 1
        labeledge[b] = labeledge[bb]
        # Set dual variable to zero.
        blossomdual[b] = 0
        # Relabel vertices.
        for v in b.leaves():
            if label[inblossom[v]] == 2:
                # This T-vertex now turns into an S-vertex because it becomes
                # part of an S-blossom; add it to the queue.
                queue.append(v)
            inblossom[v] = b
        # Compute b.mybestedges.
        bestedgeto = {}
        for bv in path:
            if isinstance(bv, Blossom):
                if bv.mybestedges is not None:
                    # Walk this subblossom's least-slack edges.
                    nblist = bv.mybestedges
                    # The sub-blossom won't need this data again.
                    bv.mybestedges = None
                else:
                    # This subblossom does not have a list of least-slack
                    # edges; get the information from the vertices.
                    nblist = [(v, w) for v in bv.leaves()
                              for w in G.neighbors(v) if v != w]
            else:
                nblist = [(bv, w) for w in G.neighbors(bv) if bv != w]
            for k in nblist:
                (i, j) = k
                if inblossom[j] == b:
                    i, j = j, i
                bj = inblossom[j]
                if (bj != b and label.get(bj) == 1
                        and ((bj not in bestedgeto)
                             or slack(i, j) < slack(*bestedgeto[bj]))):
                    bestedgeto[bj] = k
            # Forget about least-slack edge of the subblossom.
            bestedge[bv] = None
        b.mybestedges = list(bestedgeto.values())
        # Select bestedge[b].
        mybestedge = None
        bestedge[b] = None
        for k in b.mybestedges:
            kslack = slack(*k)
            if mybestedge is None or kslack < mybestslack:
                mybestedge = k
                mybestslack = kslack
        bestedge[b] = mybestedge

    # Expand the given top-level blossom.
    def expandBlossom(b, endstage):
        # Convert sub-blossoms into top-level blossoms.
        for s in b.childs:
            blossomparent[s] = None
            if isinstance(s, Blossom):
                if endstage and blossomdual[s] == 0:
                    # Recursively expand this sub-blossom.
                    expandBlossom(s, endstage)
                else:
                    for v in s.leaves():
                        inblossom[v] = s
            else:
                inblossom[s] = s
        # If we expand a T-blossom during a stage, its sub-blossoms must be
        # relabeled.
        if (not endstage) and label.get(b) == 2:
            # Start at the sub-blossom through which the expanding
            # blossom obtained its label, and relabel sub-blossoms until
            # we reach the base.
            # Figure out through which sub-blossom the expanding blossom
            # obtained its label initially.
            entrychild = inblossom[labeledge[b][1]]
            # Decide in which direction we will go round the blossom.
            j = b.childs.index(entrychild)
            if j & 1:
                # Start index is odd; go forward and wrap.
                j -= len(b.childs)
                jstep = 1
            else:
                # Start index is even; go backward.
                jstep = -1
            # Move along the blossom until we get to the base.
            v, w = labeledge[b]
            while j != 0:
                # Relabel the T-sub-blossom.
                if jstep == 1:
                    p, q = b.edges[j]
                else:
                    q, p = b.edges[j - 1]
                label[w] = None
                label[q] = None
                assignLabel(w, 2, v)
                # Step to the next S-sub-blossom and note its forward edge.
                allowedge[(p, q)] = allowedge[(q, p)] = True
                j += jstep
                if jstep == 1:
                    v, w = b.edges[j]
                else:
                    w, v = b.edges[j - 1]
                # Step to the next T-sub-blossom.
                allowedge[(v, w)] = allowedge[(w, v)] = True
                j += jstep
            # Relabel the base T-sub-blossom WITHOUT stepping through to
            # its mate (so don't call assignLabel).
            bw = b.childs[j]
            label[w] = label[bw] = 2
            labeledge[w] = labeledge[bw] = (v, w)
            bestedge[bw] = None
            # Continue along the blossom until we get back to entrychild.
            j += jstep
            while b.childs[j] != entrychild:
                # Examine the vertices of the sub-blossom to see whether
                # it is reachable from a neighbouring S-vertex outside the
                # expanding blossom.
                bv = b.childs[j]
                if label.get(bv) == 1:
                    # This sub-blossom just got label S through one of its
                    # neighbours; leave it be.
                    j += jstep
                    continue
                if isinstance(bv, Blossom):
                    for v in bv.leaves():
                        if label.get(v):
                            break
                else:
                    v = bv
                # If the sub-blossom contains a reachable vertex, assign
                # label T to the sub-blossom.
                if label.get(v):
                    assert label[v] == 2
                    assert inblossom[v] == bv
                    label[v] = None
                    label[mate[blossombase[bv]]] = None
                    assignLabel(v, 2, labeledge[v][0])
                j += jstep
        # Remove the expanded blossom entirely.
        label.pop(b, None)
        labeledge.pop(b, None)
        bestedge.pop(b, None)
        del blossomparent[b]
        del blossombase[b]
        del blossomdual[b]

    # Swap matched/unmatched edges over an alternating path through blossom b
    # between vertex v and the base vertex. Keep blossom bookkeeping
    # consistent.
    def augmentBlossom(b, v):
        # Bubble up through the blossom tree from vertex v to an immediate
        # sub-blossom of b.
        t = v
        while blossomparent[t] != b:
            t = blossomparent[t]
        # Recursively deal with the first sub-blossom.
        if isinstance(t, Blossom):
            augmentBlossom(t, v)
        # Decide in which direction we will go round the blossom.
        i = j = b.childs.index(t)
        if i & 1:
            # Start index is odd; go forward and wrap.
            j -= len(b.childs)
            jstep = 1
        else:
            # Start index is even; go backward.
            jstep = -1
        # Move along the blossom until we get to the base.
        while j != 0:
            # Step to the next sub-blossom and augment it recursively.
            j += jstep
            t = b.childs[j]
            if jstep == 1:
                w, x = b.edges[j]
            else:
                x, w = b.edges[j - 1]
            if isinstance(t, Blossom):
                augmentBlossom(t, w)
            # Step to the next sub-blossom and augment it recursively.
            j += jstep
            t = b.childs[j]
            if isinstance(t, Blossom):
                augmentBlossom(t, x)
            # Match the edge connecting those sub-blossoms.
            mate[w] = x
            mate[x] = w
        # Rotate the list of sub-blossoms to put the new base at the front.
        b.childs = b.childs[i:] + b.childs[:i]
        b.edges = b.edges[i:] + b.edges[:i]
        blossombase[b] = blossombase[b.childs[0]]
        assert blossombase[b] == v

    # Swap matched/unmatched edges over an alternating path between two
    # single vertices. The augmenting path runs through S-vertices v and w.
    def augmentMatching(v, w):
        for (s, j) in ((v, w), (w, v)):
            # Match vertex s to vertex j. Then trace back from s
            # until we find a single vertex, swapping matched and unmatched
            # edges as we go.
            while 1:
                bs = inblossom[s]
                assert label[bs] == 1
                assert (labeledge[bs] is None and blossombase[bs]
                        not in mate) or (labeledge[bs][0]
                                         == mate[blossombase[bs]])
                # Augment through the S-blossom from s to base.
                if isinstance(bs, Blossom):
                    augmentBlossom(bs, s)
                # Update mate[s]
                mate[s] = j
                # Trace one step back.
                if labeledge[bs] is None:
                    # Reached single vertex; stop.
                    break
                t = labeledge[bs][0]
                bt = inblossom[t]
                assert label[bt] == 2
                # Trace one more step back.
                s, j = labeledge[bt]
                # Augment through the T-blossom from j to base.
                assert blossombase[bt] == t
                if isinstance(bt, Blossom):
                    augmentBlossom(bt, j)
                # Update mate[j]
                mate[j] = s

    # Verify that the optimum solution has been reached.
    def verifyOptimum():
        if maxcardinality:
            # Vertices may have negative dual;
            # find a constant non-negative number to add to all vertex duals.
            vdualoffset = max(0, -min(dualvar.values()))
        else:
            vdualoffset = 0
        # 0. all dual variables are non-negative
        assert min(dualvar.values()) + vdualoffset >= 0
        assert len(blossomdual) == 0 or min(blossomdual.values()) >= 0
        # 0. all edges have non-negative slack and
        # 1. all matched edges have zero slack;
        for i, j, d in G.edges(data=True):
            wt = d.get(weight, 1)
            if i == j:
                continue  # ignore self-loops
            s = dualvar[i] + dualvar[j] - 2 * wt
            iblossoms = [i]
            jblossoms = [j]
            while blossomparent[iblossoms[-1]] is not None:
                iblossoms.append(blossomparent[iblossoms[-1]])
            while blossomparent[jblossoms[-1]] is not None:
                jblossoms.append(blossomparent[jblossoms[-1]])
            iblossoms.reverse()
            jblossoms.reverse()
            for (bi, bj) in zip(iblossoms, jblossoms):
                if bi != bj:
                    break
                s += 2 * blossomdual[bi]
            assert s >= 0
            if mate.get(i) == j or mate.get(j) == i:
                assert mate[i] == j and mate[j] == i
                assert s == 0
        # 2. all single vertices have zero dual value;
        for v in gnodes:
            assert (v in mate) or dualvar[v] + vdualoffset == 0
        # 3. all blossoms with positive dual value are full.
        for b in blossomdual:
            if blossomdual[b] > 0:
                assert len(b.edges) % 2 == 1
                for (i, j) in b.edges[1::2]:
                    assert mate[i] == j and mate[j] == i
        # Ok.

    # Main loop: continue until no further improvement is possible.
    while 1:

        # Each iteration of this loop is a "stage".
        # A stage finds an augmenting path and uses that to improve
        # the matching.

        # Remove labels from top-level blossoms/vertices.
        label.clear()
        labeledge.clear()

        # Forget all about least-slack edges.
        bestedge.clear()
        for b in blossomdual:
            b.mybestedges = None

        # Loss of labeling means that we can not be sure that currently
        # allowable edges remain allowable throughout this stage.
        allowedge.clear()

        # Make queue empty.
        queue[:] = []

        # Label single blossoms/vertices with S and put them in the queue.
        for v in gnodes:
            if (v not in mate) and label.get(inblossom[v]) is None:
                assignLabel(v, 1, None)

        # Loop until we succeed in augmenting the matching.
        augmented = 0
        while 1:

            # Each iteration of this loop is a "substage".
            # A substage tries to find an augmenting path;
            # if found, the path is used to improve the matching and
            # the stage ends. If there is no augmenting path, the
            # primal-dual method is used to pump some slack out of
            # the dual variables.

            # Continue labeling until all vertices which are reachable
            # through an alternating path have got a label.
            while queue and not augmented:

                # Take an S vertex from the queue.
                v = queue.pop()
                assert label[inblossom[v]] == 1

                # Scan its neighbours:
                for w in G.neighbors(v):
                    if w == v:
                        continue  # ignore self-loops
                    # w is a neighbour to v
                    bv = inblossom[v]
                    bw = inblossom[w]
                    if bv == bw:
                        # this edge is internal to a blossom; ignore it
                        continue
                    if (v, w) not in allowedge:
                        kslack = slack(v, w)
                        if kslack <= 0:
                            # edge k has zero slack => it is allowable
                            allowedge[(v, w)] = allowedge[(w, v)] = True
                    if (v, w) in allowedge:
                        if label.get(bw) is None:
                            # (C1) w is a free vertex;
                            # label w with T and label its mate with S (R12).
                            assignLabel(w, 2, v)
                        elif label.get(bw) == 1:
                            # (C2) w is an S-vertex (not in the same blossom);
                            # follow back-links to discover either an
                            # augmenting path or a new blossom.
                            base = scanBlossom(v, w)
                            if base is not NoNode:
                                # Found a new blossom; add it to the blossom
                                # bookkeeping and turn it into an S-blossom.
                                addBlossom(base, v, w)
                            else:
                                # Found an augmenting path; augment the
                                # matching and end this stage.
                                augmentMatching(v, w)
                                augmented = 1
                                break
                        elif label.get(w) is None:
                            # w is inside a T-blossom, but w itself has not
                            # yet been reached from outside the blossom;
                            # mark it as reached (we need this to relabel
                            # during T-blossom expansion).
                            assert label[bw] == 2
                            label[w] = 2
                            labeledge[w] = (v, w)
                    elif label.get(bw) == 1:
                        # keep track of the least-slack non-allowable edge to
                        # a different S-blossom.
                        if bestedge.get(bv) is None or kslack < slack(
                                *bestedge[bv]):
                            bestedge[bv] = (v, w)
                    elif label.get(w) is None:
                        # w is a free vertex (or an unreached vertex inside
                        # a T-blossom) but we can not reach it yet;
                        # keep track of the least-slack edge that reaches w.
                        if bestedge.get(w) is None or kslack < slack(
                                *bestedge[w]):
                            bestedge[w] = (v, w)

            if augmented:
                break

            # There is no augmenting path under these constraints;
            # compute delta and reduce slack in the optimization problem.
            # (Note that our vertex dual variables, edge slacks and deltas
            # are pre-multiplied by two.)
            deltatype = -1
            delta = deltaedge = deltablossom = None

            # Compute delta1: the minimum value of any vertex dual.
            if not maxcardinality:
                deltatype = 1
                delta = min(dualvar.values())

            # Compute delta2: the minimum slack on any edge between
            # an S-vertex and a free vertex.
            for v in G.nodes():
                if label.get(
                        inblossom[v]) is None and bestedge.get(v) is not None:
                    d = slack(*bestedge[v])
                    if deltatype == -1 or d < delta:
                        delta = d
                        deltatype = 2
                        deltaedge = bestedge[v]

            # Compute delta3: half the minimum slack on any edge between
            # a pair of S-blossoms.
            for b in blossomparent:
                if (blossomparent[b] is None and label.get(b) == 1
                        and bestedge.get(b) is not None):
                    kslack = slack(*bestedge[b])
                    if allinteger:
                        assert (kslack % 2) == 0
                        d = kslack // 2
                    else:
                        d = kslack / 2.0
                    if deltatype == -1 or d < delta:
                        delta = d
                        deltatype = 3
                        deltaedge = bestedge[b]

            # Compute delta4: minimum z variable of any T-blossom.
            for b in blossomdual:
                if (blossomparent[b] is None and label.get(b) == 2
                        and (deltatype == -1 or blossomdual[b] < delta)):
                    delta = blossomdual[b]
                    deltatype = 4
                    deltablossom = b

            if deltatype == -1:
                # No further improvement possible; max-cardinality optimum
                # reached. Do a final delta update to make the optimum
                # verifiable.
                assert maxcardinality
                deltatype = 1
                delta = max(0, min(dualvar.values()))

            # Update dual variables according to delta.
            for v in gnodes:
                if label.get(inblossom[v]) == 1:
                    # S-vertex: 2*u = 2*u - 2*delta
                    dualvar[v] -= delta
                elif label.get(inblossom[v]) == 2:
                    # T-vertex: 2*u = 2*u + 2*delta
                    dualvar[v] += delta
            for b in blossomdual:
                if blossomparent[b] is None:
                    if label.get(b) == 1:
                        # top-level S-blossom: z = z + 2*delta
                        blossomdual[b] += delta
                    elif label.get(b) == 2:
                        # top-level T-blossom: z = z - 2*delta
                        blossomdual[b] -= delta

            # Take action at the point where minimum delta occurred.
            if deltatype == 1:
                # No further improvement possible; optimum reached.
                break
            elif deltatype == 2:
                # Use the least-slack edge to continue the search.
                (v, w) = deltaedge
                assert label[inblossom[v]] == 1
                allowedge[(v, w)] = allowedge[(w, v)] = True
                queue.append(v)
            elif deltatype == 3:
                # Use the least-slack edge to continue the search.
                (v, w) = deltaedge
                allowedge[(v, w)] = allowedge[(w, v)] = True
                assert label[inblossom[v]] == 1
                queue.append(v)
            elif deltatype == 4:
                # Expand the least-z blossom.
                expandBlossom(deltablossom, False)

            # End of this substage.

        # Paranoia check that the matching is symmetric.
        for v in mate:
            assert mate[mate[v]] == v

        # Stop when no more augmenting path can be found.
        if not augmented:
            break

        # End of a stage; expand all S-blossoms which have zero dual.
        for b in list(blossomdual.keys()):
            if b not in blossomdual:
                continue  # already expanded
            if blossomparent[b] is None and label.get(
                    b) == 1 and blossomdual[b] == 0:
                expandBlossom(b, True)

    # Verify that we reached the optimum solution (only for integer weights).
    if allinteger:
        verifyOptimum()

    return matching_dict_to_set(mate)
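
The function above appears to be NetworkX's blossom-based max_weight_matching; a minimal usage sketch, assuming NetworkX is installed (the returned edge representation varies across NetworkX versions):

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)])
# Each vertex appears in at most one matched edge; here the optimum pairs 2-4 and 3-5 (total weight 16).
matching = nx.max_weight_matching(G, maxcardinality=False)
print(matching)  # e.g. {(2, 4), (5, 3)}; exact tuple orientation varies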
 def f(dtype):
     data = list(itertools.repeat((datetime(2001, 1, 1), "aa", 20), 9))
     return DataFrame(data=data, columns=["A", "B", "C"], dtype=dtype)
Example #36
0
def use_pango_font(font, start, count, will_call_prepost=False):
    import gi
    gi.require_version('Pango', '1.0')
    gi.require_version('PangoCairo', '1.0')
    from gi.repository import Pango
    from gi.repository import PangoCairo
    #from gi.repository import Cairo as cairo
    import cairo

    fontDesc = Pango.FontDescription(font)
    a = array.array('b', itertools.repeat(0, 256 * 256))
    surface = cairo.ImageSurface.create_for_data(a, cairo.FORMAT_A8, 256, 256)
    context = cairo.Context(surface)
    pango_context = PangoCairo.create_context(context)
    layout = PangoCairo.create_layout(context)
    fontmap = PangoCairo.font_map_get_default()
    font = fontmap.load_font(fontmap.create_context(), fontDesc)
    layout.set_font_description(fontDesc)
    metrics = font.get_metrics()
    descent = metrics.get_descent()
    d = descent / Pango.SCALE
    linespace = metrics.get_ascent() + metrics.get_descent()
    width = metrics.get_approximate_char_width()

    GL.glPushClientAttrib(GL.GL_CLIENT_PIXEL_STORE_BIT)
    GL.glPixelStorei(GL.GL_UNPACK_SWAP_BYTES, 0)
    GL.glPixelStorei(GL.GL_UNPACK_LSB_FIRST, 1)
    GL.glPixelStorei(GL.GL_UNPACK_ROW_LENGTH, 256)
    GL.glPixelStorei(GL.GL_UNPACK_IMAGE_HEIGHT, 256)
    GL.glPixelStorei(GL.GL_UNPACK_SKIP_PIXELS, 0)
    GL.glPixelStorei(GL.GL_UNPACK_SKIP_ROWS, 0)
    GL.glPixelStorei(GL.GL_UNPACK_SKIP_IMAGES, 0)
    GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
    GL.glPixelZoom(1, -1)

    base = GL.glGenLists(count)
    for i in range(count):
        ch = chr(start + i)
        layout.set_text(ch, -1)
        w, h = layout.get_size()
        context.save()
        context.new_path()
        context.rectangle(0, 0, 256, 256)
        context.set_source_rgba(0., 0., 0., 0.)
        context.set_operator(cairo.OPERATOR_SOURCE)
        context.paint()
        context.restore()

        context.save()
        context.set_source_rgba(1., 1., 1., 1.)
        context.set_operator(cairo.OPERATOR_SOURCE)
        context.move_to(0, 0)
        PangoCairo.update_context(context, pango_context)
        PangoCairo.show_layout(context, layout)
        context.restore()
        w, h = int(w / Pango.SCALE), int(h / Pango.SCALE)
        GL.glNewList(base + i, GL.GL_COMPILE)
        GL.glBitmap(0, 0, 0, 0, 0, h - d, ''.encode())
        #glDrawPixels(0, 0, 0, 0, 0, h-d, '');
        if not will_call_prepost:
            pango_font_pre()
        if w and h:
            try:
                pass
                GL.glDrawPixels(w, h, GL.GL_LUMINANCE, GL.GL_UNSIGNED_BYTE,
                                a.tobytes())
            except Exception as e:
                print("glnav Exception ", e)

        GL.glBitmap(0, 0, 0, 0, w, -h + d, ''.encode())
        if not will_call_prepost:
            pango_font_post()
        GL.glEndList()

    GL.glPopClientAttrib()
    return base, int(width / Pango.SCALE), int(linespace / Pango.SCALE)
Example #37
0
 def parse(x):
     if isinstance(x, collections.abc.Iterable):
         return x
     return tuple(repeat(x, n))
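
The parse fragment above reads like the inner function of an n-tuple broadcast helper (the "_ntuple" pattern used by several libraries); a self-contained sketch of a hypothetical enclosing factory showing where n and repeat come from:

import collections.abc
from itertools import repeat

def _ntuple(n):  # hypothetical factory name, not taken from the example above
    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return x
        return tuple(repeat(x, n))
    return parse

_pair = _ntuple(2)
print(_pair(3))       # (3, 3)
print(_pair((1, 2)))  # (1, 2)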
def get_TRMM_GPM(d_source, coord, time_string, TRMM=False, GPM=False):
    """
    function to get TRMM and GPM data given data source and time strings
    input of the function:
    d_source:   a list of data source file  in format list of string
    time_string: a list of time strings in format YYYYMMDD
    output of the function:
    a list containing time string and data value e.g. [("20100101",3),("20100102",5),..]

    BY DEFAULT, I assume all data are in *nc or *nc4 format
    """
    # dictionary to store output data. dict{"coord1":[(time1,val),(time2,val),...]
    #                                       "coord2":[(time1,val1),(time2,val2),...]
    #                                       "coord3":[(time1,val2),(time2,val3),...]
    #                                       ,...}

    # initialize the dictionary, coordinate as keys
    # final=dict.fromkeys(coord,[])
    # #NOTE: dict created by this method using the same object for all values,
    # so when you update one, you update all

    # final return is a dict
    coord_str = convert_to_strkey(coord)
    final = {c: {"TRMM_GPM": []} for c in coord_str}

    # simple solution, given time string, search and read in data of all coord location
    # TODO better solutions? I think I can do parallel for this part
    for i, ts in enumerate(
            time_string):  # it has to do a search in list every time
        logger.info("We are dealing with time %s" % ts)
        filename = [ds for ds in d_source if ts in ds][0]
        filename = os.path.join(os.path.expanduser("~"),
                                filename.strip("./ \n"))
        logger.info("We are working on file %s" % filename)

        # here filename should be a .nc or .nc4

        # TODO:
        # concatenate all daily precipitation into yearly file can speedup file reading
        try:
            nc = netCDF4.Dataset(filename, "r")
        except Exception as e:
            logger.info(e)
            continue

        if GPM:
            if i == 0:
                GPM_grid = [nc.variables["lon"][:], nc.variables["lat"][:]]
                # coord_ind=(numpy.searchsorted(GPM_grid[0],list(zip(*coord))[1],side="left"),
                # numpy.searchsorted(GPM_grid[1],list(zip(*coord))[0],side="left"))
                coord_ind_lon = [
                    numpy.argmin(abs(GPM_grid[0] - cc[1])) for cc in coord
                ]
                coord_ind_lat = [
                    numpy.argmin(abs(GPM_grid[1] - cc[0])) for cc in coord
                ]
                coord_ind = (coord_ind_lon, coord_ind_lat)

                precipitation = nc.variables["precipitationCal"][:]
                # read in data, data will be masked array
            else:
                precipitation = nc.variables["precipitationCal"][:]
        elif TRMM:
            if i == 0:
                TRMM_grid = [nc.variables["lon"][:], nc.variables["lat"][:]]
                # coord_ind=(numpy.searchsorted(TRMM_grid[0],list(zip(*coord))[1],side="left"),
                # numpy.searchsorted(TRMM_grid[1],list(zip(*coord))[0],side="left"))
                coord_ind_lon = [
                    numpy.argmin(abs(TRMM_grid[0] - cc[1])) for cc in coord
                ]
                coord_ind_lat = [
                    numpy.argmin(abs(TRMM_grid[1] - cc[0])) for cc in coord
                ]
                coord_ind = (coord_ind_lon, coord_ind_lat)

                precipitation = nc.variables["precipitation"][:]
            else:
                precipitation = nc.variables["precipitation"][:]
        else:
            pass

        tmp = list(
            zip(
                itertools.repeat(ts, len(coord_ind[0])),
                precipitation[coord_ind].round(decimals=3),
            ))  # coordinate (lon,lat)

        # for i, key in enumerate(final.keys()):
        #   final[key].append(tmp[i])

        for i, key in enumerate(final.keys()):
            final[key]["TRMM_GPM"].append(tmp[i])
        # [item["TRMM_GPM"].append(tt) for item, tt in zip(final,tmp)]

        del (precipitation)
        del (tmp)
        nc.close()
        gc.collect()

    return final
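
The commented-out dict.fromkeys(coord, []) above is avoided because it reuses one list object for every key, exactly as the NOTE warns; a minimal standalone sketch of the pitfall and the dict-comprehension fix used in the code:

shared = dict.fromkeys(["a", "b"], [])
shared["a"].append(1)
print(shared)    # {'a': [1], 'b': [1]}; both keys point at the same list

separate = {c: {"TRMM_GPM": []} for c in ["a", "b"]}
separate["a"]["TRMM_GPM"].append(1)
print(separate)  # {'a': {'TRMM_GPM': [1]}, 'b': {'TRMM_GPM': []}}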
Example #39
0
def repeat(plan, num=1, delay=None):
    """
    Repeat a plan num times with delay and checkpoint between each repeat.

    This is different from ``repeater`` and ``caching_repeater`` in that it
    adds ``checkpoint`` and optionally ``sleep`` messages if delay is provided.
    This is intended for users who need the structure of ``count`` but do not
    want to reimplement the control flow.

    Parameters
    ----------
    plan: callable
        Callable that returns an iterable of Msg objects
    num : integer, optional
        number of readings to take; default is 1

        If None, capture data until canceled
    delay : iterable or scalar, optional
        time delay between successive readings; default is 0

    Notes
    -----
    If ``delay`` is an iterable, it must have at least ``num - 1`` entries or
    the plan will raise a ``ValueError`` during iteration.
    """
    # Create finite or infinite counter
    if num is None:
        iterator = itertools.count()
    else:
        iterator = range(num)

    # If delay is a scalar, repeat it forever. If it is an iterable, leave it.
    if not isinstance(delay, Iterable):
        delay = itertools.repeat(delay)
    else:
        try:
            num_delays = len(delay)
        except TypeError:
            # No way to tell in advance if we have enough delays.
            pass
        else:
            if num - 1 > num_delays:
                raise ValueError("num=%r but delays only provides %r "
                                 "entries" % (num, num_delays))
        delay = iter(delay)

    def repeated_plan():
        for i in iterator:
            now = time.time()  # Intercept the flow in its earliest moment.
            yield Msg('checkpoint')
            yield from ensure_generator(plan())
            try:
                d = next(delay)
            except StopIteration:
                if i + 1 == num:
                    break
                elif num is None:
                    break
                else:
                    # num specifies a number of iterations less than delay
                    raise ValueError("num=%r but delays only provides %r "
                                     "entries" % (num, i))
            if d is not None:
                d = d - (time.time() - now)
                if d > 0:  # Sleep if and only if time is left to do it.
                    yield Msg('sleep', None, d)

    return (yield from repeated_plan())
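
A standalone sketch of the delay-normalization idiom used above: a scalar (or None) becomes an infinite itertools.repeat stream, while an iterable is consumed with next(); the helper name here is illustrative, not part of the plan API:

import itertools
from collections.abc import Iterable

def normalize_delay(delay):  # illustrative helper, not part of the API above
    if not isinstance(delay, Iterable):
        return itertools.repeat(delay)   # scalar or None: repeat it forever
    return iter(delay)                   # iterable: hand back a plain iterator

d = normalize_delay(0.5)
print([next(d) for _ in range(3)])       # [0.5, 0.5, 0.5]
print(list(normalize_delay([1, 2, 3])))  # [1, 2, 3]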
Example #40
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
for e in itertools.repeat('a', 8):
    print e

odds = itertools.count(1, 2)
for o in itertools.takewhile(lambda x: x < 20, odds):
    print o

for c in itertools.chain(['abc', 'xyz'], [12, 34]):
    print c

for k, group in itertools.groupby('AaaBaabBCacA', lambda c: c.upper()):
    print k, group, list(group)

for x in itertools.imap(lambda x: x * x, [1, 2, 3, 4]):
    print x
Example #41
0
def getpset(pset_data: object = g_data):
    '''
    function for generating primitives for gp.PrimitiveTree
    :param pset_data: object, GAData, for renaming of args in accordance with training data features.
    :return: gp.PrimitiveSet, set of primitives to consider for future symbolic operations in GA.
    '''
    p_set = gp.PrimitiveSetTyped('MAIN',
                                 tuple(repeat(float, pset_data.numfeatures)),
                                 float)

    # boolean ops
    p_set.addPrimitive(np.logical_and, (bool, bool), bool)
    p_set.addPrimitive(np.logical_or, (bool, bool), bool)
    p_set.addPrimitive(np.logical_not, (bool, ), bool)

    # logical ops
    # custom primitive
    @njit([
        float32(boolean, float32, float32),
        float64(boolean, float64, float64)
    ],
          nogil=True,
          parallel=True)
    def ifte(input: bool, out1: float, out2: float) -> float:
        if input:
            return out1
        else:
            return out2

    p_set.addPrimitive(np.less, (float, float), bool)
    p_set.addPrimitive(np.equal, (float, float), bool)
    p_set.addPrimitive(ifte, (bool, float, float), float)

    # flops
    # custom primitive
    @njit([float32(float32, float32),
           float64(float64, float64)],
          nogil=True,
          parallel=True)
    def safediv(l: float, r: float) -> float:
        '''
        zero protected division
        :param l: float, int
        :param r: float, int
        :return: float
        '''
        if r == 0.0:
            return 1
        return l / r

    p_set.addPrimitive(np.add, (float, float), float)
    p_set.addPrimitive(np.subtract, (float, float), float)
    p_set.addPrimitive(np.multiply, (float, float), float)
    p_set.addPrimitive(safediv, (float, float), float)
    p_set.addPrimitive(np.negative, (float, ), float)
    p_set.addPrimitive(np.tanh, (float, ), float)
    p_set.addPrimitive(np.cos, (float, ), float)
    p_set.addPrimitive(np.sin, (float, ), float)
    p_set.addPrimitive(np.maximum, (float, float), float)
    p_set.addPrimitive(np.minimum, (float, float), float)

    # terminals
    p_set.addEphemeralConstant(f'rand', lambda: np.random.uniform(-1, 1),
                               float)
    p_set.addTerminal(False, bool)
    p_set.addTerminal(True, bool)
    for cols in pset_data.c_args():
        p_set.renameArguments(**cols)
    return p_set
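
A hedged, self-contained sketch of the same tuple(repeat(float, n)) idiom for declaring a typed DEAP primitive set and evaluating a random expression from it; this is a stripped-down analogue, not the set built above:

import operator
from itertools import repeat
from deap import gp

# Minimal analogue: 3 float inputs, float output.
pset = gp.PrimitiveSetTyped('MAIN', tuple(repeat(float, 3)), float)
pset.addPrimitive(operator.add, (float, float), float)
pset.addPrimitive(operator.mul, (float, float), float)
pset.addTerminal(1.0, float)

expr = gp.genFull(pset, min_=1, max_=2)          # random typed expression tree
func = gp.compile(gp.PrimitiveTree(expr), pset)  # compile to a callable of ARG0..ARG2
print(func(0.5, 2.0, 3.0))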
Example #42
0
import numpy as np
from itertools import repeat
import multiprocessing as mp


def asdf(one, a=None):
    if a is None:
        print('my dad never loved me')
    elif a == 1:
        print('I am redeemed!')
    return 4


pool = mp.Pool(4)
fd = np.random.random((12, 12, 3))
gs = np.array_split(fd, 4)
v = pool.map(asdf, gs)
print(v)
v = pool.starmap(asdf, zip(gs, repeat(1)))
print(v)




                                       '6.abstract': abstract}, index=[0])

                output = pd.concat([output, chapters], axis=1)

                output.to_excel('%s/%s_%s.xls' %
                                (output_path, bookname, author), 'Sheet1')
                print('提取成功:%s_%s' % (bookname, author))  # "Extraction succeeded: <book>_<author>"


#%%
if __name__ == '__main__':
    # get file path & output path from user input
    file = input('请输入目录文件路径: ')    # prompt: "Please enter the catalog file path"
    output_path = input('请输入输出路径: ')  # prompt: "Please enter the output path"

    # Load spreadsheet
    xl = pd.ExcelFile(file)
    df = xl.parse('Sheet1')
    book_list = df['book_name'].drop_duplicates().str.strip().tolist()

    pool = Pool()
    failure_list = pool.starmap(
        parl_scraper, zip(book_list, repeat(output_path)))
    pool.close()
    pool.join()

    # keep track of books which have no matching search results
    with open("%s/失败目录.txt" % output_path, "w") as failure_file:
        failure_file.write("没有符合以下书名的查询结果:\n")
        failure_file.write("\n".join(filter(None, failure_list)))
Example #44
0
def intersperse(delimiter, seq):
    return itertools.islice(
        itertools.chain.from_iterable(
            zip(itertools.repeat(delimiter), seq)), 1, None)
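
Usage of the intersperse above (lazy, works for any iterable), assuming itertools is imported in its module:

print(list(intersperse(0, [1, 2, 3])))   # [1, 0, 2, 0, 3]
print(''.join(intersperse('-', 'abc')))  # 'a-b-c'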
Example #45
0
print(list(zip('abc', range(5), 'dsad')))

print(list(itertools.zip_longest('abd', range(5))))  # like zip, but pads to the longest iterable, filling gaps with None
print(list(itertools.zip_longest('abd', range(5), fillvalue='?')))  # fillvalue sets what the shorter iterables are padded with

print(list(itertools.product('abc', range(2))))  # Cartesian product: every possible pairing as a tuple
print(list(itertools.product('abc')))  # a single iterable just yields 1-tuples
print(list(itertools.product('abc', repeat=2)))  # repeat=2 uses each input iterable twice

cy = itertools.cycle('abc')  # cycles through the elements indefinitely
print(next(cy))
print(next(cy))
print(next(cy))
print(next(cy))

rp = itertools.repeat(7)  # yields the same element indefinitely
print(next(rp))
print(next(rp))

print(list(itertools.repeat(7, 3)))  # a second argument limits how many are produced

print(list(itertools.combinations('abc', 2)))  # combinations, no element repeated within a tuple
print(list(itertools.combinations_with_replacement('abc', 2)))  # combinations allowing repeated elements

print(list(itertools.permutations('abc', 2)))  # permutations: like combinations, but order matters

print(list(itertools.groupby('LLLAAGG')))
for char, group in itertools.groupby('LLLAAGG'):
    print(char, '-->', list(group))

animals = ['aa', 'bb', 'a', 'b']
Example #46
0
 def hash_then_or(hash_name):
     # For now, all the decent hashes have 6-char names, so we can get
     # away with hard-coding space literals.
     return chain([hash_name], repeat('    or'))
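
A small usage sketch of hash_then_or above: the first yielded item is the hash name, every later item is the aligned '    or' continuation prefix:

from itertools import islice

print(list(islice(hash_then_or('sha256'), 3)))  # ['sha256', '    or', '    or']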
Example #47
0
def diff(lhs_seq, rhs_seq, eq=None):
    '''
    Computes a diff of two sequences.

    Algorithm is based on Longest Common Subsequence problem.

    Returns a list of pairs. Each pair consists from either a value from the
    first sequence and None or from None and a value from the second sequence
    or values from both sequences.

    >>> diff('banana', 'ananas')
    [('b', None), ('a', 'a'), ('n', 'n'), ('a', 'a'), ('n', 'n'), ('a', 'a'), (None, 's')]
    '''
    if not eq:
        eq = lambda x, y: x == y

    result = list()
    l = 0
    l_e = len(lhs_seq) - 1
    r = 0
    r_e = len(rhs_seq) - 1
    # handle common prefix
    while l <= l_e and r <= r_e and eq(lhs_seq[l], rhs_seq[r]):
        result.append((lhs_seq[l], rhs_seq[r]))
        l += 1
        r += 1

    end_result = list()
    # handle common suffix
    while l <= l_e and r <= r_e and eq(lhs_seq[l_e], rhs_seq[r_e]):
        end_result.append((lhs_seq[l_e], rhs_seq[r_e]))
        l_e -= 1
        r_e -= 1

    matrix_row_len = (r_e - r) + 2
    # build matrix which has one more column and line than rhs x lhs
    m = list(itertools.repeat(0, ((l_e - l) + 2) * matrix_row_len))

    # skip first row because it contains only 0
    pos = matrix_row_len

    # in case where strings are the same l has value len(left) == l_e + 1
    i = l_e
    # in case where strings are the same r has value len(right) == r_e + 1
    j = r_e

    for i in xrange(l, l_e + 1):
        pos += 1  # skip first column which is always 0
        for j in xrange(r, r_e + 1):
            if eq(lhs_seq[i], rhs_seq[j]):
                res = m[pos - matrix_row_len - 1] + 1
            else:
                res = max(m[pos - matrix_row_len], m[pos - 1])
            m[pos] = res
            pos += 1

    pos -= 1  # current value is len(m)
    i += 1  # current value is last of xrange(l, l_e + 1)
    j += 1  # current value is last of xrange(r, r_e + 1)
    while i != l and j != r:
        if m[pos] == m[pos - 1]:
            pos -= 1
            j -= 1
            end_result.append((None, rhs_seq[j]))
        elif m[pos] == m[pos - matrix_row_len]:
            pos -= matrix_row_len
            i -= 1
            end_result.append((lhs_seq[i], None))
        else:
            pos -= matrix_row_len
            pos -= 1
            i -= 1
            j -= 1
            end_result.append((lhs_seq[i], rhs_seq[j]))

    while i != l:
        i -= 1
        end_result.append((lhs_seq[i], None))

    while j != r:
        j -= 1
        end_result.append((None, rhs_seq[j]))

    end_result.reverse()
    return result + end_result
Example #48
0
        def export_mtz(observed_hkls, experiment, filename):
            if experiment.goniometer:
                axis = experiment.goniometer.get_rotation_axis()
            else:
                axis = 0.0, 0.0, 0.0
            s0 = experiment.beam.get_s0()
            wavelength = experiment.beam.get_wavelength()

            from scitbx import matrix

            panel = experiment.detector[0]
            pixel_size = panel.get_pixel_size()
            cb_op_to_ref = (experiment.crystal.get_space_group().info().
                            change_of_basis_op_to_reference_setting())

            experiment.crystal = experiment.crystal.change_basis(cb_op_to_ref)

            from iotbx import mtz
            from scitbx.array_family import flex
            import itertools

            m = mtz.object()
            m.set_title("from dials.scratch.mg.strategy_i19")
            m.set_space_group_info(experiment.crystal.get_space_group().info())

            nrefcount = sum(observed_hkls.itervalues())
            nref = max(observed_hkls.itervalues())

            for batch in range(1, nref + 1):
                o = m.add_batch().set_num(batch).set_nbsetid(1).set_ncryst(1)
                o.set_time1(0.0).set_time2(0.0).set_title("Batch %d" % batch)
                o.set_ndet(1).set_theta(flex.float((0.0, 0.0))).set_lbmflg(0)
                o.set_alambd(wavelength).set_delamb(0.0).set_delcor(0.0)
                o.set_divhd(0.0).set_divvd(0.0)
                o.set_so(flex.float(s0)).set_source(flex.float((0, 0, -1)))
                o.set_bbfac(0.0).set_bscale(1.0)
                o.set_sdbfac(0.0).set_sdbscale(0.0).set_nbscal(0)
                _unit_cell = experiment.crystal.get_unit_cell()
                _U = experiment.crystal.get_U()

                o.set_cell(flex.float(_unit_cell.parameters()))
                o.set_lbcell(flex.int((-1, -1, -1, -1, -1, -1)))
                o.set_umat(flex.float(_U.transpose().elems))
                mosaic = experiment.crystal.get_mosaicity()
                o.set_crydat(
                    flex.float([
                        mosaic, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                        0.0, 0.0
                    ]))
                o.set_lcrflg(0)
                o.set_datum(flex.float((0.0, 0.0, 0.0)))

                # detector size, distance
                o.set_detlm(
                    flex.float([
                        0.0,
                        panel.get_image_size()[0],
                        0.0,
                        panel.get_image_size()[1],
                        0,
                        0,
                        0,
                        0,
                    ]))
                o.set_dx(flex.float([panel.get_directed_distance(), 0.0]))

                # goniometer axes and names, and scan axis number, and number of axes, missets
                o.set_e1(flex.float(axis))
                o.set_e2(flex.float((0.0, 0.0, 0.0)))
                o.set_e3(flex.float((0.0, 0.0, 0.0)))
                o.set_gonlab(flex.std_string(("AXIS", "", "")))
                o.set_jsaxs(1)
                o.set_ngonax(1)
                o.set_phixyz(flex.float((0.0, 0.0, 0.0, 0.0, 0.0, 0.0)))

                phi_start, phi_range = 0.0, 0.0
                o.set_phistt(phi_start)
                o.set_phirange(phi_range)
                o.set_phiend(phi_start + phi_range)
                o.set_scanax(flex.float(axis))

                # number of misorientation angles
                o.set_misflg(0)

                # crystal axis closest to rotation axis (why do I want this?)
                o.set_jumpax(0)

                # type of data - 1; 2D, 2; 3D, 3; Laue
                o.set_ldtype(2)

            # now create the actual data structures - first keep a track of the columns
            # H K L M/ISYM BATCH I SIGI IPR SIGIPR FRACTIONCALC XDET YDET ROT WIDTH
            # LP MPART FLAG BGPKRATIOS

            from cctbx.array_family import flex as cflex  # implicit import

            # now go for it and make an MTZ file...
            x = m.add_crystal("XTAL", "DIALS", unit_cell.parameters())
            d = x.add_dataset("FROMDIALS", wavelength)

            # now add column information...
            type_table = {
                "IPR": "J",
                "BGPKRATIOS": "R",
                "WIDTH": "R",
                "I": "J",
                "H": "H",
                "K": "H",
                "MPART": "I",
                "L": "H",
                "BATCH": "B",
                "M_ISYM": "Y",
                "SIGI": "Q",
                "FLAG": "I",
                "XDET": "R",
                "LP": "R",
                "YDET": "R",
                "SIGIPR": "Q",
                "FRACTIONCALC": "R",
                "ROT": "R",
            }

            m.adjust_column_array_sizes(nrefcount)
            m.set_n_reflections(nrefcount)

            # assign H, K, L, M_ISYM space
            for column in "H", "K", "L", "M_ISYM":
                d.add_column(column, type_table[column]).set_values(
                    flex.float(nrefcount, 0.0))

            batchnums = (_ for (x, n) in observed_hkls.iteritems()
                         for _ in range(1, n + 1))
            d.add_column("BATCH",
                         type_table["BATCH"]).set_values(flex.float(batchnums))
            d.add_column("FRACTIONCALC",
                         type_table["FRACTIONCALC"]).set_values(
                             flex.float(nrefcount, 3.0))

            m.replace_original_index_miller_indices(
                cb_op_to_ref.apply(
                    cflex.miller_index([
                        _ for (x, n) in observed_hkls.iteritems()
                        for _ in itertools.repeat(x, n)
                    ])))

            m.write(filename)

            return m
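
The nested comprehension near the end expands each observed Miller index by its observation count via itertools.repeat; a standalone Python 3 sketch of that expansion (iteritems is the Python 2 spelling used above):

import itertools

observed = {'a': 2, 'b': 3}  # index -> number of observations (illustrative)
expanded = [x for (x, n) in observed.items() for _ in itertools.repeat(x, n)]
print(expanded)  # ['a', 'a', 'b', 'b', 'b']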
Example #49
0
 def assignRange(start, stop, value):
     items[start:stop] = repeat(value, stop - start)
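
assignRange above fills a slice with one value by assigning a repeat of the right length; a tiny standalone illustration (items here is an ordinary list, an assumption):

from itertools import repeat

items = list(range(10))
start, stop, value = 2, 6, 0
items[start:stop] = repeat(value, stop - start)
print(items)  # [0, 1, 0, 0, 0, 0, 6, 7, 8, 9]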
Example #50
0
import itertools
ns = itertools.repeat('A', 3)
for n in ns:
    print(n)
Example #51
0
    def _estimate(self, iterable, partial_fit=False):
        indim = iterable.dimension()
        if not indim:
            raise ValueError("zero dimension from data source!")

        if not any(
                iterable.trajectory_lengths(stride=self.stride,
                                            skip=self.lag + self.skip) > 0):
            if partial_fit:
                self.logger.warn(
                    "Could not use data passed to partial_fit(), "
                    "because no single data set [longest=%i] is longer than lag+skip [%i]",
                    max(
                        iterable.trajectory_lengths(self.stride,
                                                    skip=self.skip)),
                    self.lag + self.skip)
                return self
            else:
                raise ValueError(
                    "None single dataset [longest=%i] is longer than"
                    " lag+skip [%i]." % (max(
                        iterable.trajectory_lengths(
                            self.stride,
                            skip=self.skip)), self.lag + self.skip))

        self.logger.debug(
            "will use %s total frames for %s",
            iterable.trajectory_lengths(self.stride, skip=self.skip),
            self.name)

        chunksize = 0 if partial_fit else iterable.chunksize
        it = iterable.iterator(lag=self.lag,
                               return_trajindex=False,
                               stride=self.stride,
                               skip=self.skip,
                               chunk=chunksize)
        # iterator over input weights
        if hasattr(self.weights, 'iterator'):
            if hasattr(self.weights, '_transform_array'):
                self.weights.data_producer = iterable
            it_weights = self.weights.iterator(lag=0,
                                               return_trajindex=False,
                                               stride=self.stride,
                                               skip=self.skip,
                                               chunk=chunksize)
            if it_weights.number_of_trajectories(
            ) != iterable.number_of_trajectories():
                raise ValueError(
                    "number of weight arrays did not match number of input data sets. {} vs. {}"
                    .format(it_weights.number_of_trajectories(),
                            iterable.number_of_trajectories()))
        else:
            # if we only have a scalar, repeat it.
            import itertools
            it_weights = itertools.repeat(self.weights)

        # TODO: we could possibly optimize the case lag>0 and c0t=False using skip.
        # Assess how much iterator hassle this would be.
        #self.skipped=0
        pg = ProgressReporter()
        pg.register(it.n_chunks, 'calculate covariances', stage=0)
        with it, pg.context(stage=0):
            self._init_covar(partial_fit, it.n_chunks)
            for data, weight in zip(it, it_weights):
                if self.lag != 0:
                    X, Y = data
                else:
                    X, Y = data, None

                if weight is not None:
                    if isinstance(weight, np.ndarray):
                        weight = weight.squeeze()[:len(X)]
                        # TODO: if the weight is exactly zero it makes not sense to add the chunk to running moments.
                        # however doing so, leads to wrong results...
                        # if np.all(np.abs(weight) < np.finfo(np.float).eps):
                        #     #print("skip")
                        #     self.skipped += len(X)
                        #     continue
                if self.remove_constant_mean is not None:
                    X = X - self.remove_constant_mean[np.newaxis, :]
                    if Y is not None:
                        Y = Y - self.remove_constant_mean[np.newaxis, :]

                try:
                    self._rc.add(X, Y, weights=weight)
                except MemoryError:
                    raise MemoryError(
                        'Covariance matrix does not fit into memory. '
                        'Input is too high-dimensional ({} dimensions). '.
                        format(X.shape[1]))
                pg.update(1, stage=0)

        if partial_fit:
            if '_rc' not in self.__serialize_fields:
                self.__serialize_fields.append('_rc')
        else:
            if '_rc' in self.__serialize_fields:
                self.__serialize_fields.remove('_rc')
        return self
Example #52
0
def process(job):
    ''''''
    job_id, (f, k) = job

    # get options and parameters
    c = cfg.sound_speed
    rho = cfg.fluid_rho
    array = abstract.load(cfg.array_config)
    refn = cfg.mesh_refn

    # create finite element matrix
    Gfe, _ = fem.mbk_from_abstract(array, f, refn)

    # create boundary element matrix
    hmkwrds = [
        'aprx', 'basis', 'admis', 'eta', 'eps', 'm', 'clf', 'eps_aca', 'rk',
        'q_reg', 'q_sing', 'strict'
    ]
    hmargs = {k: getattr(cfg, k) for k in hmkwrds}
    Z = bem.z_from_abstract(array, k, refn, format='HFormat', **hmargs)
    omg = 2 * np.pi * f
    Gbe = -omg**2 * 2 * rho * Z

    # define total linear system and find its LU decomposition
    G = MbkSparseMatrix(Gfe) + Gbe
    Glu = G.lu()

    # create patch pressure loads
    F = fem.f_from_abstract(array, refn)
    AVG = fem.avg_from_abstract(array, refn)
    mesh = Mesh.from_abstract(array, refn)
    ob = mesh.on_boundary

    ## TEST
    # Fall = fem.mem_f_vector(mesh, 1)

    # solve for each source patch
    npatch = abstract.get_patch_count(array)
    source_patch = np.arange(npatch)
    dest_patch = np.arange(npatch)
    patches = abstract.get_patches_from_array(array)
    patch_areas = np.array([p.area for p in patches])

    for sid in source_patch:
        # get RHS
        b = np.array(F[:, sid].todense())
        # b = Fall

        # solve
        start = timer()
        # conjugate so phase is consistent with -iwt convention used by h2lib
        x = np.conj(Glu.lusolve(b))
        time_solve = timer() - start
        x[ob] = 0

        # average displacement over patches
        # area = patches[sid].length_x * patches[sid].length_y
        # x_patch = (F.T).dot(x) / patches[sid].area
        # x_patch = (F.T).dot(x) / patch_areas
        x_patch = (AVG.T).dot(x) / patch_areas

        # write patch displacement results to frequency response database
        data = {}
        data['frequency'] = repeat(f)
        data['wavenumber'] = repeat(k)
        data['source_patch'] = repeat(sid)
        data['dest_patch'] = dest_patch
        data['displacement_real'] = np.real(x_patch)
        data['displacement_imag'] = np.imag(x_patch)
        data['time_solve'] = repeat(time_solve)

        with write_lock:
            frequency_response.update_displacements(file, **data)

        # write node displacement results to frequency response database
        data = {}
        data['x'] = mesh.vertices[:, 0]
        data['y'] = mesh.vertices[:, 1]
        data['z'] = mesh.vertices[:, 2]
        data['frequency'] = repeat(f)
        data['wavenumber'] = repeat(k)
        data['source_patch'] = repeat(sid)
        data['displacement_real'] = np.real(x)
        data['displacement_imag'] = np.imag(x)

        with write_lock:
            frequency_response.update_node_displacements(file, **data)

    with write_lock:
        util.update_progress(file, job_id)
def _WaitJob(apitools_client,
             messages_module,
             job_reference,
             progress_reporter,
             status='DONE',
             wait=sys.maxint):
    """Poll for a job to run until it reaches the requested status.

  Arguments:
    apitools_client: the client to be used for polling
    messages_module: The module defining messages used in apitools calls.
    job_reference: JobReference to poll.
    progress_reporter: a job_progress.ProgressReporter
      that will be called after each job poll.
    status: (optional, default 'DONE') Desired job status.
    wait: (optional, default maxint) Max wait time.

  Returns:
    The job object returned by the final status call.

  Raises:
    StopIteration: If polling does not reach the desired state before
      timing out.
    ValueError: If given an invalid wait value.
  """
    start_time = time.time()
    job = None

    # This is a first pass at wait logic: we ping at 1s intervals a few
    # times, then increase to max(3, max_wait), and then keep waiting
    # that long until we've run out of time.
    waits = itertools.chain(itertools.repeat(1, 8), xrange(2, 30, 3),
                            itertools.repeat(30))
    current_wait = 0
    current_status = 'UNKNOWN'
    while current_wait <= wait:
        try:
            done, job = _PollJob(apitools_client,
                                 messages_module,
                                 job_reference,
                                 status=status,
                                 wait=wait)
            current_status = job.status.state
            if done:
                progress_reporter.Print(job_reference.jobId, current_wait,
                                        current_status)
                break
        except bigquery.CommunicationError as e:
            # Communication errors while waiting on a job are okay.
            logging.warning('Transient error during job status check: %s', e)
        except bigquery.BackendError as e:
            # Temporary server errors while waiting on a job are okay.
            logging.warning('Transient error during job status check: %s', e)
        for _ in xrange(waits.next()):
            current_wait = time.time() - start_time
            progress_reporter.Print(job_reference.jobId, current_wait,
                                    current_status)
            time.sleep(1)
    else:
        raise StopIteration(
            'Wait timed out. Operation not finished, in state {0}'.format(
                current_status))
    progress_reporter.Done()
    return job
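
The polling schedule above is just chained iterators; a Python 3 sketch of the first values it yields (xrange and .next() are the Python 2 spellings used in the function):

import itertools

waits = itertools.chain(itertools.repeat(1, 8), range(2, 30, 3), itertools.repeat(30))
print(list(itertools.islice(waits, 22)))
# [1, 1, 1, 1, 1, 1, 1, 1, 2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 30, 30, 30, 30]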
Example #54
0
def windowed_mutations(contigs, w):
    '''Return array [[window_length, num_mutations], ...] for each contig'''
    with ThreadPoolExecutor() as p:
        return list(
            map(_smcpp._windowed_mutations_helper, contigs,
                itertools.repeat(w)))
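
map(..., contigs, itertools.repeat(w)) above broadcasts the fixed window size across every contig; a tiny standalone illustration of the same pattern (the helper here is a stand-in, not the real one):

import itertools

def windowed(length, w):
    return length // w  # stand-in for the real helper

lengths = [100, 250, 400]
print(list(map(windowed, lengths, itertools.repeat(50))))  # [2, 5, 8]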
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        # load 4 images at a time into a mosaic (only during training)
        self.mosaic = self.augment and not self.rect
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        # local to global path
                        f += [x.replace('./', parent)
                              if x.startswith('./') else x for x in t]
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted(
                [x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(
                f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(
            self.label_files[0]).parent).with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            # changed
            if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:
                cache, exists = self.cache_labels(
                    cache_path, prefix), False  # re-cache
        else:
            cache, exists = self.cache_labels(
                cache_path, prefix), False  # cache

        # Display cache
        # found, missing, empty, corrupted, total
        nf, nm, ne, nc, n = cache.pop('results')
        if exists:
            d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            # display cache results
            tqdm(None, desc=prefix + d, total=n, initial=n)
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        cache.pop('version')  # remove version
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(
                np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(
                *x), zip(repeat(self), range(n)))  # 8 threads
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                # img, hw_original, hw_resized = load_image(self, i)
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = x
                gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
Example #56
0
    def __init__(self,
                 in_channels: int,
                 num_filters: int,
                 filter_length: int,
                 subsample_length: int,
                 groups: int = 1,
                 dilation: int = 1,
                 dropouts: Union[float, Sequence[float]] = 0,
                 **config) -> NoReturn:
        """ finished, NOT checked,

        Parameters:
        -----------
        in_channels: int,
            number of features (channels) of the input
        num_filters: int,
            number of filters for the convolutional layers
        filter_length: int,
            length (size) of the filter kernels
        subsample_length: int,
            subsample length,
            including pool size for short cut, and stride for the top convolutional layer
        groups: int, default 1,
            pattern of connections between inputs and outputs,
            for more details, ref. `nn.Conv1d`
        dilation: int, default 1,
            dilation of the convolutional layers
        dropouts: float, or sequence of float, default 0.0,
            dropout ratio after each convolution (and batch normalization, and activation, etc.)
        config: dict,
            other hyper-parameters, including
            filter length (kernel size), activation choices, weight initializer,
            and short cut patterns, etc.
        """
        super().__init__()
        self.__num_convs = 2
        self.__in_channels = in_channels
        self.__out_channels = num_filters
        self.__kernel_size = filter_length
        self.__down_scale = subsample_length
        self.__stride = subsample_length
        self.__groups = groups
        self.__dilation = dilation
        if isinstance(dropouts, float):
            self.__dropouts = list(repeat(dropouts, self.__num_convs))
        else:
            self.__dropouts = list(dropouts)
        assert len(self.__dropouts) == self.__num_convs
        self.config = ED(deepcopy(config))
        if self.__DEBUG__:
            print(
                f"configuration of {self.__name__} is as follows\n{dict_to_str(self.config)}"
            )

        self.__increase_channels = (self.__out_channels > self.__in_channels)
        self.shortcut = self._make_shortcut_layer()

        self.main_stream = nn.Sequential()
        conv_in_channels = self.__in_channels
        for i in range(self.__num_convs):
            conv_activation = (self.config.activation
                               if i < self.__num_convs - 1 else None)
            self.main_stream.add_module(
                f"cba_{i}",
                Conv_Bn_Activation(
                    in_channels=conv_in_channels,
                    out_channels=self.__out_channels,
                    kernel_size=self.__kernel_size,
                    stride=(self.__stride if i == 0 else 1),
                    dilation=self.__dilation,
                    groups=self.__groups,
                    batch_norm=True,
                    activation=conv_activation,
                    kw_activation=self.config.kw_activation,
                    kernel_initializer=self.config.kernel_initializer,
                    kw_initializer=self.config.kw_initializer,
                    bias=self.config.bias,
                ))
            conv_in_channels = self.__out_channels
            if i == 0 and self.__dropouts[i] > 0:
                self.main_stream.add_module(f"dropout_{i}",
                                            nn.Dropout(self.__dropouts[i]))
            if i == 1:
                self.main_stream.add_module(
                    f"gcb",
                    GlobalContextBlock(
                        in_channels=self.__out_channels,
                        ratio=self.config.gcb.ratio,
                        reduction=self.config.gcb.reduction,
                        pooling_type=self.config.gcb.pooling_type,
                        fusion_types=self.config.gcb.fusion_types,
                    ))

        if isinstance(self.config.activation, str):
            self.out_activation = \
                Activations[self.config.activation.lower()](**self.config.kw_activation)
        else:
            self.out_activation = \
                self.config.activation(**self.config.kw_activation)

        if self.__dropouts[1] > 0:
            self.out_dropout = nn.Dropout(self.__dropouts[1])
        else:
            self.out_dropout = None
Example #57
0
class LaxVmapTest(jtu.JaxTestCase):

  def _CheckBatching(self, op, bdim_size, bdims, shapes, dtypes, rng,
                     rtol=None, atol=None):
    batched_shapes = map(partial(add_bdim, bdim_size), bdims, shapes)
    args = [rng(shape, dtype) for shape, dtype in zip(batched_shapes, dtypes)]
    args_slice = args_slicer(args, bdims)
    ans = api.vmap(op, bdims)(*args)
    if bdim_size == 0:
      args = [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
      out = op(*args)
      expected = np.zeros((0,) + out.shape, out.dtype)
    else:
      expected = np.stack([op(*args_slice(i)) for i in range(bdim_size)])
    self.assertAllClose(ans, expected, rtol=rtol, atol=atol)

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_bdims={}".format(
            jtu.format_test_name_suffix(rec.op, shapes,
                                        itertools.repeat(dtype)), bdims),
         "op_name": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes,
         "dtype": dtype, "bdims": bdims, "tol": rec.tol}
        for shape_group in compatible_shapes
        for shapes in itertools.combinations_with_replacement(shape_group, rec.nargs)
        for bdims in all_bdims(*shapes)
        for dtype in rec.dtypes)
      for rec in LAX_OPS))
  def testOp(self, op_name, rng_factory, shapes, dtype, bdims, tol):
    rng = rng_factory(self.rng())
    op = getattr(lax, op_name)
    self._CheckBatching(op, 10, bdims, shapes, [dtype] * len(shapes), rng,
                        atol=tol, rtol=tol)

  @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
       "testcase_name":
       "_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_"
       "rhs_dilation={}_dims={}_feature_group_count={}_batch_group_count={}"
       "_lhs_bdim={}_rhs_bdim={}"
       .format(jtu.format_shape_dtype_string(lhs_shape, dtype),
               jtu.format_shape_dtype_string(rhs_shape, dtype),
               strides, padding, lhs_dil, rhs_dil, ",".join(dim_nums),
               feature_group_count, batch_group_count, lhs_bdim, rhs_bdim),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "strides": strides, "padding": padding, "lhs_dil": lhs_dil,
       "rhs_dil": rhs_dil, "dimension_numbers": dim_nums,
       "perms": perms, "lhs_bdim": lhs_bdim, "rhs_bdim": rhs_bdim,
       "feature_group_count": feature_group_count,
       "batch_group_count": batch_group_count,
     } for batch_group_count, feature_group_count in s([(1, 1), (2, 1), (1, 2)])
       for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in s([
           ((b * batch_group_count, i * feature_group_count, 6, 7),  # lhs_shape
            (j * batch_group_count * feature_group_count, i, 1, 2),  # rhs_shape
            [(1, 1), (1, 2), (2, 1)],  # strides
            [((0, 0), (0, 0)), ((1, 0), (0, 1)), ((0, -1), (0, 0))],  # pads
            [(1, 1), (2, 1)],  # lhs_dils
            [(1, 1), (2, 2)])  # rhs_dils
           for b, i, j in itertools.product([1, 2], repeat=3)])
       for strides in s(all_strides)
       for rhs_dil in s(rhs_dils)
       for lhs_dil in s(lhs_dils)
       for dtype in s([np.float32])
       for padding in s(all_pads)
       for dim_nums, perms in s([
           (("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])),
           (("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])),
           (("NHWC", "OIHW", "NCHW"), ([0, 2, 3, 1], [0, 1, 2, 3]))])
       for lhs_bdim in s(itertools.chain([cast(Optional[int], None)],
                                         range(len(lhs_shape) + 1)))
       for rhs_bdim in s(itertools.chain([cast(Optional[int], None)],
                                         range(len(rhs_shape) + 1)))
       if (lhs_bdim, rhs_bdim) != (None, None)
       )))
  def testConvGeneralDilatedBatching(
      self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dil, rhs_dil,
      dimension_numbers, perms, feature_group_count, batch_group_count,
      lhs_bdim, rhs_bdim):
    rng = jtu.rand_default(self.rng())
    tol = 1e-1 if dtypes.finfo(dtype).bits <= 32 else 1e-3

    # permute shapes to match dim_spec, scale by feature_group_count
    lhs_perm, rhs_perm = perms
    lhs_shape = list(np.take(lhs_shape, lhs_perm))
    rhs_shape = list(np.take(rhs_shape, rhs_perm))

    conv = partial(lax.conv_general_dilated, window_strides=strides,
                   padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil,
                   dimension_numbers=dimension_numbers,
                   feature_group_count=feature_group_count,
                   batch_group_count=batch_group_count,
                   precision=lax.Precision.HIGHEST)
    self._CheckBatching(conv, 5, (lhs_bdim, rhs_bdim), (lhs_shape, rhs_shape),
                        (dtype, dtype), rng, rtol=tol, atol=tol)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_from_dtype={}_to_dtype={}_bdims={}".format(
          shape, from_dtype, to_dtype, bdims),
       "shape": shape, "from_dtype": from_dtype, "to_dtype": to_dtype,
       "bdims": bdims}
      for from_dtype, to_dtype in itertools.product(
          [np.float32, np.int32, "float32", "int32"], repeat=2)
      for shape in [(2, 3)]
      for bdims in all_bdims(shape)))
  def testConvertElementType(self, shape, from_dtype, to_dtype, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.convert_element_type(x, to_dtype)
    self._CheckBatching(op, 10, bdims, (shape,), (from_dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_nmant={}_nexp={}_bdims={}".format(
          jtu.format_shape_dtype_string(shape, dtype), nmant, nexp, bdims),
       "shape": shape, "dtype": dtype, "nmant": nmant, "nexp": nexp, "bdims": bdims}
      for dtype in float_dtypes
      for shape in [(2, 4)]
      for nexp in [1, 3, 5]
      for nmant in [0, 2, 4]
      for bdims in all_bdims(shape)))
  def testReducePrecision(self, shape, dtype, nmant, nexp, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.reduce_precision(x, exponent_bits=nexp, mantissa_bits=nmant)
    self._CheckBatching(op, 10, bdims, (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_from_dtype={}_to_dtype={}_bdims={}".format(
          shape, from_dtype, to_dtype, bdims),
       "shape": shape, "from_dtype": from_dtype, "to_dtype": to_dtype,
       "bdims": bdims}
      for from_dtype, to_dtype in itertools.product(
          [np.float32, np.int32, "float32", "int32"], repeat=2)
      for shape in [(2, 3)]
      for bdims in all_bdims(shape)))
  def testBitcastElementType(self, shape, from_dtype, to_dtype, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.bitcast_convert_type(x, to_dtype)
    self._CheckBatching(op, 10, bdims, (shape,), (from_dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}_bdims={}"
       .format(jtu.format_shape_dtype_string(min_shape, dtype),
               jtu.format_shape_dtype_string(operand_shape, dtype),
               jtu.format_shape_dtype_string(max_shape, dtype),
               bdims),
       "min_shape": min_shape, "operand_shape": operand_shape,
       "max_shape": max_shape, "dtype": dtype, "bdims": bdims}
      for min_shape, operand_shape, max_shape in [
          [(), (2, 3), ()],
          [(2, 3), (2, 3), ()],
          [(), (2, 3), (2, 3)],
          [(2, 3), (2, 3), (2, 3)],
      ]
      for dtype in default_dtypes
      for bdims in all_bdims(min_shape, operand_shape, max_shape)))
  def testClamp(self, min_shape, operand_shape, max_shape, dtype, bdims):
    rng = jtu.rand_default(self.rng())
    shapes = [min_shape, operand_shape, max_shape]
    self._CheckBatching(lax.clamp, 10, bdims, shapes, [dtype] * 3, rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_lhs_shape={}_rhs_shape={}_bdims={}".format(
          jtu.format_shape_dtype_string(lhs_shape, dtype),
          jtu.format_shape_dtype_string(rhs_shape, dtype),
          bdims),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "bdims": bdims}
      for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
      for bdims in all_bdims(lhs_shape, rhs_shape)
      for dtype in default_dtypes))
  def testDot(self, lhs_shape, rhs_shape, dtype, bdims):
    rng = jtu.rand_default(self.rng())
    op = partial(lax.dot, precision=lax.Precision.HIGHEST)
    self._CheckBatching(op, 5, bdims, (lhs_shape, rhs_shape), (dtype, dtype),
                        rng, rtol={np.float16: 5e-2, np.float64: 5e-14})

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs_shape={}_rhs_shape={}_lhs_contracting={}_rhs_contracting={}_bdims={}"
       .format(jtu.format_shape_dtype_string(lhs_shape, dtype),
               jtu.format_shape_dtype_string(rhs_shape, dtype),
               lhs_contracting, rhs_contracting, bdims),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "lhs_contracting": lhs_contracting, "rhs_contracting": rhs_contracting,
       "bdims": bdims}
      for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
          [(5,), (5,), [0], [0]],
          [(5, 7), (5,), [0], [0]],
          [(7, 5), (5,), [1], [0]],
          [(3, 5), (2, 5), [1], [1]],
          [(5, 3), (5, 2), [0], [0]],
          [(5, 3, 2), (5, 2, 4), [0], [0]],
          [(5, 3, 2), (5, 2, 4), [0,2], [0,1]],
          [(5, 3, 2), (3, 5, 2, 4), [0,2], [1,2]],
          [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]],
          [(3, 2), (2, 4), [1], [0]],
      ]
      for bdims in all_bdims(lhs_shape, rhs_shape)
      for dtype in default_dtypes))
  def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype,
                                 lhs_contracting, rhs_contracting, bdims):
    rng = jtu.rand_small(self.rng())
    dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))
    dot = partial(lax.dot_general, dimension_numbers=dimension_numbers)
    self._CheckBatching(dot, 5, bdims, (lhs_shape, rhs_shape), (dtype, dtype),
                        rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs_shape={}_rhs_shape={}_dimension_numbers={}_bdims={}"
       .format(jtu.format_shape_dtype_string(lhs_shape, dtype),
               jtu.format_shape_dtype_string(rhs_shape, dtype),
               dimension_numbers, bdims),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "dimension_numbers": dimension_numbers, "bdims": bdims}
      for lhs_shape, rhs_shape, dimension_numbers in [
          ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
          ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1]))),
          ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
      ]
      for bdims in all_bdims(lhs_shape, rhs_shape)
      for dtype in default_dtypes))
  def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype,
                                     dimension_numbers, bdims):
    rng = jtu.rand_small(self.rng())
    dot = partial(lax.dot_general, dimension_numbers=dimension_numbers)
    self._CheckBatching(dot, 5, bdims, (lhs_shape, rhs_shape), (dtype, dtype),
                        rng)

    # Checks that batching didn't introduce any transposes or broadcasts.
    jaxpr = api.make_jaxpr(dot)(np.zeros(lhs_shape, dtype),
                                np.zeros(rhs_shape, dtype))
    for eqn in jtu.iter_eqns(jaxpr.jaxpr):
      self.assertFalse(eqn.primitive.name in ["transpose", "broadcast"])

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_dtype={}_broadcast_sizes={}_bdims={}".format(
          shape, np.dtype(dtype).name, broadcast_sizes, bdims),
       "shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes,
       "bdims": bdims}
      for shape in [(), (2, 3)]
      for dtype in default_dtypes
      for broadcast_sizes in [(), (2,), (1, 2)]
      for bdims in all_bdims(shape)))
  def testBroadcast(self, shape, dtype, broadcast_sizes, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.broadcast(x, broadcast_sizes)
    self._CheckBatching(op, 5, bdims, (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}_bcdims={}_bdims={}".format(
          jtu.format_shape_dtype_string(inshape, dtype),
          outshape, broadcast_dimensions, bdims),
       "inshape": inshape, "dtype": dtype, "outshape": outshape,
       "dimensions": broadcast_dimensions, "bdims": bdims}
      for inshape, outshape, broadcast_dimensions in [
          ([2], [2, 2], [0]),
          ([2], [2, 2], [1]),
          ([2], [2, 3], [0]),
          ([], [2, 3], []),
      ]
      for dtype in default_dtypes
      for bdims in all_bdims(inshape)))
  def testBroadcastInDim(self, inshape, dtype, outshape, dimensions, bdims):
    rng = jtu.rand_default(self.rng())
    raise SkipTest("this test has failures in some cases")  # TODO(mattjj)
    op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
    self._CheckBatching(op, 5, bdims, (inshape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_dimensions={}_bdims={}".format(
          jtu.format_shape_dtype_string(arg_shape, np.float32),
          dimensions, bdims),
       "arg_shape": arg_shape, "dimensions": dimensions, "bdims": bdims}
      for arg_shape, dimensions in [
          [(1,), (0,)],
          [(1,), (-1,)],
          [(2, 1, 4), (1,)],
          [(2, 1, 4), (-2,)],
          [(2, 1, 3, 1), (1,)],
          [(2, 1, 3, 1), (1, 3)],
          [(2, 1, 3, 1), (3,)],
          [(2, 1, 3, 1), (1, -1)],
      ]
      for bdims in all_bdims(arg_shape)))
  def testSqueeze(self, arg_shape, dimensions, bdims):
    dtype = np.float32
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.squeeze(x, dimensions)
    self._CheckBatching(op, 10, bdims, (arg_shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}_dims={}_bdims={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype),
          dimensions, bdims),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
       "dimensions": dimensions, "bdims": bdims}
      for dtype in default_dtypes
      for arg_shape, dimensions, out_shape in [
          [(3, 4), None, (12,)],
          [(2, 1, 4), None, (8,)],
          [(2, 2, 4), None, (2, 8)],
          [(2, 2, 4), (0, 1, 2), (2, 8)],
          [(2, 2, 4), (1, 0, 2), (8, 2)],
          [(2, 2, 4), (2, 1, 0), (4, 2, 2)]
      ]
      for bdims in all_bdims(arg_shape)))
  def testReshape(self, arg_shape, out_shape, dtype, dimensions, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.reshape(x, out_shape, dimensions=dimensions)
    self._CheckBatching(op, 10, bdims, (arg_shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_pads={}_bdims={}"
       .format(jtu.format_shape_dtype_string(shape, dtype), pads, bdims),
       "shape": shape, "dtype": dtype, "pads": pads, "bdims": bdims}
      for shape in [(2, 3)]
      for bdims in all_bdims(shape, ())
      for dtype in default_dtypes
      for pads in [[(1, 2, 1), (0, 1, 0)]]))
  def testPad(self, shape, dtype, pads, bdims):
    rng = jtu.rand_small(self.rng())
    fun = lambda operand, padding: lax.pad(operand, padding, pads)
    self._CheckBatching(fun, 5, bdims, (shape, ()), (dtype, dtype), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_predshape={}_argshapes={}_bdims={}".format(
          jtu.format_shape_dtype_string(pred_shape, np.bool_),
          jtu.format_shape_dtype_string(arg_shape, arg_dtype),
          bdims),
       "pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype,
       "bdims": bdims}
      for arg_shape in [(), (3,), (2, 3)]
      for pred_shape in ([(), arg_shape] if arg_shape else [()])
      for bdims in all_bdims(pred_shape, arg_shape, arg_shape)
      for arg_dtype in default_dtypes))
  def testSelect(self, pred_shape, arg_shape, arg_dtype, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda c, x, y: lax.select(c < 0, x, y)
    self._CheckBatching(op, 5, bdims, (pred_shape, arg_shape, arg_shape,),
                        (np.bool_, arg_dtype, arg_dtype), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_shape={}_start_indices={}_limit_indices={}_strides={}_bdims={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          start_indices, limit_indices, strides, bdims),
       "shape": shape, "dtype": dtype, "starts": start_indices,
       "limits": limit_indices, "strides": strides, "bdims": bdims}
      for shape, start_indices, limit_indices, strides in [
        [(3,), (1,), (2,), None],
        [(7,), (4,), (7,), None],
        [(5,), (1,), (5,), (2,)],
        [(8,), (1,), (6,), (2,)],
        [(5, 3), (1, 1), (3, 2), None],
        [(5, 3), (1, 1), (3, 1), None],
        [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
        [(5, 3), (1, 1), (2, 1), (1, 1)],
        [(5, 3), (1, 1), (5, 3), (2, 1)],
      ]
      for bdims in all_bdims(shape)
      for dtype in default_dtypes))
  def testSlice(self, shape, dtype, starts, limits, strides, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.slice(x, starts, limits, strides)
    self._CheckBatching(op, 5, bdims, (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_perm={}_bdims={}".format(
          jtu.format_shape_dtype_string(shape, dtype), perm, bdims),
       "shape": shape, "dtype": dtype, "perm": perm, "bdims": bdims}
      for shape, perm in [
        [(3, 4), (1, 0)],
        [(3, 4), (0, 1)],
        [(3, 4, 5), (2, 1, 0)],
        [(3, 4, 5), (1, 0, 2)],
      ]
      for bdims in all_bdims(shape)
      for dtype in default_dtypes))
  def testTranspose(self, shape, dtype, perm, bdims):
    rng = jtu.rand_default(self.rng())
    op = lambda x: lax.transpose(x, perm)
    self._CheckBatching(op, 5, bdims, (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_op={}_inshape={}_reducedims={}_initval={}_bdims={}"
       .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims,
               init_val, bdims),
       "op": op, "init_val": init_val, "shape": shape, "dtype": dtype,
       "dims": dims, "bdims": bdims}
      for init_val, op, dtypes in [
          (0, lax.add, default_dtypes),
          (1, lax.mul, default_dtypes),
          (0, lax.max, all_dtypes), # non-monoidal
          (-np.inf, lax.max, float_dtypes),
          (dtypes.iinfo(np.int32).min, lax.max, [np.int32]),
          (dtypes.iinfo(np.int64).min, lax.max, [np.int64]),
          (dtypes.iinfo(np.uint32).min, lax.max, [np.uint32]),
          (dtypes.iinfo(np.uint64).min, lax.max, [np.uint64]),
          (np.inf, lax.min, float_dtypes),
          (dtypes.iinfo(np.int32).max, lax.min, [np.int32]),
          (dtypes.iinfo(np.int64).max, lax.min, [np.int64]),
          (dtypes.iinfo(np.uint32).max, lax.min, [np.uint32]),
          (dtypes.iinfo(np.uint64).max, lax.min, [np.uint64]),
      ]
      for dtype in dtypes
      for shape, dims in [
          [(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)],
          [(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)]
      ]
      for bdims in all_bdims(shape)))
  def testReduce(self, op, init_val, shape, dtype, dims, bdims):
    rng = jtu.rand_small(self.rng())
    init_val = np.asarray(init_val, dtype=dtype)
    fun = lambda operand: lax.reduce(operand, init_val, op, dims)
    self._CheckBatching(fun, 5, bdims, (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_op={}_inshape={}_reducedims={}_bdims={}"
       .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dim,
               bdims),
       "op": op, "shape": shape, "dtype": dtype,
       "dim": dim, "bdims": bdims}
      for op in [lax.argmin, lax.argmax]
      for dtype in default_dtypes
      for shape in [(3, 4, 5)]
      for dim in range(len(shape))
      for bdims in all_bdims(shape)))
  def testArgminmax(self, op, shape, dtype, dim, bdims):
    rng = jtu.rand_default(self.rng())
    fun = lambda operand: op(operand, dim, np.int32)
    self._CheckBatching(fun, 5, bdims, (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": ("_op={}_shape={}_dims={}_strides={}_padding={}"
                         "_basedilation={}_windowdilation={}")
       .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype),
               dims, strides, padding, base_dilation, window_dilation),
       "op": op, "init_val": init_val, "dtype": dtype, "shape": shape,
       "dims": dims, "strides": strides, "padding": padding,
       "base_dilation": base_dilation, "window_dilation": window_dilation}
      for init_val, op, dtypes in [
          (0, lax.add, [np.float32]),
          (-np.inf, lax.max, [np.float32]),
          (np.inf, lax.min, [np.float32]),
      ]
      for shape, dims, strides, padding, base_dilation, window_dilation in (
        itertools.chain(
          itertools.product(
            [(4, 6)],
            [(2, 1), (1, 2)],
            [(1, 1), (2, 1), (1, 2)],
            ["VALID", "SAME", [(0, 3), (1, 2)]],
            [(1, 1), (2, 3)],
            [(1, 1), (1, 2)]),
          itertools.product(
            [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],
            [(1, 2, 2, 1), (1, 1, 1, 1)],
            ["VALID", "SAME", [(0, 1), (1, 0), (2, 3), (0, 2)]],
            [(1, 1, 1, 1), (2, 1, 3, 2)],
            [(1, 1, 1, 1), (1, 2, 2, 1)])))
      for dtype in dtypes))
  def testReduceWindow(self, op, init_val, dtype, shape, dims, strides, padding,
                       base_dilation, window_dilation):
    rng = jtu.rand_small(self.rng())
    init_val = np.asarray(init_val, dtype=dtype)

    def fun(operand):
      return lax.reduce_window(operand, init_val, op, dims, strides, padding,
                               base_dilation, window_dilation)

    for bdims in all_bdims(shape):
      self._CheckBatching(fun, 3, bdims, (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_op={}_shape={}_axis={}_bdims={}_reverse={}"
       .format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), axis,
               bdims, reverse),
       "op": op, "shape": shape, "dtype": dtype, "bdims": bdims,
       "axis": axis, "reverse": reverse}
      for op, types in [
          (lax.cumsum, [np.float32, np.float64]),
          (lax.cumprod, [np.float32, np.float64]),
      ]
      for dtype in types
      for shape in [[10], [3, 4, 5]]
      for axis in range(len(shape))
      for bdims in all_bdims(shape)
      for reverse in [False, True]))
  def testCumulativeReduce(self, op, shape, dtype, axis, bdims, reverse):
    rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)
                   else jtu.rand_small)
    rng = rng_factory(self.rng())
    self._CheckBatching(partial(op, axis=axis, reverse=reverse), 7, bdims,
                        (shape,), (dtype,), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_dtype={}_padding={}".format(np.dtype(dtype).name,
                                                      padding),
       "dtype": dtype, "padding": padding}
      for dtype in float_dtypes
      for padding in ["VALID", "SAME"]))
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  @jtu.ignore_warning(message="Using reduced precision for gradient.*")
  def testSelectAndGatherAdd(self, dtype, padding):
    rng = jtu.rand_small(self.rng())
    all_configs = itertools.chain(
        itertools.product(
            [(4, 6)],
            [(2, 1), (1, 2)],
            [(1, 1), (2, 1), (1, 2)]),
        itertools.product(
            [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],
            [(1, 2, 2, 1), (1, 1, 1, 1)]))

    def fun(operand, tangents):
      pads = lax.padtype_to_pads(operand.shape, dims, strides, padding)
      ones = (1,) * len(operand.shape)
      return lax._select_and_gather_add(operand, tangents, lax.ge_p, dims,
                                        strides, pads, ones, ones)

    for shape, dims, strides in all_configs:
      for bdims in all_bdims(shape, shape):
        self._CheckBatching(fun, 3, bdims, (shape, shape), (dtype, dtype), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": f"_dtype={jtu.format_shape_dtype_string(shape, dtype)}"
      f"_padding={padding}_dims={dims}_strides={strides}",
       "dtype": dtype, "padding": padding, "shape": shape,
       "dims": dims, "strides": strides}
      for dtype in float_dtypes
      for padding in ["VALID", "SAME"]
      for shape in [(3, 2, 4, 6)]
      for dims in [(1, 1, 2, 1)]
      for strides in [(1, 2, 2, 1), (1, 1, 1, 1)]))
  def testSelectAndScatterAdd(self, dtype, padding, shape, dims, strides):
    rng = jtu.rand_small(self.rng())

    pads = lax.padtype_to_pads(shape, dims, strides, padding)

    def fun(operand, cotangents):
      return lax._select_and_scatter_add(operand, cotangents, lax.ge_p, dims,
                                         strides, pads)
    ones = (1,) * len(shape)
    cotangent_shape = api.eval_shape(
      lambda x: lax._select_and_gather_add(x, x, lax.ge_p, dims, strides,
                                           pads, ones, ones),
      np.ones(shape, dtype)).shape

    for bdims in all_bdims(cotangent_shape, shape):
      self._CheckBatching(fun, 3, bdims, (cotangent_shape, shape),
                          (dtype, dtype), rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_bdims={}_fft_ndims={}"
       .format(shape, bdims, fft_ndims),
       "shape": shape, "bdims": bdims, "fft_ndims": fft_ndims}
      for shape in [(5,), (3, 4, 5), (2, 3, 4, 5)]
      for bdims in all_bdims(shape)
      for fft_ndims in range(0, min(3, len(shape)) + 1)))
  def testFft(self, fft_ndims, shape, bdims):
    rng = jtu.rand_default(self.rng())
    ndims = len(shape)
    axes = range(ndims - fft_ndims, ndims)
    fft_lengths = [shape[axis] for axis in axes]
    op = lambda x: lax.fft(x, xla_client.FftType.FFT, fft_lengths)
    self._CheckBatching(op, 5, bdims, [shape], [np.complex64], rng)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_idxs={}_dnums={}_slice_sizes={}_bdims={}"
       .format(jtu.format_shape_dtype_string(shape, dtype), idxs, dnums,
               slice_sizes, bdims),
       "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
       "slice_sizes": slice_sizes, "bdims": bdims}
      for dtype in all_dtypes
      for shape, idxs, dnums, slice_sizes in [
          ((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(
            offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
            (1,)),
          ((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
            offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
            (2,)),
          ((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
            offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
            (1, 3)),
          ((10, 5), np.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
            offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
            (1, 3)),
      ]
      for bdims in all_bdims(shape, idxs.shape)))
  def testGather(self, shape, dtype, idxs, dnums, slice_sizes, bdims):
    fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
    self._CheckBatching(fun, 0, bdims, [shape, idxs.shape], [dtype, idxs.dtype],
                        jtu.rand_default(self.rng()))
    self._CheckBatching(fun, 5, bdims, [shape, idxs.shape], [dtype, idxs.dtype],
                        jtu.rand_default(self.rng()))

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_idxs={}_update={}_dnums={}_bdims={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          idxs, update_shape, dnums, bdims),
       "arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
       "update_shape": update_shape, "dnums": dnums, "bdims": bdims}
      for dtype in float_dtypes
      for arg_shape, idxs, update_shape, dnums in [
          ((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
            update_window_dims=(), inserted_window_dims=(0,),
            scatter_dims_to_operand_dims=(0,))),
          ((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
            update_window_dims=(1,), inserted_window_dims=(),
            scatter_dims_to_operand_dims=(0,))),
          ((10, 5,), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
            update_window_dims=(1,), inserted_window_dims=(0,),
            scatter_dims_to_operand_dims=(0,))),
      ]
      for bdims in all_bdims(arg_shape, idxs.shape, update_shape)))
  def testScatterAdd(self, arg_shape, dtype, idxs, update_shape, dnums, bdims):
    fun = partial(lax.scatter_add, dimension_numbers=dnums)
    self._CheckBatching(fun, 5, bdims, [arg_shape, idxs.shape, update_shape],
                        [dtype, idxs.dtype, dtype], jtu.rand_default(self.rng()),
                        rtol={np.float16: 5e-3, dtypes.bfloat16: 3e-2})

  def testShapeUsesBuiltinInt(self):
    x = lax.iota(np.int32, 3) + 1
    self.assertIsInstance(x.shape[0], int)  # not np.int64

  def testBroadcastShapesReturnsPythonInts(self):
    shape1, shape2 = (1, 2, 3), (2, 3)
    out_shape = lax.broadcast_shapes(shape1, shape2)
    self.assertTrue(all(type(s) is int for s in out_shape))

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_k={}_bdims={}".format(
          jtu.format_shape_dtype_string(shape, dtype), k, bdims),
       "shape": shape, "dtype": dtype, "k": k, "bdims": bdims, "rng_factory": rng_factory}
      for shape in [(4,), (3, 5, 3)]
      for k in [1, 3]
      for bdims in all_bdims(shape)
      # TODO(b/155170120): test with repeats once the XLA:CPU stable top_k bug is fixed:
      # The top_k indices for integer arrays with identical entries won't match between
      # vmap'd version and manual reference, so only test unique integer arrays for int_dtypes.
      # Note also that we chose 3 * 5 * 3 * 5 such that it fits in the range of
      # values a bfloat16 can represent exactly to avoid ties.
      for dtype, rng_factory in itertools.chain(
        unsafe_zip(default_dtypes, itertools.repeat(jtu.rand_unique_int)))))
  def testTopK(self, shape, dtype, k, bdims, rng_factory):
    rng = rng_factory(self.rng())
    # _CheckBatching doesn't work with tuple outputs, so test outputs separately.
    op1 = lambda x: lax.top_k(x, k=k)[0]
    self._CheckBatching(op1, 5, bdims, (shape,), (dtype,), rng)
    op2 = lambda x: lax.top_k(x, k=k)[1]
    self._CheckBatching(op2, 5, bdims, (shape,), (dtype,), rng)


  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_dimension={}_arity={}_bdims={}_isstable={}"
       .format(jtu.format_shape_dtype_string(shape, np.float32), dimension,
               arity, bdims, is_stable),
       "shape": shape, "dimension": dimension, "arity": arity, "bdims": bdims,
       "is_stable": is_stable}
      for shape in [(2, 3)]
      for dimension in [0, 1]
      for arity in range(3)
      for bdims in all_bdims(*((shape,) * arity))
      for is_stable in [False, True]))
  def testSort(self, shape, dimension, arity, bdims, is_stable):
    rng = jtu.rand_default(self.rng())
    if arity == 1:
      fun = partial(lax.sort, dimension=dimension)
      self._CheckBatching(fun, 5, bdims, (shape,) * arity, (np.float32,) * arity,
                          rng)
    else:
      for i in range(arity):
        fun = lambda *args, i=i: lax.sort(args,
                                          dimension=dimension,
                                          is_stable=is_stable)[i]
        self._CheckBatching(fun, 5, bdims, (shape,) * arity,
                            (np.float32,) * arity, rng)
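The testTopK parameterization above pairs every dtype with the same rng factory by zipping against itertools.repeat. A tiny standalone sketch of that idiom, with plain strings standing in for the dtypes and factories:

import itertools

dtypes = ["float32", "int32", "uint8"]
pairs = list(zip(dtypes, itertools.repeat("rand_unique_int")))
print(pairs)
# [('float32', 'rand_unique_int'), ('int32', 'rand_unique_int'),
#  ('uint8', 'rand_unique_int')]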
Example #58
0
def visualise_normed_potentials(path_to_results, path_to_plot):
    """Visualises the range of potentials relative to demand in each municipality."""
    sns.set_context('paper')
    units = pd.DataFrame(gpd.read_file(path_to_results))
    units = units[["country_code", "population_sum", "normed_potential"]]
    units["country"] = units["country_code"].map(
        lambda country_code: pycountry.countries.lookup(country_code).name)
    units["country"].replace("Macedonia, Republic of",
                             value="Macedonia",
                             inplace=True)  # too long
    units["country"].replace("Bosnia and Herzegovina",
                             value="Bosnia",
                             inplace=True)  # too long
    people = pd.DataFrame(
        data={
            "country":
            list(
                chain(*[(
                    repeat(unit[1].country, round(unit[1].population_sum /
                                                  100)))
                        for unit in units.iterrows()])),
            "normed_potential":
            list(
                chain(*[(repeat(unit[1].normed_potential,
                                round(unit[1].population_sum / 100)))
                        for unit in units.iterrows()]))
        })

    people_eu = people.copy()
    people_eu["country"] = "Europe"
    people = pd.concat([people, people_eu])

    fig = plt.figure(figsize=(8, 10), constrained_layout=True)
    ax = fig.add_subplot(111)
    sns.boxplot(data=people,
                x="normed_potential",
                y="country",
                order=people.groupby("country").normed_potential.quantile(
                    SORT_QUANTILE).sort_values().index,
                ax=ax,
                color=GREEN,
                whis=[2.5, 97.5],
                saturation=0.85,
                linewidth=1.3,
                width=0.7,
                boxprops=dict(linewidth=1.3, edgecolor=GREEN),
                whiskerprops=dict(linewidth=1, color=GREEN),
                flierprops=dict(markerfacecolor="k",
                                markeredgecolor="k",
                                markersize=0,
                                marker="o"),
                capprops=dict(color=GREEN))
    ax.axvline(1, color=RED, linewidth=1.5)
    ax.set_xlabel("potential relative to demand")
    ax.set_ylabel("country")
    ax.set_xscale('log')
    ax.set_xlim(0.08, 100)
    ax.set_xticklabels(
        ["{:.0f}%".format(tick * 100) for tick in ax.get_xticks()])
    eu_position = list(
        people.groupby("country").normed_potential.quantile(
            SORT_QUANTILE).sort_values().index).index("Europe")
    eu_patch = [
        child for child in ax.get_children()
        if isinstance(child, matplotlib.patches.PathPatch)
    ][eu_position]
    eu_patch.set_facecolor(BLUE)
    eu_patch.set_edgecolor(BLUE)
    eu_patch.set_alpha(0.8)
    eu_patch.set_zorder(100000)
    fig.savefig(path_to_plot, dpi=300, transparent=True)
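A simplified standalone sketch, with hypothetical population figures, of how the people DataFrame above is built: chain plus repeat expands each country into roughly one entry per 100 inhabitants, which weights the later boxplot by population.

from itertools import chain, repeat

populations = {"Atlantis": 300, "Utopia": 100}  # hypothetical inhabitants
weighted = list(chain(*(repeat(country, round(people / 100))
                        for country, people in populations.items())))
print(weighted)  # ['Atlantis', 'Atlantis', 'Atlantis', 'Utopia']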
Example #59
0
def evaluate(input_folders_pxl, gt_folders_xml, gt_folders_pxl, output_path, j,
             eval_tool, penalty_reduction, seam_every_x_pxl,
             small_component_ratio, **kwargs):

    # Select the number of threads
    if j == 0:
        pool = Pool(processes=cpu_count())
    else:
        pool = Pool(processes=j)

    # Get the list of all input images
    input_images = []
    for path in input_folders_pxl:
        input_images.extend(get_file_list(path, ['.png']))

    # Get the list of all GT XML
    gt_xml = []
    for path in gt_folders_xml:
        gt_xml.extend(get_file_list(path, ['.xml', '.XML']))

    # Get the list of all GT pxl
    gt_pxl = []
    for path in gt_folders_pxl:
        gt_pxl.extend(get_file_list(path, ['.png']))

    # Create output path for run
    tic = time.time()
    output_path = os.path.join(
        output_path, 'penalty_reduction_{}_seams_{}_component_ratio_{}'.format(
            penalty_reduction, seam_every_x_pxl, small_component_ratio))

    if not os.path.exists(output_path):
        os.makedirs(os.path.join(output_path))
    else:
        for the_file in os.listdir(output_path):
            file_path = os.path.join(output_path, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                print(e)

    # Debugging purposes only!
    # input_images = [input_images[1]]
    # gt_xml = [gt_xml[1]]
    # gt_pxl = [gt_pxl[1]]

    # For each file run
    param_list = dict(penalty_reduction=penalty_reduction,
                      seam_every_x_pxl=seam_every_x_pxl,
                      small_component_ratio=small_component_ratio)
    results = list(
        pool.starmap(
            compute_for_all,
            zip(input_images, gt_xml, gt_pxl, itertools.repeat(output_path),
                itertools.repeat(param_list), itertools.repeat(eval_tool))))
    pool.close()
    print("Pool closed)")

    scores = []
    errors = []

    for item in results:
        if item[0] is not None:
            scores.append(item[0])
        else:
            errors.append(item)

    if scores:
        score = np.mean(scores)
    else:
        score = -1

    write_stats(output_path, errors)
    print('Total time taken: {:.2f}, avg_line_iu={}, nb_errors={}'.format(
        time.time() - tic, score, len(errors)))
    return score
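A minimal standalone sketch of the starmap pattern used above (the worker, file names, and parameters are made up): itertools.repeat supplies the constant arguments alongside the per-image inputs so every worker call receives the same output path and settings.

import itertools
from multiprocessing import Pool

def compute(image, output_path, params):
    return "{} -> {} (threshold={})".format(image, output_path, params["threshold"])

if __name__ == "__main__":
    images = ["page_001.png", "page_002.png"]
    params = {"threshold": 0.5}
    with Pool(processes=2) as pool:
        results = pool.starmap(
            compute,
            zip(images, itertools.repeat("/tmp/out"), itertools.repeat(params)))
    print(results)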
Example #60
0
def get_trigger_info(h5in):
    group = h5in.root.Trigger if "Trigger" in h5in.root else ()
    trigger_type = group.trigger if "trigger" in group else repeat(None)
    trigger_channels = group.events if "events" in group else repeat(None)
    return trigger_type, trigger_channels
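A hedged usage sketch (the event numbers are hypothetical, not the actual tables API): repeat(None) yields an endless stream of placeholders, so callers can zip per-event data against the trigger info whether or not the Trigger group exists.

from itertools import repeat

events = [101, 102, 103]        # hypothetical event numbers
trigger_type = repeat(None)     # what get_trigger_info falls back to
for evt, ttype in zip(events, trigger_type):
    print(evt, ttype)           # zip stops at the finite iterable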