Example #1
def test_token_not_too_simple():

    def _valid(tk):
        return json_get("/foods", tk).status_code == 200

    uids, usernames, passwords, tokens = [], [], [], []
    for i in range(5):
        uid, token = next(token_gen)
        username, password = user_store[uid]

        # the token must never simply be the uid, username or password verbatim
        assert uid != token
        assert username != token
        assert password != token

        uids.append(uid)
        usernames.append(username)
        passwords.append(password)
        tokens.append(token)


    # username & password should never be contained in token
    # it may occasionally occur in token since it's random string, so
    # test 5 random user to avoid mistake
    assert not all(u in t for u, t in zip(usernames, tokens))
    assert not all(p in t for p, t in zip(passwords, tokens))

    # the uid may legitimately appear in the token (it is just digits), but
    # neighbouring uids must not yield guessable tokens
    for uid, tk in zip(uids, tokens):
        if str(uid) in tk:
            for i in range(1, 10):
                assert not _valid(tk.replace(str(uid), str(int(uid) + i)))
                assert not _valid(tk.replace(str(uid), str(int(uid) - i)))
Example #2
def test_plot_partial_dependence_multiclass(pyplot):
    # Test partial dependence plot function on multi-class input.
    iris = load_iris()
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    grid_resolution = 25
    plot_partial_dependence(clf, iris.data, [0, 1],
                            target=0,
                            grid_resolution=grid_resolution)
    fig = pyplot.gcf()
    axs = fig.get_axes()
    assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)  # has_data is a method; call it

    # now with symbol labels
    target = iris.target_names[iris.target]
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, target)

    grid_resolution = 25
    plot_partial_dependence(clf, iris.data, [0, 1],
                            target='setosa',
                            grid_resolution=grid_resolution)
    fig = pyplot.gcf()
    axs = fig.get_axes()
    assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
Example #3
    def rule_is_enumerable(self, rule):
        """ Checks that it is possible to generate sensible GET queries for
        a given rule (if the endpoint matches its own requirements)

        :type rule: werkzeug.routing.Rule
        :rtype: bool
        """
        endpoint = rule.endpoint
        methods = rule.methods or ['GET']
        converters = rule._converters.values()
        if not ('GET' in methods
                and endpoint.routing['type'] == 'http'
                and endpoint.routing['auth'] in ('none', 'public')
                and endpoint.routing.get('website', False)
                and all(hasattr(converter, 'generate')
                        for converter in converters)):
            return False

        # don't list routes whose arguments have no default value and no converter
        spec = inspect.getargspec(endpoint.method.original_func)

        # remove self and arguments having a default value
        defaults_count = len(spec.defaults or [])
        args = spec.args[1:(-defaults_count or None)]

        # check that all args have a converter
        return all(arg in rule._converters for arg in args)
Example #4
    def command(self, format, terms=None, **kwargs):
        session, config = self.session, self.session.config

        if format not in PluginManager.CONTACT_IMPORTERS:
            session.ui.error("No such import format")
            return False

        importer = PluginManager.CONTACT_IMPORTERS[format]

        if not all(x in kwargs for x in importer.required_parameters):
            session.ui.error(
                _("Required parameter missing. Required parameters "
                  "are: %s") % ", ".join(importer.required_parameters))
            return False

        allparams = importer.required_parameters + importer.optional_parameters

        if not all(x in allparams for x in kwargs):
            session.ui.error(
                _("Unknown parameter passed to importer. "
                  "Provided %s; but known parameters are: %s"
                  ) % (", ".join(kwargs), ", ".join(allparams)))
            return False

        imp = importer(kwargs)
        if terms:
            contacts = imp.filter_contacts(terms)
        else:
            contacts = imp.get_contacts()

        for importedcontact in contacts:
            # Check if contact exists. If yes, then update. Else create.
            pass
Example #5
def _test_column_grouping(m=10, n=5000, num_repeat=5, verbose=False):
    print('\nTesting column_grouping ...')
    A = np.array([[True, False, False, False, False],
                  [True, True, False, True, True]])
    grps1 = _column_group_loop(A)
    grps2 = _column_group_recursive(A)
    grps3 = [np.array([0]),
             np.array([1, 3, 4]),
             np.array([2])]
    print('OK' if all(np.array_equal(a, b) for (a, b) in zip(grps1, grps2)) else 'Fail')
    print('OK' if all(np.array_equal(a, b) for (a, b) in zip(grps1, grps3)) else 'Fail')

    for _ in range(num_repeat):
        A = np.random.rand(m, n)
        B = A > 0.5
        start = time.time()
        grps1 = _column_group_loop(B)
        elapsed_loop = time.time() - start
        start = time.time()
        grps2 = _column_group_recursive(B)
        elapsed_recursive = time.time() - start
        if verbose:
            print('Loop     :', elapsed_loop)
            print('Recursive:', elapsed_recursive)
        print('OK' if all(np.array_equal(a, b) for (a, b) in zip(grps1, grps2)) else 'Fail')
    # sorted_idx = np.concatenate(grps)
    # print B
    # print sorted_idx
    # print B[:,sorted_idx]
    return
Example #6
  def test_directory_children(self):
    # Creates 2 directories and 2 queries and saves to home directory
    dir1 = Directory.objects.create(name='test_dir1', owner=self.user)
    dir2 = Directory.objects.create(name='test_dir2', owner=self.user)
    query1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={})
    query2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={})
    children = [dir1, dir2, query1, query2]

    self.home_dir.children.add(*children)

    # Test that all children directories and documents are returned
    response = self.client.get('/desktop/api2/docs', {'path': '/'})
    data = json.loads(response.content)
    assert_true('children' in data)
    assert_equal(5, data['count'])  # This includes the 4 docs and .Trash

    # Test filter type
    response = self.client.get('/desktop/api2/docs', {'path': '/', 'type': ['directory']})
    data = json.loads(response.content)
    assert_equal(['directory'], data['types'])
    assert_equal(3, data['count'])
    assert_true(all(doc['type'] == 'directory' for doc in data['children']))

    # Test search text
    response = self.client.get('/desktop/api2/docs', {'path': '/', 'text': 'query'})
    data = json.loads(response.content)
    assert_equal('query', data['text'])
    assert_equal(2, data['count'])
    assert_true(all('query' in doc['name'] for doc in data['children']))

    # Test pagination with limit
    response = self.client.get('/desktop/api2/docs', {'path': '/', 'page': 2, 'limit': 2})
    data = json.loads(response.content)
    assert_equal(5, data['count'])
    assert_equal(2, len(data['children']))
Example #7
 def checkSummary(self, got, step, build=None):
     self.failUnless(all(isinstance(k, unicode) for k in got.keys()))
     self.failUnless(all(isinstance(v, unicode) for v in got.values()))
     exp = {u'step': step}
     if build:
         exp[u'build'] = build
     self.assertEqual(got, exp)
Example #8
  def from_list(index, queues):
    """Create a queue using the queue reference from `queues[index]`.

    Args:
      index: An integer scalar tensor that determines the input that gets
        selected.
      queues: A list of `QueueBase` objects.

    Returns:
      A `QueueBase` object.

    Raises:
      TypeError: When `queues` is not a list of `QueueBase` objects,
        or when the data types of `queues` are not all the same.
    """
    if ((not queues) or
        (not isinstance(queues, list)) or
        (not all(isinstance(x, QueueBase) for x in queues))):
      raise TypeError("A list of queues expected")

    dtypes = queues[0].dtypes
    if not all(dtypes == q.dtypes for q in queues[1:]):
      raise TypeError("Queues do not have matching component dtypes.")

    queue_refs = [x.queue_ref for x in queues]
    selected_queue = control_flow_ops.ref_select(index, queue_refs)
    # TODO(josh11b): Unify the shapes of the queues too?
    return QueueBase(dtypes=dtypes, shapes=None, queue_ref=selected_queue)
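
A minimal usage sketch (assuming TensorFlow 1.x, where this method is exposed
as tf.QueueBase.from_list; the queues must share component dtypes):

import tensorflow as tf

q0 = tf.FIFOQueue(capacity=10, dtypes=tf.float32)
q1 = tf.FIFOQueue(capacity=10, dtypes=tf.float32)
which = tf.placeholder(tf.int32, shape=[])
# Selects q0 or q1 at run time; raises TypeError if the dtypes differ.
q = tf.QueueBase.from_list(which, [q0, q1])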
Example #9
 def check_grade(self, test_output, truth_output, test_input):
     def get_ans(output):
         pat = re.compile('Student has an ([A-D]) grade', re.IGNORECASE)
         m = pat.search(output)
         if m is None:
             p2 = re.compile('Student has (failed) the course', re.IGNORECASE)
             m2 = p2.search(output)
             if m2 is None:
                 raise Exception("GRADE: no answer found")
             return m2.group(1)
         return m.group(1)
     truth_answer = get_ans(truth_output)
     grades = set(['A','B','C','D'])
     grade_patterns = {g: re.compile("Student has an {} grade".format(g),
                                     re.IGNORECASE)
                       for g in grades}
     if truth_answer in grades:
         no_neg_match = all(rgx.search(test_output) is None
                            for g, rgx in grade_patterns.items()
                            if g != truth_answer)
         pos_match = grade_patterns[truth_answer].search(test_output)
     elif truth_answer == 'failed':
         failure_rgx = re.compile("Student has failed the course",
                                  re.IGNORECASE)
         no_neg_match = all(rgx.search(test_output) is None
                            for rgx in grade_patterns.values())
         pos_match = failure_rgx.search(test_output)
     else:
         raise ValueError("Unknown grades truth {}".format(truth_answer))
     return pos_match is not None and no_neg_match
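
A quick self-contained check of the grade matcher above (the output string is
hypothetical):

import re

pat = re.compile('Student has an ([A-D]) grade', re.IGNORECASE)
m = pat.search('run complete... Student has an A grade')
assert m is not None and m.group(1) == 'A'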
Example #10
    def compilable(cls, clf):
        """
        Verifies that the given fitted model is eligible to be compiled.

        Returns True if the model is eligible, and False otherwise.

        Parameters
        ----------

        clf:
          A fitted regression tree/ensemble.


        """
        # TODO - is there an established way to check `is_fitted`?
        if isinstance(clf, DecisionTreeRegressor):
            return clf.n_outputs_ == 1 and clf.n_classes_ == 1 \
                and clf.tree_ is not None

        if isinstance(clf, GradientBoostingRegressor):
            return clf.estimators_.size and all(cls.compilable(e)
                                                for e in clf.estimators_.flat)

        if isinstance(clf, ForestRegressor):
            estimators = np.asarray(clf.estimators_)
            return estimators.size and all(cls.compilable(e)
                                           for e in estimators.flat)
        return False
Example #11
def sanitize_indices(indices):
    """Check and possibly sanitize indices.

    Parameters
    ----------
    indices : int, slice, or sequence of ints and slices
        If an int or slice is passed in, it is converted to a
        1-tuple.

    Returns
    -------
    2-tuple
        ('point', indices) if all `indices` are ints, or
        ('view', indices) if some `indices` are slices.

    Raises
    ------
    TypeError
        If `indices` is not all ints or slices.
    """

    if isinstance(indices, (int, slice)):
        return sanitize_indices((indices,))
    elif all(isinstance(i, int) for i in indices):
        return 'point', indices
    elif all(isinstance(i, int) or isinstance(i, slice) for i in indices):
        return 'view', indices
    else:
        raise TypeError("Index must be a sequence of ints and slices")
Example #12
def test_default_instance_initialize():
    """
    Testing the default _instance_initialize provided by module.
    """

    class M1(Module):
        def __init__(self):
            super(M1, self).__init__()
            self.a = T.dscalar()
            self.b = T.lscalar()
            self.c = T.lvector()

    class M2(Module):
        def __init__(self):
            super(M2, self).__init__()
            self.a = T.lscalar()
            self.x = M1()
            self.y = self.x
            self.z = M1()

    m = M2().make(a = 13,
                  x = dict(a = 1, b = 2, c = [3, 4]),
                  z = dict(a = 5, b = 6, c = [7, 8]))

    assert m.a == 13
    assert m.x.a == 1
    assert m.x.b == 2
    assert all(m.x.c == [3, 4])
    assert m.y.a == 1
    assert m.y.b == 2
    assert all(m.y.c == [3, 4])
    assert m.z.a == 5
    assert m.z.b == 6
    assert all(m.z.c == [7, 8])
Example #13
def _non_dominated_front_old(iterable, key=lambda x: x, allowequality=True):
    """Return a subset of items from iterable which are not dominated by any
    other item in iterable."""
    items = list(iterable)
    keys = dict((i, key(i)) for i in items)
    dim = len(next(iter(keys.values())))  # dict views are not indexable on Python 3
    if any(dim != len(k) for k in keys.values()):
        raise ValueError("Wrong tuple size.")

    # Make a dictionary that holds the items another item dominates.
    dominations = collections.defaultdict(list)
    for i in items:
        for j in items:
            if allowequality:
                if all(keys[i][k] < keys[j][k] for k in range(dim)):
                    dominations[i].append(j)
            else:
                if all(keys[i][k] <= keys[j][k] for k in range(dim)):
                    dominations[i].append(j)

    dominates = lambda i, j: j in dominations[i]

    res = set()
    items = set(items)
    for i in items:
        res.add(i)
        for j in list(res):
            if i is j:
                continue
            if dominates(j, i):
                res.remove(i)
                break
            elif dominates(i, j):
                res.remove(j)
    return res
Example #14
def test_with_shift_that_spans_upcoming_midnight():
    """
    0 1 2 3 4 5 6 7 8 9 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
                        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
              [*-*-*-*-*]
                        [*-*-*-*-*]
                                  [*-*-*-*-*]
                                            [*-*-*-*-
    """
    shifts = []
    # 5 hour shifts, back to back.
    shifts.append(ShiftDict(start_time=today_at_hour(5), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(10), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(15), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(20), shift_minutes=5 * HOUR))

    data = shifts_to_tabular_data(_wrap_in_lists(shifts), datetime.date.today())

    assert get_num_columns(data) == ONE_DAY

    assert_columns_all_at_correct_location(data)

    assert len(data) == 5 * HOUR + 4
    assert all(c['columns'] == 1 for c in data[:5 * HOUR])
    assert all(c['columns'] == 5 * HOUR for c in data[5 * HOUR:5 * HOUR + 3])
    assert data[-1]['columns'] == 4 * HOUR
Example #15
def test_with_shift_that_spans_previous_midnight():
    """
    0 1 2 3 4 5 6 7 8 9 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
                        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
    *-*-*-*]
            [*-*-*-*-*]
                      [*-*-*-*-*]
                                [*-*-*-*-*]
    """
    shifts = []
    # 5 hour shifts, back to back (the first starts yesterday at 23:00).
    shifts.append(ShiftDict(start_time=yesterday_at_hour(23), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(4), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(9), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(14), shift_minutes=5 * HOUR))

    data = shifts_to_tabular_data(_wrap_in_lists(shifts), datetime.date.today())
    assert get_num_columns(data) == ONE_DAY

    assert_columns_all_at_correct_location(data)

    assert len(data) == 5 * HOUR + 4
    assert data[0]['columns'] == 4 * HOUR
    assert all(c['columns'] == 5 * HOUR for c in data[1:4])
    assert all(c['columns'] == 1 for c in data[4:])
Example #16
def test_with_non_overlapping():
    """
    0 1 2 3 4 5 6 7 8 9 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
                        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
                      [*-*-*]
                            [*-*-*]
                                        [*-*-*]
    """
    shifts = []
    # shift from 9am to noon
    shifts.append(ShiftDict(start_time=today_at_hour(9), shift_minutes=3 * HOUR))
    # shift from noon to 3pm
    shifts.append(ShiftDict(start_time=today_at_hour(12), shift_minutes=3 * HOUR))
    # shift from 6pm to 9pm
    shifts.append(ShiftDict(start_time=today_at_hour(18), shift_minutes=3 * HOUR))

    data = shifts_to_tabular_data(_wrap_in_lists(shifts), datetime.date.today())
    assert get_num_columns(data) == ONE_DAY

    assert_columns_all_at_correct_location(data)

    collapsed_data = collapse_empty_columns(data)

    empties = collapsed_data[0], collapsed_data[3], collapsed_data[5]

    assert all(d['shifts'] == [] for d in empties)

    non_empties = collapsed_data[1], collapsed_data[2], collapsed_data[4]

    assert all(d['columns'] == 3 * HOUR for d in non_empties)
Example #17
def invest(positions,num_trials):
    '''Return a DataFrame of percentage returns indexed [trial i, position j],
    where entry [i, j] is the i-th independent trial of buying j positions of
    value 1000/j each in an instrument that pays double 51% of the time and
    zero 49% of the time.'''
    
    # Exception handling (positions argument)
    if not isinstance(positions, list):
        raise NotListError
    if not all(isinstance(x, (int, float)) for x in positions):
        raise NotNumError
    if not all(x % 1 == 0.0 for x in positions):
        raise NotIntError
    if not all(0 < x <= 1000 for x in positions):
        raise InvalidPosError

    # Exception handling (num_trials argument)
    if not isinstance(num_trials, (int, float)):
        raise TrialNotNumError
    if num_trials <= 0:
        raise TrialNegError
    
    #Program
    position_value = 1000/np.array(positions)
    cumu_ret = DataFrame(columns=positions,index=np.arange(1,num_trials+1))
    
    for i in position_value:
        col=1000/i
        cumu_ret[col] = col
        cumu_ret[col] = cumu_ret[col].map(calcCumRet)    
    daily_ret = (cumu_ret/1000)-1
    return daily_ret
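
A quick sanity check of the payoff scheme described in the docstring: each
dollar doubles with probability 0.51 and goes to zero otherwise, so the
expected per-trial return is 2 * 0.51 - 1 = +2%:

expected_daily_ret = 2 * 0.51 - 1
assert abs(expected_daily_ret - 0.02) < 1e-12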
Example #18
    def test_history(self, manager, my_vcr):
        with my_vcr.use_cassette("pipeline/history_Consumer_Website"):
            name = "Consumer_Website"
            result = manager.history(name)

            assert all(isinstance(i, pipeline.PipelineInstance) for i in result)
            assert all(i.data.name == name for i in result)
Example #19
    def test_simple(self):
        prob = Problem(Group(), impl=impl)

        size = 5
        A1 = prob.root.add('A1', IndepVarComp('a', np.zeros(size, float)))
        B1 = prob.root.add('B1', IndepVarComp('b', np.zeros(size, float)))
        B2 = prob.root.add('B2', IndepVarComp('b', np.zeros(size, float)))
        S1 = prob.root.add('S1', IndepVarComp('s', ''))
        L1 = prob.root.add('L1', IndepVarComp('l', []))

        C1 = prob.root.add('C1', ABCDArrayComp(size))
        C2 = prob.root.add('C2', ABCDArrayComp(size))

        prob.root.connect('A1.a', 'C1.a')
        prob.root.connect('B1.b', 'C1.b')
        # prob.root.connect('S1:s', 'C1.in_string')
        # prob.root.connect('L1:l', 'C1.in_list')

        prob.root.connect('C1.c', 'C2.a')
        prob.root.connect('B2.b', 'C2.b')
        # prob.root.connect('C1.out_string', 'C2.in_string')
        # prob.root.connect('C1.out_list',   'C2.in_list')

        prob.setup(check=False)

        prob['A1.a'] = np.ones(size, float) * 3.0
        prob['B1.b'] = np.ones(size, float) * 7.0
        prob['B2.b'] = np.ones(size, float) * 5.0

        prob.run()

        self.assertTrue(all(prob['C2.a'] == np.ones(size, float)*10.))
        self.assertTrue(all(prob['C2.b'] == np.ones(size, float)*5.))
        self.assertTrue(all(prob['C2.c'] == np.ones(size, float)*15.))
        self.assertTrue(all(prob['C2.d'] == np.ones(size, float)*5.))
Example #20
    def test_multiple_problems(self):
        if MPI:
            # split the comm and run an instance of the Problem in each subcomm
            subcomm = self.comm.Split(self.comm.rank)
            prob = Problem(Group(), impl=impl, comm=subcomm)

            size = 5
            value = self.comm.rank + 1
            values = np.ones(size)*value

            A1 = prob.root.add('A1', IndepVarComp('x', values))
            C1 = prob.root.add('C1', ABCDArrayComp(size))

            prob.root.connect('A1.x', 'C1.a')
            prob.root.connect('A1.x', 'C1.b')

            prob.setup(check=False)
            prob.run()

            # check the first output array and store in result
            self.assertTrue(all(prob['C1.c'] == np.ones(size)*(value*2)))
            result = prob['C1.c']

            # gather the results from the separate processes/problems and check
            # for expected values
            results = self.comm.allgather(result)
            self.assertEqual(len(results), self.comm.size)

            for n in range(self.comm.size):
                expected = np.ones(size)*2*(n+1)
                self.assertTrue(all(results[n] == expected))
Example #21
def add_full_barcode_adapter_sets(matching_sets):
    """
    This function adds some new 'full' adapter sequences based on what was already found. For
    example, if the ligation adapters and the reverse barcode adapters are found, it assumes we are
    looking at a native barcoding run and so it adds the complete native barcoding adapter
    sequences (with the barcode's upstream and downstream context included).
    """
    matching_set_names = [x.name for x in matching_sets]

    for i in range(1, 97):

        # Native barcode full sequences
        if all(x in matching_set_names
               for x in ['SQK-NSK007', 'Barcode ' + str(i) + ' (reverse)']):
            matching_sets.append(make_full_native_barcode_adapter(i))

        # Rapid barcode full sequences
        if all(x in matching_set_names
               for x in ['SQK-NSK007', 'Rapid', 'Barcode ' + str(i) + ' (forward)']):
            matching_sets.append(make_full_rapid_barcode_adapter(i))
        # Added for test
        # PCR barcode full sequences
        if all(x in matching_set_names
               for x in ['PCR', 'Barcode ' + str(i) + ' (forward)']):
            matching_sets.append(make_full_PCR_barcode_adapter(i))

    return matching_sets
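
A minimal self-contained illustration of the membership test used above (the
set names are hypothetical):

matching_set_names = ['SQK-NSK007', 'Barcode 3 (reverse)']
i = 3
assert all(x in matching_set_names
           for x in ['SQK-NSK007', 'Barcode ' + str(i) + ' (reverse)'])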
Example #22
def test_add_patch_info():
    """Test adding patch info to source space."""
    # let's setup a small source space
    src = read_source_spaces(fname_small)
    src_new = read_source_spaces(fname_small)
    for s in src_new:
        s['nearest'] = None
        s['nearest_dist'] = None
        s['pinfo'] = None

    # test that no patch info is added for small dist_limit
    try:
        add_source_space_distances(src_new, dist_limit=0.00001)
    except RuntimeError:  # what we throw when scipy version is wrong
        pass
    else:
        assert all(s['nearest'] is None for s in src_new)
        assert all(s['nearest_dist'] is None for s in src_new)
        assert all(s['pinfo'] is None for s in src_new)

    # now let's use one that works
    add_source_space_distances(src_new)

    for s1, s2 in zip(src, src_new):
        assert_array_equal(s1['nearest'], s2['nearest'])
        assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
        assert_equal(len(s1['pinfo']), len(s2['pinfo']))
        for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
            assert_array_equal(p1, p2)
Example #23
    def _parse_table_name(self, table_id):
        """Parse a table name in the form of appid_YYYY_MM or
        YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.

        Args:
            table_id: The table id as listed by BigQuery.

        Returns:
            Tuple containing year/month and app id. Returns None, None if the
            table id cannot be parsed.
        """

        # Prefix date
        attributes = table_id.split('_')
        year_month = "-".join(attributes[:2])
        app_id = "-".join(attributes[2:])

        # Check if date parsed correctly
        if year_month.count("-") == 1 and all(
                [num.isdigit() for num in year_month.split('-')]):
            return year_month, app_id

        # Postfix date (reuse the same split)
        year_month = "-".join(attributes[-2:])
        app_id = "-".join(attributes[:-2])

        # Check if date parsed correctly
        if year_month.count("-") == 1 and all(
                [num.isdigit() for num in year_month.split('-')]):
            return year_month, app_id

        return None, None
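
A self-contained walk-through of the postfix-date branch above (the table id
is hypothetical):

table_id = 'my_app_2014_03'
attributes = table_id.split('_')
year_month = '-'.join(attributes[-2:])   # '2014-03'
app_id = '-'.join(attributes[:-2])       # 'my-app'
assert year_month.count('-') == 1
assert all(num.isdigit() for num in year_month.split('-'))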
Example #24
  def insert_atomic_inputs(self, atomic_inputs, events_list=None):
    '''Insert inputs into events_list in the same relative order as the
    original events list. This method is needed because set union as used in
    delta debugging does not make sense for event sequences (events are ordered)'''
    # Note: events_list should never be None (I think), since it does not make
    # sense to insert inputs into the original sequence that are already present
    if events_list is None:
      raise ValueError("Shouldn't be adding inputs to the original trace")

    inputs = self._expand_atomics(atomic_inputs)

    if not all(e in self._event2idx for e in inputs):
      raise ValueError("Not all inputs present in original events list %s" %
                       [e for e in input if e not in self._event2idx])
    if not all(e in self._event2idx for e in events_list):
      raise ValueError("Not all events in original events list %s" %
                       [e for e in events_list if e not in self._event2idx])

    result = []
    for successor in events_list:
      orig_successor_idx = self._event2idx[successor]
      while len(inputs) > 0 and orig_successor_idx > self._event2idx[inputs[0]]:
        # If the current successor did in fact come after the next input in the
        # original trace, insert next input here
        next_input = inputs.pop(0)  # avoid shadowing the builtin `input`
        result.append(next_input)
      result.append(successor)

    # Any remaining inputs should be appended at the end -- they had no
    # successors
    result += inputs
    # Deal with newly added host migrations
    result = self._straighten_inserted_migrations(result)
    return EventDagView(self, result)
Example #25
  def test_add_strategy_with_setitem(self):
    sdict = StrategyDict("sdict")
    sdict["add"] = operator.add
    sdict["mul"] = operator.mul
    sdict["+"] = operator.add

    assert len(sdict) == 2
    assert set(sdict.keys()) == {("add", "+"), ("mul",)}
    assert all(name in dir(sdict) for name in {"add", "+", "mul"})
    assert all(name in vars(sdict) for name in {"add", "+", "mul"})

    assert sdict.add(2, 3) == 5 == sdict["add"](2, 3)
    assert sdict.mul(2, 3) == 6 == sdict["mul"](2, 3)
    assert sdict(7, 8) == 15 == sdict.default(7, 8)

    del sdict["+"]
    assert len(sdict) == 2
    del sdict.add
    assert len(sdict) == 1
    assert sdict(7, 8) == NotImplemented == sdict.default(7, 8)

    sdict["pow"] = operator.pow
    assert len(sdict) == 2
    assert sdict(2, 3) == 8 == sdict.default(2, 3)
    assert sdict.pow(5, 2) == 25 == sdict["pow"](5, 2)
Example #26
 def _module_quotient(self, other, relations=False):
     # See: [SCA, section 2.8.4]
     if relations and len(other.gens) != 1:
         raise NotImplementedError
     if len(other.gens) == 0:
         return self.ring.ideal(1)
     elif len(other.gens) == 1:
         # We do some trickery. Let f be the (vector!) generating ``other``
         # and f1, .., fn be the (vectors) generating self.
         # Consider the submodule of R^{r+1} generated by (f, 1) and
         # {(fi, 0) | i}. Then the intersection with the last module
         # component yields the quotient.
         g1 = list(other.gens[0]) + [1]
         gi = [list(x) + [0] for x in self.gens]
         # NOTE: We *need* to use an elimination order
         M = self.ring.free_module(self.rank + 1).submodule(*([g1] + gi),
                                         order='ilex', TOP=False)
         if not relations:
             return self.ring.ideal(*[x[-1] for x in M._groebner_vec() if
                                      all(y == self.ring.zero for y in x[:-1])])
         else:
             G, R = M._groebner_vec(extended=True)
             indices = [i for i, x in enumerate(G) if
                        all(y == self.ring.zero for y in x[:-1])]
             return (self.ring.ideal(*[G[i][-1] for i in indices]),
                     [[-x for x in R[i][1:]] for i in indices])
     # For more generators, we use I : <h1, .., hn> = intersection of
     #                                    {I : <hi> | i}
     # TODO this can be done more efficiently
     return reduce(lambda x, y: x.intersect(y),
         (self._module_quotient(self.container.submodule(x)) for x in other.gens))
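
A sketch of why the single-generator trick above works, writing N for self and
f for the vector generating other:

\[
(N :_R \langle f \rangle) = \{\, r \in R \mid r f \in N \,\}, \qquad
M = \langle (f, 1),\ (f_1, 0),\ \dots,\ (f_n, 0) \rangle \subseteq R^{r+1}.
\]

Every element of \(M\) has the form \((a f + \sum_i b_i f_i,\ a)\), so its
first \(r\) components vanish exactly when \(a f \in N\); the last components
of such elements are therefore precisely the quotient \((N :_R \langle f \rangle)\).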
Example #27
  def test_strategies_names_introspection(self):
    sd = StrategyDict()
    sd.strategy("first", "abc")(lambda val: "abc" + val)
    sd.strategy("second", "def")(lambda val: "def" + val) # Neglect 2nd name
    sd.strategy("third", "123")(lambda val: "123" + val) # Neglect 2nd name

    # Nothing new here: strategies do what they should...
    assert sd("x") == "abcx"
    assert sd.default("p") == "abcp"

    assert sd.first("w") == "abcw" == sd["first"]("w")
    assert sd.second("zsc") == "defzsc" == sd["second"]("zsc")
    assert sd.third("blah") == "123blah" == sd["third"]("blah")

    assert sd.abc("y") == "abcy" == sd["abc"]("y")
    assert sd["def"]("few") == "deffew"
    assert sd["123"]("lots") == "123lots"

    # Valid names for attributes
    all_names = {"first", "second", "third", "abc", "def", "123"}
    assert all(name in dir(sd) for name in all_names)
    assert all(name in vars(sd) for name in all_names)
    assert "default" in dir(sd)
    assert "default" in vars(sd)
    all_keys_tuples = sd.keys()
    all_keys = reduce(operator.concat, all_keys_tuples)
    assert set(all_keys) == all_names # Default not in keys
    assert set(all_keys_tuples) == {("first", "abc"),
                                    ("second", "def"),
                                    ("third", "123")}

    # First name is the __name__
    assert sd["abc"].__name__ == "first"
    assert sd["def"].__name__ == "second"
    assert sd["123"].__name__ == "third"
Example #28
    def test_http_pool_key_fields(self):
        """Assert the HTTPPoolKey fields are honored when selecting a pool."""
        connection_pool_kw = {
            'timeout': timeout.Timeout(3.14),
            'retries': retry.Retry(total=6, connect=2),
            'block': True,
            'strict': True,
            'source_address': '127.0.0.1',
        }
        p = PoolManager()
        conn_pools = [
            p.connection_from_url('http://example.com/'),
            p.connection_from_url('http://example.com:8000/'),
            p.connection_from_url('http://other.example.com/'),
        ]

        for key, value in connection_pool_kw.items():
            p.connection_pool_kw[key] = value
            conn_pools.append(p.connection_from_url('http://example.com/'))

        assert all(
            x is not y
            for i, x in enumerate(conn_pools)
            for j, y in enumerate(conn_pools)
            if i != j
        )
        assert all(isinstance(key, PoolKey) for key in p.pools.keys())
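
The nested all(...) above is a pairwise-distinctness check; a minimal
stand-alone version of the same idiom:

pools = [object(), object(), object()]
assert all(x is not y
           for i, x in enumerate(pools)
           for j, y in enumerate(pools)
           if i != j)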
Example #29
    def test_mix(self):
        mix_reps = 4
        mix_asp_transport = self.asp_transport.copy()
        mix_asp_transport["pump_override_volume"] = None
        mix_dsp_transport = self.asp_transport.copy()
        mix_dsp_transport["volume"] = -mix_dsp_transport["volume"]
        mix_dsp_transport["pump_override_volume"] = None

        tip_position = self.asp_transport["mode_params"]["tip_position"]
        self.lhm._mix(
            # pylint: disable=invalid-unary-operand-type
            volume=-self.asp_transport["volume"],
            repetitions=mix_reps,
            position_x=tip_position["position_x"],
            position_y=tip_position["position_y"],
            initial_z=self.well_top_z,
            asp_flowrate=self.asp_transport["flowrate"],
            dsp_flowrate=self.asp_transport["flowrate"],
            delay_time=self.asp_transport["delay_time"],
            liquid_class=self.asp_transport["mode_params"]["liquid_class"]
        )
        assert self.lhm._transports[0] == self.well_top_transport
        asp_transports = [
            self.lhm._transports[_] for _ in range(1, mix_reps, 2)
        ]
        dsp_transports = [
            self.lhm._transports[_] for _ in range(2, mix_reps, 2)
        ]
        assert all(_ == mix_asp_transport for _ in asp_transports)
        assert all(_ == mix_dsp_transport for _ in dsp_transports)
Example #30
    def __init__(
        self,
        job_id=1,
        search_tree=None,
        q_event_proxies=None,
        q_grids=None,
        ):
        from abjad.tools import quantizationtools

        search_tree = search_tree or \
            quantizationtools.UnweightedSearchTree()

        q_event_proxies = q_event_proxies or []

        assert isinstance(search_tree, quantizationtools.SearchTree)
        assert all(
            isinstance(x, quantizationtools.QEventProxy)
            for x in q_event_proxies
            )
        self._job_id = job_id
        self._search_tree = search_tree
        self._q_event_proxies = tuple(q_event_proxies)
        if q_grids is None:
            self._q_grids = ()
        else:
            assert all(
                isinstance(x, quantizationtools.QGrid)
                for x in q_grids
                )
            self._q_grids = tuple(q_grids)
Example #31
def _set_network_proxy():
    if conf.proxy:
        debug_msg = "setting the HTTP/SOCKS proxy for all network requests"
        logger.debug(debug_msg)

        try:
            _ = urlsplit(conf.proxy)
        except Exception as ex:
            err_msg = "invalid proxy address '{0}' ('{1}')".format(conf.proxy, str(ex))
            raise PocsuiteSyntaxException(err_msg)

        hostname_port = _.netloc.split(":")
        scheme = _.scheme.upper()
        hostname = hostname_port[0]
        port = None
        username = None
        password = None

        if len(hostname_port) == 2:
            try:
                port = int(hostname_port[1])
            except Exception:
                pass

        if not all((scheme, hasattr(PROXY_TYPE, scheme), hostname, port)):
            err_msg = "proxy value must be in format '({0})://address:port'".format("|".join(
                _[0].lower() for _ in get_public_type_members(PROXY_TYPE)))
            raise PocsuiteSyntaxException(err_msg)

        if conf.proxy_cred:
            _ = re.search(r"\A(.*?):(.*?)\Z", conf.proxy_cred)
            if not _:
                err_msg = "proxy authentication credentials "
                err_msg += "value must be in format username:password"
                raise PocsuiteSyntaxException(err_msg)
            else:
                username = _.group(1)
                password = _.group(2)

        if scheme in (PROXY_TYPE.SOCKS4, PROXY_TYPE.SOCKS5, PROXY_TYPE.SOCKS5H):
            socks.set_default_proxy(
                socks.PROXY_TYPE_SOCKS4 if scheme == PROXY_TYPE.SOCKS4 else socks.PROXY_TYPE_SOCKS5,
                hostname,
                port,
                username=username,
                password=password,
                rdns=True if scheme == PROXY_TYPE.SOCKS5H else False,
            )
            socket.socket = socks.socksocket
            conf.proxies = {
                "http": conf.proxy,
                "https": conf.proxy,
            }
        else:
            if conf.proxy_cred:
                proxy_string = "{0}@".format(conf.proxy_cred)
            else:
                proxy_string = ""

            proxy_string = "{0}{1}:{2}".format(proxy_string, hostname, port)
            conf.proxies = {
                "http": proxy_string,
                "https": proxy_string
            }
Example #32
    def update_state(
        self, session: Session = None, execute_callbacks: bool = True
    ) -> Tuple[List[TI], Optional[callback_requests.DagCallbackRequest]]:
        """
        Determines the overall state of the DagRun based on the state
        of its TaskInstances.

        :param session: Sqlalchemy ORM Session
        :type session: Session
        :param execute_callbacks: Should dag callbacks (success/failure, SLA etc) be invoked
            directly (default: true) or recorded as a pending request in the ``callback`` property
        :type execute_callbacks: bool
        :return: Tuple containing tis that can be scheduled in the current loop & `callback` that
            needs to be executed
        """
        # Callback to execute in case of Task Failures
        callback: Optional[callback_requests.DagCallbackRequest] = None

        start_dttm = timezone.utcnow()
        self.last_scheduling_decision = start_dttm
        with Stats.timer(f"dagrun.dependency-check.{self.dag_id}"):
            dag = self.get_dag()
            info = self.task_instance_scheduling_decisions(session)

            tis = info.tis
            schedulable_tis = info.schedulable_tis
            changed_tis = info.changed_tis
            finished_tasks = info.finished_tasks
            unfinished_tasks = info.unfinished_tasks

            none_depends_on_past = all(not t.task.depends_on_past for t in unfinished_tasks)
            none_task_concurrency = all(t.task.task_concurrency is None for t in unfinished_tasks)

            if unfinished_tasks and none_depends_on_past and none_task_concurrency:
                # small speed up
                are_runnable_tasks = (
                    schedulable_tis
                    or self._are_premature_tis(unfinished_tasks, finished_tasks, session)
                    or changed_tis
                )

        leaf_task_ids = {t.task_id for t in dag.leaves}
        leaf_tis = [ti for ti in tis if ti.task_id in leaf_task_ids]

        # if all leaves finished and at least one failed, the run failed
        if not unfinished_tasks and any(leaf_ti.state in State.failed_states for leaf_ti in leaf_tis):
            self.log.error('Marking run %s failed', self)
            self.set_state(State.FAILED)
            if execute_callbacks:
                dag.handle_callback(self, success=False, reason='task_failure', session=session)
            else:
                callback = callback_requests.DagCallbackRequest(
                    full_filepath=dag.fileloc,
                    dag_id=self.dag_id,
                    execution_date=self.execution_date,
                    is_failure_callback=True,
                    msg='task_failure',
                )

        # if all leaves succeeded and no unfinished tasks, the run succeeded
        elif not unfinished_tasks and all(leaf_ti.state in State.success_states for leaf_ti in leaf_tis):
            self.log.info('Marking run %s successful', self)
            self.set_state(State.SUCCESS)
            if execute_callbacks:
                dag.handle_callback(self, success=True, reason='success', session=session)
            else:
                callback = callback_requests.DagCallbackRequest(
                    full_filepath=dag.fileloc,
                    dag_id=self.dag_id,
                    execution_date=self.execution_date,
                    is_failure_callback=False,
                    msg='success',
                )

        # if *all tasks* are deadlocked, the run failed
        elif unfinished_tasks and none_depends_on_past and none_task_concurrency and not are_runnable_tasks:
            self.log.error('Deadlock; marking run %s failed', self)
            self.set_state(State.FAILED)
            if execute_callbacks:
                dag.handle_callback(self, success=False, reason='all_tasks_deadlocked', session=session)
            else:
                callback = callback_requests.DagCallbackRequest(
                    full_filepath=dag.fileloc,
                    dag_id=self.dag_id,
                    execution_date=self.execution_date,
                    is_failure_callback=True,
                    msg='all_tasks_deadlocked',
                )

        # finally, if the leaves aren't done, the dag is still running
        else:
            self.set_state(State.RUNNING)

        self._emit_true_scheduling_delay_stats_for_finished_state(finished_tasks)
        self._emit_duration_stats_for_finished_state()

        session.merge(self)

        return schedulable_tis, callback
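
A minimal sketch of the leaf-state checks above (the state values and sets are
hypothetical stand-ins for Airflow's State.failed_states/State.success_states):

failed_states = {'failed', 'upstream_failed'}
success_states = {'success'}
leaf_states = ['success', 'failed']
assert any(s in failed_states for s in leaf_states)       # run failed
assert not all(s in success_states for s in leaf_states)  # not a success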
Example #33
def test_for_string_value_in_randomized(fn, locale):
    results_set = set()
    for _ in range(50):
        results_set.add(fn(locale))

    assert all(isinstance(result, str) for result in results_set)
Example #34
def free_rows_and_columns(data):
    # zip(*data[::-1]) rotates the grid, turning columns into rows
    return (sum(1 for row in data if all(square == '.' for square in row)),
            sum(1 for col in zip(*data[::-1])
                if all(square == '.' for square in col)))
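
A quick check with a hypothetical 3x3 board: one free row, one free column.

board = ['#.#',
         '...',
         '#.#']
assert free_rows_and_columns(board) == (1, 1)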
Example #35
def image_exists(name, features):
  suffixes = features.copy()
  if 'sh1' in suffixes:
    suffixes.remove('sh1')
    suffixes += ['sh1x', 'sh1y', 'sh1z']
  return all(os.path.isfile(name + '.' + s + '.exr') for s in suffixes)
Example #36
 def process_item(self, item, spider):
     # drop the item if any of its fields are missing/falsy
     # (assumes DropItem is imported from scrapy.exceptions)
     if not all(item.values()):
         raise DropItem("Missing values!")
     return item
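
A hypothetical usage note: an item with any falsy field is dropped before it
reaches later pipelines.

item = {'title': 'Widget', 'price': ''}
assert not all(item.values())  # '' is falsy, so process_item would raise DropItem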