def test_connector_skeletons(self):
        self.fake_authentication()

        response = self.client.post(
                '/%d/connector/skeletons' % self.test_project_id, {
                    'connector_ids[0]': 356,
                    'connector_ids[1]': 2463
                })
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))
        expected_result = [
            [356, {
                'presynaptic_to': 235,
                'presynaptic_to_node': 285,
                'postsynaptic_to': [361, 373],
                'postsynaptic_to_node': [367, 377]
            }],
            [2463, {
                'presynaptic_to': 2462,
                'presynaptic_to_node': 2462,
                'postsynaptic_to': [2462],
                'postsynaptic_to_node': [2461]
            }],
        ]
        self.assertEqual(len(expected_result), len(parsed_response))
        six.assertCountEqual(self, expected_result, parsed_response)
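
# Every example in this listing asserts with six.assertCountEqual(self, a, b),
# which passes when both sequences hold the same elements with the same
# multiplicities, regardless of order (six maps it to assertCountEqual on
# Python 3 and assertItemsEqual on Python 2). A minimal standalone sketch:
import unittest

import six

class CountEqualDemo(unittest.TestCase):
    def test_order_is_ignored_but_counts_matter(self):
        # Same elements in a different order: passes.
        six.assertCountEqual(self, [1, 2, 2, 3], [3, 2, 1, 2])
        # A different multiplicity fails.
        with self.assertRaises(AssertionError):
            six.assertCountEqual(self, [1, 2, 2], [1, 2])
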
    def test_must_prefer_implicit_with_any_method(self):

        implicit_apis = {
            "Event1": {
                "Type": "Api",
                "Properties": {
                    # This API is duplicated between implicit & explicit
                    "Path": "/path",
                    "Method": "ANY"
                }
            }
        }

        explicit_apis = [
            # Explicit APIs should be completely masked by the implicit one, because of "ANY"
            Api(path="/path", method="GET", function_name="explicitfunction", cors=None),
            Api(path="/path", method="DELETE", function_name="explicitfunction", cors=None),
        ]

        self.template["Resources"]["Api1"]["Properties"]["DefinitionBody"] = make_swagger(explicit_apis)
        self.template["Resources"]["ImplicitFunc"]["Properties"]["Events"] = implicit_apis

        expected_apis = [
            Api(path="/path", method="GET", function_name="ImplicitFunc", cors=None),
            Api(path="/path", method="POST", function_name="ImplicitFunc", cors=None),
            Api(path="/path", method="PUT", function_name="ImplicitFunc", cors=None),
            Api(path="/path", method="DELETE", function_name="ImplicitFunc", cors=None),
            Api(path="/path", method="HEAD", function_name="ImplicitFunc", cors=None),
            Api(path="/path", method="OPTIONS", function_name="ImplicitFunc", cors=None),
            Api(path="/path", method="PATCH", function_name="ImplicitFunc", cors=None)
        ]

        provider = SamApiProvider(self.template)
        assertCountEqual(self, expected_apis, provider.apis)
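
# The expected_apis list above spells out how method "ANY" fans out into the
# seven concrete HTTP verbs. A minimal sketch of that normalization;
# expand_methods is a hypothetical helper, not the SamApiProvider internals.
_ANY_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH"]

def expand_methods(method):
    """Return the concrete HTTP methods for a route's declared method."""
    if method.upper() == "ANY":
        return list(_ANY_METHODS)
    return [method.upper()]

assert expand_methods("any") == _ANY_METHODS
assert expand_methods("get") == ["GET"]
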
    def test_with_binary_media_types_in_swagger_and_on_resource(self):

        input_apis = [
            Api(path="/path", method="OPTIONS", function_name="SamFunc1"),
        ]
        extra_binary_types = ["text/html"]

        template = {
            "Resources": {

                "Api1": {
                    "Type": "AWS::Serverless::Api",
                    "Properties": {
                        "BinaryMediaTypes": extra_binary_types,
                        "StageName": "Prod",
                        "DefinitionBody": make_swagger(input_apis, binary_media_types=self.binary_types)
                    }
                }
            }
        }

        expected_binary_types = sorted(self.binary_types + extra_binary_types)
        expected_apis = [
            Api(path="/path", method="OPTIONS", function_name="SamFunc1", binary_media_types=expected_binary_types),
        ]

        provider = SamApiProvider(template)
        assertCountEqual(self, expected_apis, provider.apis)
Example #4
    def test_write_genre_list(self):
        mediafile = self._mediafile_fixture('empty')
        mediafile.genres = [u'one', u'two']
        mediafile.save()

        mediafile = MediaFile(mediafile.path)
        assertCountEqual(self, mediafile.genres, [u'one', u'two'])
    def test_must_prefer_implicit_api_over_explicit(self):

        implicit_apis = {
            "Event1": {
                "Type": "Api",
                "Properties": {
                    # This API is duplicated between implicit & explicit
                    "Path": "/path1",
                    "Method": "get"
                }
            },

            "Event2": {
                "Type": "Api",
                "Properties": {
                    "Path": "/path2",
                    "Method": "POST"
                }
            }
        }

        self.template["Resources"]["Api1"]["Properties"]["DefinitionBody"] = self.swagger
        self.template["Resources"]["ImplicitFunc"]["Properties"]["Events"] = implicit_apis

        expected_apis = [
            Api(path="/path1", method="GET", function_name="ImplicitFunc", cors=None),  # Comes from Implicit

            Api(path="/path2", method="GET", function_name="explicitfunction", cors=None),
            Api(path="/path2", method="POST", function_name="ImplicitFunc", cors=None),  # Comes from implicit

            Api(path="/path3", method="GET", function_name="explicitfunction", cors=None),
        ]

        provider = SamApiProvider(self.template)
        assertCountEqual(self, expected_apis, provider.apis)
    def test_swagger_with_any_method(self):

        apis = [
            Api(path="/path", method="any", function_name="SamFunc1", cors=None)
        ]

        expected_apis = [
            Api(path="/path", method="GET", function_name="SamFunc1", cors=None),
            Api(path="/path", method="POST", function_name="SamFunc1", cors=None),
            Api(path="/path", method="PUT", function_name="SamFunc1", cors=None),
            Api(path="/path", method="DELETE", function_name="SamFunc1", cors=None),
            Api(path="/path", method="HEAD", function_name="SamFunc1", cors=None),
            Api(path="/path", method="OPTIONS", function_name="SamFunc1", cors=None),
            Api(path="/path", method="PATCH", function_name="SamFunc1", cors=None)
        ]

        template = {
            "Resources": {
                "Api1": {
                    "Type": "AWS::Serverless::Api",
                    "Properties": {
                        "StageName": "Prod",
                        "DefinitionBody": make_swagger(apis)
                    }
                }
            }
        }

        provider = SamApiProvider(template)
        assertCountEqual(self, expected_apis, provider.apis)
    def test_with_binary_media_types(self):

        template = {
            "Resources": {

                "Api1": {
                    "Type": "AWS::Serverless::Api",
                    "Properties": {
                        "StageName": "Prod",
                        "DefinitionBody": make_swagger(self.input_apis, binary_media_types=self.binary_types)
                    }
                }
            }
        }

        expected_binary_types = sorted(self.binary_types)
        expected_apis = [
            Api(path="/path1", method="GET", function_name="SamFunc1", cors=None,
                binary_media_types=expected_binary_types),
            Api(path="/path1", method="POST", function_name="SamFunc1", cors=None,
                binary_media_types=expected_binary_types),

            Api(path="/path2", method="PUT", function_name="SamFunc1", cors=None,
                binary_media_types=expected_binary_types),
            Api(path="/path2", method="GET", function_name="SamFunc1", cors=None,
                binary_media_types=expected_binary_types),

            Api(path="/path3", method="DELETE", function_name="SamFunc1", cors=None,
                binary_media_types=expected_binary_types)
        ]

        provider = SamApiProvider(template)
        assertCountEqual(self, expected_apis, provider.apis)
 def test_build_signature_dict_for_content_type_generates_correct_python_dict_for_date_decimal_and_none_types(self):
     self.sut.raw_data = {'date': datetime.date(1900, 1, 2), "foo": Decimal(100.12), "empty": None}
     result = self.sut._build_signature_dict_for_content_type({"Content-Type": "application/json"})
     six.assertCountEqual(self, result, {
         "date": "1900-01-02",
         "foo":  "100.1200000000000045474735088646411895751953125",
         "empty": None})
    def test_with_swagger_as_both_body_and_uri(self, SamSwaggerReaderMock):

        body = {"some": "body"}
        filename = "somefile.txt"

        template = {
            "Resources": {

                "Api1": {
                    "Type": "AWS::Serverless::Api",
                    "Properties": {
                        "StageName": "Prod",
                        "DefinitionUri": filename,
                        "DefinitionBody": body
                    }
                }
            }
        }

        SamSwaggerReaderMock.return_value.read.return_value = make_swagger(self.input_apis)

        cwd = "foo"
        provider = SamApiProvider(template, cwd=cwd)
        assertCountEqual(self, self.input_apis, provider.apis)
        SamSwaggerReaderMock.assert_called_with(definition_body=body, definition_uri=filename, working_dir=cwd)
Example #10
  def testExample(self):
    class SlotManager(tracking.AutoCheckpointable):

      def __init__(self):
        self.slotdeps = containers.UniqueNameTracker()
        slotdeps = self.slotdeps
        slots = []
        slots.append(slotdeps.track(
            resource_variable_ops.ResourceVariable(3.), "x"))
        slots.append(slotdeps.track(
            resource_variable_ops.ResourceVariable(4.), "y"))
        slots.append(slotdeps.track(
            resource_variable_ops.ResourceVariable(5.), "x"))
        self.slots = data_structures.NoDependency(slots)

    manager = SlotManager()
    self.evaluate([v.initializer for v in manager.slots])
    checkpoint = util.Checkpoint(slot_manager=manager)
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    save_path = checkpoint.save(checkpoint_prefix)
    metadata = util.object_metadata(save_path)
    dependency_names = []
    for node in metadata.nodes:
      for child in node.children:
        dependency_names.append(child.local_name)
    six.assertCountEqual(
        self,
        dependency_names,
        ["x", "x_1", "y", "slot_manager", "slotdeps", "save_counter"])
Example #11
    def test_fit_and_transform(self):
        augmenter = FeatureAugmenter(column_value="val", column_id="id", column_sort="sort",
                                     column_kind="kind",
                                     kind_to_fc_parameters=self.kind_to_fc_parameters)

        # Fit should do nothing
        returned_df = augmenter.fit()
        six.assertCountEqual(self, returned_df.__dict__, augmenter.__dict__)
        self.assertRaises(RuntimeError, augmenter.transform, None)

        augmenter.set_timeseries_container(self.test_df)

        # Add features to all time series
        X_with_index = pd.DataFrame([{"feature_1": 1}]*2, index=[10, 500])
        X_transformed = augmenter.transform(X_with_index)

        # Require same shape
        for i in X_transformed.index:
            self.assertIn(i, X_with_index.index)

        for i in X_with_index.index:
            self.assertIn(i, X_transformed.index)

        self.assertEqual(X_transformed.shape, (2, 3))

        # Preserve old features
        six.assertCountEqual(self, list(X_transformed.columns), ["feature_1", "a__length", "b__length"])

        # Features are not allowed to be NaN
        for index, row in X_transformed.iterrows():
            print((index, row))
            self.assertFalse(np.isnan(row["a__length"]))
            self.assertFalse(np.isnan(row["b__length"]))
    def test_that_workspace_property_returns_workspace_when_not_in_ADS(self):
        workspace_handle = MuonWorkspaceWrapper(workspace=self.workspace)

        ws_property = workspace_handle.workspace

        six.assertCountEqual(self, ws_property.readX(0), [1, 2, 3, 4])
        six.assertCountEqual(self, ws_property.readY(0), [10, 10, 10, 10])
    def test_delete_stacks_uses_the_correct_order(self,
                                                  custom_resource_mock,
                                                  stack_mock,
                                                  template_loader_mock,
                                                  dependency_resolver_mock,
                                                  parameter_resolver_mock,
                                                  cfn_mock):

        dependency_resolver_mock.return_value.get_stack_order.return_value = ['a', 'c']
        cfn_mock.return_value.get_stack_names.return_value = ['a', 'c']

        stack_a = CloudFormationStack('', [], 'a', '')
        stack_c = CloudFormationStack('', [], 'c', '')

        def stack_side_effect(*args):
            if args[2] == 'a':
                return stack_a
            if args[2] == 'c':
                return stack_c
            return None

        stack_mock.side_effect = stack_side_effect

        handler = StackActionHandler(Mock())
        handler.delete_stacks()

        expected_calls = [call(stack_c), call(stack_a)]
        six.assertCountEqual(self, expected_calls, cfn_mock.return_value.delete_stack.mock_calls)
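
# The expected_calls above assert that deletion runs opposite to the creation
# order returned by the dependency resolver (dependents go first). A one-line
# sketch of that ordering rule, under that assumption:
def deletion_order(creation_order):
    return list(reversed(creation_order))

assert deletion_order(['a', 'c']) == ['c', 'a']
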
 def testTracking(self):
   model = HasList()
   output = model(array_ops.ones([32, 2]))
   self.assertAllEqual([32, 12], output.shape)
   self.assertEqual(11, len(model.layers))
   self.assertEqual(10, len(model.layer_list.layers))
   six.assertCountEqual(
       self,
       model.layers,
       model.layer_list.layers + model.layers_with_updates)
   for index in range(10):
     self.assertEqual(3 + index, model.layer_list.layers[index].units)
   self.assertEqual(2, len(model._checkpoint_dependencies))
   self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
   self.assertIs(model.layers_with_updates,
                 model._checkpoint_dependencies[1].ref)
   self.assertEqual(
       10, len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))
   self.evaluate([v.initializer for v in model.variables])
   self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
   save_path = os.path.join(self.get_temp_dir(), "ckpt")
   model.save_weights(save_path)
   self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
   model.load_weights(save_path)
   self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
                       self.evaluate(model.variables[0]))
   v = variables.Variable(1.)
   model.var_list = [v]
   self.assertIn(v, model.variables)
   self.assertIn(v, model.trainable_variables)
   self.assertNotIn(v, model.non_trainable_variables)
Example #15
    def test_get_multiple_albums_by_id(self):
        response = self.client.get('/album/1,2')
        response.json = json.loads(response.data.decode('utf-8'))

        self.assertEqual(response.status_code, 200)
        response_albums = [album['album'] for album in response.json['albums']]
        assertCountEqual(self, response_albums, [u'album', u'another album'])
Example #16
    def test_passport(self):
        
        def p(s):
            return b'<123/' + s + b'/something>'
        
        cases = (
            # Test all keys
            ((b'!f#f+f*fAfCfBfEfDfGfFfHfKfLfOfNfQfPfSfRfUfTfWfYfXfZf^f~f',),
            [Passport.get_keys(),]),
            # Anything below 8 is not registered
            ((b'S9T8A7',), [['S', 'T'],]),
            # Sequence of strokes
            ((b'SfTf', b'Zf', b'QfLf'), [['S', 'T'], ['Z',], ['Q', 'L']]),
        )

        params = {k: v[0] for k, v in Passport.get_option_info().items()}
        with patch('plover.machine.base.serial.Serial', MockSerial) as mock:
            for inputs, expected in cases:
                mock.inputs = list(map(p, inputs))
                actual = []
                m = Passport(params)
                m.add_stroke_callback(actual.append)
                m.start_capture()
                while mock.index < len(mock.inputs):
                    time.sleep(0.00001)
                m.stop_capture()
                self.assertEqual(len(actual), len(expected))
                for actual_keys, expected_keys in zip(actual, expected):
                    assertCountEqual(self, actual_keys, expected_keys)
Example #17
 def test_trigger(self):
     thresholds, trigger = self.station.trigger(1378771200)  # 2013-9-10
     six.assertCountEqual(self, thresholds, [[253, 323]] * 4)
     six.assertCountEqual(self, trigger, [2, 3, 1, 0])
     data = self.station.trigger(FUTURE)
     data2 = self.station.trigger()
     self.assertEqual(data, data2)
Example #18
    def test_get_cuts(self):
        gps_station = (datetime_to_gps(datetime(2014, 1, 1, 10, 3)),
                       datetime_to_gps(datetime(2014, 3, 1, 11, 32)))
        gps_ref_station = (datetime_to_gps(datetime(2014, 1, 5, 0, 1, 1)),
                           datetime_to_gps(datetime(2014, 3, 5, 3, 34, 4)))
        elec_station = (datetime_to_gps(datetime(2014, 1, 3, 3, 34, 3)),
                        datetime_to_gps(datetime(2014, 3, 5, 23, 59, 59)))
        elec_ref_station = (datetime_to_gps(datetime(2014, 1, 9, 0, 0, 0)),
                            datetime_to_gps(datetime(2014, 3, 15, 1, 2, 3)))
        gps_mock = Mock()
        elec_mock = Mock()

        gps_mock.side_effect = [array(gps_station), array(gps_ref_station)]
        elec_mock.side_effect = [array(elec_station), array(elec_ref_station)]

        self.off._get_electronics_timestamps = elec_mock
        self.off._get_gps_timestamps = gps_mock

        cuts = self.off._get_cuts(sentinel.station, sentinel.ref_station)

        elec_mock.assert_has_calls([call(sentinel.ref_station), call(sentinel.station)], any_order=True)
        gps_mock.assert_has_calls([call(sentinel.ref_station), call(sentinel.station)], any_order=True)

        self.assertEqual(len(cuts), 8)
        six.assertCountEqual(self, sorted(cuts), cuts)
        self.assertEqual(cuts[0], datetime(2014, 1, 1))
        today = datetime.now()
        self.assertEqual(cuts[-1], datetime(today.year, today.month, today.day))
  def testLonelyClient(self):
    host = 'nosuchhostexists'
    port = 54321
    method = 'test method'
    deadline = time.time() + _TIMEOUT
    after_deadline = deadline + _AFTER_DELAY
    metadata_tag = object()
    finish_tag = object()

    completion_queue = _low.CompletionQueue()
    channel = _low.Channel('%s:%d' % (host, port), None)
    client_call = _low.Call(channel, completion_queue, method, host, deadline)

    client_call.invoke(completion_queue, metadata_tag, finish_tag)
    first_event = completion_queue.get(after_deadline)
    self.assertIsNotNone(first_event)
    second_event = completion_queue.get(after_deadline)
    self.assertIsNotNone(second_event)
    kinds = [event.kind for event in (first_event, second_event)]
    six.assertCountEqual(self,
        (_low.Event.Kind.METADATA_ACCEPTED, _low.Event.Kind.FINISH),
        kinds)

    self.assertIsNone(completion_queue.get(after_deadline))

    completion_queue.stop()
    stop_event = completion_queue.get(_FUTURE)
    self.assertEqual(_low.Event.Kind.STOP, stop_event.kind)

    del client_call
    del channel
    del completion_queue
Example #20
 def test_finalize_stats_summaries(self):
   p = plan.Plan(None)
   p.save_summaries_secs = 42
   p.losses['foo'] = tf.constant([1.0])
   p.losses['bar'] = tf.constant([2.0, 3.0])
   p.metrics['baz'] = tf.constant(4)
   p.metrics['qux'] = tf.constant([5.0, 6.0])
   p.finalize_stats()
   with self.test_session():
     self.assertEqual(6, p.loss_total.eval({p.batch_size_placeholder: 1}))
     summary = tf.Summary()
     summary.ParseFromString(p.summaries.eval({p.batch_size_placeholder: 1}))
     qux_string = tf.summary.histogram('qux', [5, 6]).eval()
     qux_proto = tf.Summary()
     qux_proto.ParseFromString(qux_string)
     qux_histogram = qux_proto.value[0].histo
     expected_values = [
         tf.Summary.Value(tag='foo', simple_value=1),
         tf.Summary.Value(tag='bar', simple_value=5),
         tf.Summary.Value(tag='loss_total', simple_value=6),
         tf.Summary.Value(tag='baz', simple_value=4),
         tf.Summary.Value(tag='qux', histo=qux_histogram)]
     six.assertCountEqual(self, expected_values, summary.value)
     summary.ParseFromString(p.summaries.eval({p.batch_size_placeholder: 2}))
     expected_values = [
         tf.Summary.Value(tag='foo', simple_value=0.5),
         tf.Summary.Value(tag='bar', simple_value=2.5),
         tf.Summary.Value(tag='loss_total', simple_value=3),
         tf.Summary.Value(tag='baz', simple_value=4),
         tf.Summary.Value(tag='qux', histo=qux_histogram)]
     six.assertCountEqual(self, expected_values, summary.value)
Example #21
 def test_discover(self):
     org = 'quay.io/cogniteev'
     truncated_config = StringIO()
     config = load_config(self.CONFIG_FILE)
     config['organizations'][org]['repositories'] = None
     save_config(config, truncated_config)
     truncated_config.seek(0, 0)
     discover_command(['cogniteev'], truncated_config, interactive=False)
     config = load_config(self.CONFIG_FILE)
     tags = config.get('organizations', {})\
         .get('quay.io/cogniteev', {})\
         .get('repositories', {})\
         .get('docido-contrib-crawlers', {})\
         .get('on_build', {})\
         .get('tags', {})
     self.assertEqual(len(tags), 1)
     latest_trigger = tags.get('latest')
     self.assertEqual(len(latest_trigger), 1)
     github_repo = 'quay.io/cogniteev/docido-pull-crawler-github'
     assertCountEqual(
         self,
         latest_trigger.get(github_repo),
         [
             dict(trigger_uuid="dcb1e958-9fdb-4e9b-9856-4d52771b3df9",
                  ref="refs/heads/develop"),
             dict(trigger_uuid="567da7a3-0373-4cf2-8480-58a18b8dbe47",
                  ref="refs/tags/v1.1"),
         ]
     )
Example #22
 def test_many_elements(self):
     expected_data = []
     for i in range(20):
         self.d.add(i, i)
         expected_data.append((i, i))
     gevent.sleep(TestFlushingDict.FLUSH_LATENCY_SECONDS + 0.1)
     six.assertCountEqual(self, expected_data, sum(self.received_data, []))
Example #23
 def test_group(self):
     assign_perm("view_collection", self.group1, self.collection)
     assign_perm("edit_collection", self.group1, self.collection)
     user_perms, group_perms = get_user_group_perms(self.group1, self.collection)
     self.assertEqual(len(group_perms), 1)
     six.assertCountEqual(self, group_perms[0][2], ["view_collection", "edit_collection"])
     self.assertEqual(len(user_perms), 0)
Example #24
    def test_search_with_nodes_and_duplicate_label(self):
        self.fake_authentication()

        response = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'uncertain end'})
        self.assertEqual(response.status_code, 200)
        parsed_response = json.loads(response.content.decode('utf-8'))

        # Expect only one result that has a node linked
        expected_result = [
            {"id":2342, "name":"uncertain end", "class_name":"label", "nodes":[
                {"id":403, "x":7840, "y":2380, "z":0, "skid":373}]},
        ]
        six.assertCountEqual(self, expected_result, parsed_response)

        # Add a duplicate record of the label, without any node links
        label = ClassInstance.objects.get(id=2342)
        label.id = None
        label.save()

        response2 = self.client.get(
                '/%d/search' % self.test_project_id,
                {'substring': 'uncertain end'})
        self.assertEqual(response2.status_code, 200)
        parsed_response2 = json.loads(response2.content.decode('utf-8'))

        # Expect the nodes to be not linked to the duplicate record
        expected_result2 = [
            {"id":label.id, "name":"uncertain end", "class_name":"label"},
            {"id":2342, "name":"uncertain end", "class_name":"label", "nodes":[
                {"id":403, "x":7840, "y":2380, "z":0, "skid":373}]}
        ]
        six.assertCountEqual(self, expected_result2, parsed_response2)
Example #25
    def test_parse_stream(
        self, structure_and_messages1, structure_and_messages2, structure_and_messages3
    ):
        """
        L{Parser.parse_stream} returns an iterable of completed and then
        incomplete tasks.
        """
        _, messages1 = structure_and_messages1
        _, messages2 = structure_and_messages2
        _, messages3 = structure_and_messages3
        # Need at least one non-dropped message in partial tree:
        assume(len(messages3) > 1)
        # Need unique UUIDs per task:
        assume(
            len(set(m[0][TASK_UUID_FIELD] for m in (messages1, messages2, messages3)))
            == 3
        )

        # Two complete tasks, one incomplete task:
        all_messages = (messages1, messages2, messages3[:-1])

        all_tasks = list(
            Parser.parse_stream(
                [m for m in chain(*zip_longest(*all_messages)) if m is not None]
            )
        )
        assertCountEqual(
            self, all_tasks, [parse_to_task(msgs) for msgs in all_messages]
        )
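
# The chain(*zip_longest(*all_messages)) expression above interleaves the
# message streams round-robin; zip_longest pads the shorter streams with
# None, which the comprehension then drops. A standalone demonstration:
from itertools import chain
try:
    from itertools import zip_longest  # Python 3
except ImportError:
    from itertools import izip_longest as zip_longest  # Python 2

streams = ([1, 2, 3], [10, 20], [100])
interleaved = [m for m in chain(*zip_longest(*streams)) if m is not None]
assert interleaved == [1, 10, 100, 2, 20, 3]
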
Example #26
    def test_uniform_noise_4_bots_no_noise_manhattan(self):
        test_layout = (
        """ ##################
            # #.  .  # . 2   #
            # #####    #####3#
            #  0  . #  .  .#1#
            ################## """)
        universe = CTFUniverse.create(test_layout, 4)
        noiser = ManhattanNoiser(universe.copy())

        expected_0 = [ (1, 1), (3, 1), (4, 1), (5, 1), (6, 1),
                       (1, 2), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3),
                       (6, 3), (7, 3), (7, 2) ]
        position_bucket_0 = collections.defaultdict(int)

        bot_2_pos = (13, 1)
        position_bucket_2 = {bot_2_pos : 0}

        for i in range(200):
            new = noiser.uniform_noise(universe.copy(), 1)
            self.assertTrue(new.bots[0].noisy)
            self.assertFalse(new.bots[2].noisy)
            position_bucket_0[new.bots[0].current_pos] += 1
            position_bucket_2[new.bots[2].current_pos] += 1
        self.assertEqual(200, sum(position_bucket_0.values()))
        self.assertEqual(200, sum(position_bucket_2.values()))
        # Since this is a randomized algorithm we need to be a bit lenient with
        # our tests. We check that each position was selected at least once.
        six.assertCountEqual(self, position_bucket_0, expected_0, position_bucket_0)

        # bots should never have been noised
        self.assertEqual(200, position_bucket_2[bot_2_pos])
Example #27
    def assertSearchResults(self, results=None, resultCode=0):
        """
        Shortcut for checking results returned by the test server on an LDAPSearchRequest.
        Results must be prepared as a list of dictionaries with 'objectName' and 'attributes' keys.
        """
        if results is None:
            results = []

        messages = []

        for result in results:
            message = pureldap.LDAPMessage(
                pureldap.LDAPSearchResultEntry(
                    objectName=result['objectName'],
                    attributes=result['attributes']
                ),
                id=2
            )
            messages.append(message)

        messages.append(
            pureldap.LDAPMessage(
                pureldap.LDAPSearchResultDone(resultCode=resultCode),
                id=2
            )
        )
        six.assertCountEqual(
            self,
            self._makeResultList(self.server.transport.value()),
            [msg.toWire() for msg in messages]
        )
    def test_numeric_range_filter(self):
        # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-range-query.html

        numeric_range_filter_options = {
            "fields": ["start", "end", "age"],
            "field_validators": {
                "start": ["date"],
                "end": ["date"],
            }
        }

        filter = filters.NumericRangeFilter

        query_params = {'start.gte': '2016-01-01', 'start.lte': '2001-06-01', 'start.tz': 'Pacific/Auckland', 'age.gt': '18'}

        queryset = self.get_queryset_for_filter(filter=filter, query_params=query_params,
                                                options=numeric_range_filter_options)

        self.assertEqual(len(queryset.filters), 2)
        self.assertEqual(len(queryset.queries), 0)

        # raw
        six.assertCountEqual(self, queryset.filters, [
            ('range', {'start': {'gte': '2016-01-01', 'lte': '2001-06-01', 'time_zone': 'Pacific/Auckland'}}),
            ('range', {'age': {'gt': '18'}})
        ])
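
# The raw filters asserted above group flat query params such as 'start.gte'
# by field, with the '.tz' suffix mapped to Elasticsearch's 'time_zone' key.
# A sketch of that grouping; build_range_filters is a hypothetical helper,
# not the library's NumericRangeFilter.
def build_range_filters(query_params):
    ranges = {}
    for key, value in query_params.items():
        field, _, op = key.partition('.')
        ranges.setdefault(field, {})['time_zone' if op == 'tz' else op] = value
    return [('range', {field: ops}) for field, ops in ranges.items()]

params = {'age.gt': '18', 'age.lt': '65'}
assert build_range_filters(params) == [('range', {'age': {'gt': '18', 'lt': '65'}})]
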
Example #29
    def test_uniform_noise_4_bots_a_star(self):
        test_layout = (
        """ ##################
            # #. 2.  # .     #
            # #####    #####3#
            #  0  . #  .  .#1#
            ################## """)
        universe = CTFUniverse.create(test_layout, 4)
        noiser = AStarNoiser(universe.copy())

        expected_0 = [(1, 2), (7, 3), (1, 3), (3, 3), (6, 3),
                      (2, 3), (4, 3), (1, 1), (5, 3)]
        position_bucket_0 = collections.defaultdict(int)

        expected_2 = [(7, 3), (8, 2), (7, 1), (8, 1), (6, 1), (3, 1), (5, 1),
                      (4, 1), (7, 2)]
        position_bucket_2 = collections.defaultdict(int)

        for i in range(100):
            new = noiser.uniform_noise(universe.copy(), 1)
            self.assertTrue(new.bots[0].noisy)
            self.assertTrue(new.bots[2].noisy)
            position_bucket_0[new.bots[0].current_pos] += 1
            position_bucket_2[new.bots[2].current_pos] += 1
        self.assertEqual(100, sum(position_bucket_0.values()))
        self.assertEqual(100, sum(position_bucket_2.values()))
        # Since this is a randomized algorithm we need to be a bit lenient with
        # our tests. We check that each position was selected at least once.
        six.assertCountEqual(self, position_bucket_0, expected_0, sorted(position_bucket_0.keys()))
        six.assertCountEqual(self, position_bucket_2, expected_2, sorted(position_bucket_2.keys()))
    def test_transform_dict_to_yaml_lines_list_accepts_joins(self):
        input = {
            "source": {"Fn::Join": [":", ["my-registry/my-app", {"Ref": "appVersion"}]]}
        }

        expected = [
            {
                "Fn::Join": [
                    ": ",
                    [
                        "source",
                        {
                            "Fn::Join": [
                                ":",
                                [
                                    "my-registry/my-app",
                                    {
                                        "Ref": "appVersion"
                                    }
                                ]
                            ]
                        }
                    ]
                ]
            }
        ]

        result = CloudFormationTemplateTransformer.transform_dict_to_yaml_lines_list(input)
        six.assertCountEqual(self, expected, result)
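
# The expected structure above turns each dict entry into an
# {"Fn::Join": [": ", [key, value]]} line so CloudFormation can resolve
# nested intrinsics like the inner Fn::Join at deploy time. A sketch of that
# per-entry rule; to_yaml_lines is a hypothetical stand-in for
# transform_dict_to_yaml_lines_list.
def to_yaml_lines(mapping):
    return [{"Fn::Join": [": ", [key, value]]} for key, value in mapping.items()]

lines = to_yaml_lines({"source": {"Ref": "appVersion"}})
assert lines == [{"Fn::Join": [": ", ["source", {"Ref": "appVersion"}]]}]
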
Example #31
    def test_update_cpus(self):
        """Tests CPU distribution cron."""
        self.maxDiff = None  # pylint: disable=invalid-name
        manager = manage_vms.OssFuzzClustersManager('clusterfuzz-external')
        manager.update_clusters()

        proj1 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj1').get()
        self.assertIsNotNone(proj1)
        self.assertDictEqual(
            {
                'name':
                'proj1',
                'clusters': [{
                    'cluster': 'oss-fuzz-linux-zone2-pre',
                    'cpu_count': 100,
                    'gce_zone': 'us-east2-a',
                }, {
                    'cluster': 'oss-fuzz-linux-zone3-worker',
                    'cpu_count': 1,
                    'gce_zone': 'us-central1-d',
                }],
            }, proj1.to_dict())

        proj2 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj2').get()
        self.assertIsNotNone(proj2)
        self.assertDictEqual(
            {
                'name':
                'proj2',
                'clusters': [{
                    'cluster': 'oss-fuzz-linux-zone2-pre',
                    'cpu_count': 200,
                    'gce_zone': 'us-east2-a',
                }, {
                    'cluster': 'oss-fuzz-linux-zone3-worker',
                    'cpu_count': 4,
                    'gce_zone': 'us-central1-d',
                }],
            }, proj2.to_dict())

        proj3 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj3').get()
        self.assertIsNotNone(proj3)
        self.assertDictEqual(
            {
                'name':
                'proj3',
                'clusters': [{
                    'cluster': 'oss-fuzz-linux-zone2-pre',
                    'cpu_count': 499,
                    'gce_zone': 'us-east2-a',
                }, {
                    'cluster': 'oss-fuzz-linux-zone3-worker',
                    'cpu_count': 9,
                    'gce_zone': 'us-central1-d',
                }],
            }, proj3.to_dict())

        proj4 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj4').get()
        self.assertIsNotNone(proj4)
        self.assertDictEqual(
            {
                'name':
                'proj4',
                'clusters': [{
                    'cluster': 'oss-fuzz-linux-zone2-pre',
                    'cpu_count': 99,
                    'gce_zone': 'us-east2-a',
                }, {
                    'cluster': 'oss-fuzz-linux-zone3-worker',
                    'cpu_count': 1,
                    'gce_zone': 'us-central1-d',
                }],
            }, proj4.to_dict())

        proj5 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj5').get()
        self.assertIsNotNone(proj5)
        self.assertDictEqual(
            {
                'name':
                'proj5',
                'clusters': [{
                    'cluster': 'oss-fuzz-linux-zone2-pre',
                    'cpu_count': 99,
                    'gce_zone': 'us-east2-a',
                }, {
                    'cluster': 'oss-fuzz-linux-zone3-worker',
                    'cpu_count': 1,
                    'gce_zone': 'us-central1-d',
                }],
            }, proj5.to_dict())

        proj6 = ndb.Key(data_types.OssFuzzProjectInfo, 'proj6').get()
        self.assertIsNotNone(proj6)
        self.assertDictEqual(
            {
                'name':
                'proj6',
                'clusters': [{
                    'cluster': 'oss-fuzz-linux-zone3-worker-high-end',
                    'cpu_count': 2,
                    'gce_zone': 'us-central1-d',
                }],
            }, proj6.to_dict())

        old_proj = ndb.Key(data_types.OssFuzzProjectInfo, 'old_proj').get()
        self.assertIsNone(old_proj)

        mock_bot_manager = self.mock.BotManager('clusterfuzz-external',
                                                'us-east2-a')

        # proj1: new project.
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with(
                expected_instance_template('clusterfuzz-external',
                                           'external-pre-zone2', 'proj1'))
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj1').create.assert_called_with(
                'oss-fuzz-linux-zone2-pre-proj1',
                'oss-fuzz-linux-zone2-pre-proj1',
                size=100,
                wait_for_instances=False)
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj1').resize.assert_not_called()

        # proj2: already exists. needs a resize. old cluster should be deleted.
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called()
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj2').create.assert_not_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj2').delete.assert_not_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj2').resize.assert_called_with(
                200, wait_for_instances=False)
        mock_bot_manager.instance_template(
            'old-cluster-proj2').delete.assert_called()
        mock_bot_manager.instance_group(
            'old-cluster-proj2').delete.assert_called()

        # proj3: already exists. no changes needed.
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called()
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj3').create.assert_not_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj3').resize.assert_not_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj3').delete.assert_not_called()

        # proj4: needs a template update (version change).
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called()
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with(
                expected_instance_template('clusterfuzz-external',
                                           'external-pre-zone2', 'proj4'))
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj4').delete.assert_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj4').create.assert_called_with(
                'oss-fuzz-linux-zone2-pre-proj4',
                'oss-fuzz-linux-zone2-pre-proj4',
                size=99,
                wait_for_instances=False)
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj4').resize.assert_not_called()

        # proj5: needs a template update (disk size change).
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called()
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with(
                expected_instance_template('clusterfuzz-external',
                                           'external-pre-zone2',
                                           'proj5',
                                           disk_size_gb=10))
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj5').delete.assert_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj5').create.assert_called_with(
                'oss-fuzz-linux-zone2-pre-proj5',
                'oss-fuzz-linux-zone2-pre-proj5',
                size=99,
                wait_for_instances=False)
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-proj5').resize.assert_not_called()

        # proj6: high end project.
        for j in range(1, 6):
            mock_bot_manager.instance_group(
                'oss-fuzz-linux-zone3-worker-high-end-proj' +
                str(j)).create.assert_not_called()

        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone3-worker-high-end-proj6').create.assert_called(
            )

        # old_proj: deleted.
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-old-proj').create.assert_not_called()
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called()
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone2-pre-old-proj').delete.assert_called()

        # host instances: created.
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone3-host').create.assert_called_with(
                expected_host_instance_template('clusterfuzz-external',
                                                'host-zone3'))
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone3-host').create.assert_called_with(
                'oss-fuzz-linux-zone3-host',
                'oss-fuzz-linux-zone3-host',
                size=2,
                wait_for_instances=False)

        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone3-host-high-end').create.assert_called_with(
                'oss-fuzz-linux-zone3-host-high-end',
                'oss-fuzz-linux-zone3-host-high-end',
                size=1,
                wait_for_instances=False)

        # Worker instances: created.
        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with(
                expected_instance_template(
                    'clusterfuzz-external',
                    'worker-zone3',
                    'proj1',
                    service_account='*****@*****.**',
                    tls_cert=True))
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone3-worker-proj1').create.assert_called_with(
                'oss-fuzz-linux-zone3-worker-proj1',
                'oss-fuzz-linux-zone3-worker-proj1',
                size=1,
                wait_for_instances=False)

        mock_bot_manager.instance_template(
            'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with(
                expected_instance_template(
                    'clusterfuzz-external',
                    'worker-zone3',
                    'proj2',
                    service_account='*****@*****.**',
                    tls_cert=True))
        mock_bot_manager.instance_group(
            'oss-fuzz-linux-zone3-worker-proj2').create.assert_called_with(
                'oss-fuzz-linux-zone3-worker-proj2',
                'oss-fuzz-linux-zone3-worker-proj2',
                size=4,
                wait_for_instances=False)

        six.assertCountEqual(
            self, [{
                'instance_num': 0,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj1-0001',
                'project_name': u'proj1',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 1,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0001',
                'project_name': u'proj2',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 2,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0002',
                'project_name': u'proj2',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 3,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0003',
                'project_name': u'proj2',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 4,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj2-0004',
                'project_name': u'proj2',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 5,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0001',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 6,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0002',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 7,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0003',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-abcd'
            }, {
                'instance_num': 0,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0004',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 1,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0005',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 2,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0006',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 3,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0007',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 4,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0008',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 5,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj3-0009',
                'project_name': u'proj3',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 6,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj4-0001',
                'project_name': u'proj4',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 7,
                'worker_name': u'oss-fuzz-linux-zone3-worker-proj5-0001',
                'project_name': u'proj5',
                'host_name': u'oss-fuzz-linux-zone3-host-efgh'
            }, {
                'instance_num': 0,
                'worker_name':
                u'oss-fuzz-linux-zone3-worker-high-end-proj6-0001',
                'project_name': u'proj6',
                'host_name': u'oss-fuzz-linux-zone3-host-high-end-1'
            }, {
                'instance_num': 1,
                'worker_name':
                u'oss-fuzz-linux-zone3-worker-high-end-proj6-0002',
                'project_name': u'proj6',
                'host_name': u'oss-fuzz-linux-zone3-host-high-end-1'
            }], [
                assignment.to_dict()
                for assignment in data_types.HostWorkerAssignment.query()
            ])
Example #32
 def test_update_match_are_abstract(self):
     six.assertCountEqual(
         self, {"report"},
         col_pro_compilers.BaseCompiler.__abstractmethods__)
Example #33
 def test_verify_common_config_opts(self):
     opts = util.get_parsed_args(prog='test')
     opts_dict = vars(opts[0])
     six.assertCountEqual(self, ['config_file', 'clean', 'verbose'],
                          opts_dict.keys())
Example #34
 def testNamingWithOptimizer(self):
     input_value = constant_op.constant([[3.]])
     model = MyModel()
     # A nuisance Model using the same optimizer. Its slot variables should not
     # go in the checkpoint, since it is never depended on.
     other_model = MyModel()
     optimizer = adam.AdamOptimizer(0.001)
     optimizer_step = training_util.get_or_create_global_step()
     root_checkpointable = checkpointable_utils.Checkpoint(
         optimizer=optimizer, model=model, optimizer_step=optimizer_step)
     if context.executing_eagerly():
         optimizer.minimize(lambda: model(input_value),
                            global_step=optimizer_step)
         optimizer.minimize(lambda: other_model(input_value),
                            global_step=optimizer_step)
     else:
         train_op = optimizer.minimize(model(input_value),
                                       global_step=optimizer_step)
         optimizer.minimize(other_model(input_value),
                            global_step=optimizer_step)
         self.evaluate(
             checkpointable_utils.gather_initializers(root_checkpointable))
         self.evaluate(train_op)
     named_variables, serialized_graph = (
         checkpointable_utils._serialize_object_graph(root_checkpointable))
     expected_checkpoint_names = (
         # Created in the root node, so no prefix.
         "optimizer_step",
         "model/_second/kernel",
         "model/_named_dense/kernel",
         "model/_named_dense/bias",
         # non-Layer dependency of the model
         "model/_non_layer/a_variable",
         # The optimizer creates two non-slot variables
         "optimizer/beta1_power",
         "optimizer/beta2_power",
         # Slot variables
         "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
         "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
         "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
         "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
         "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
         "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
     )
     suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
     expected_checkpoint_names = [
         name + suffix for name in expected_checkpoint_names
     ]
     six.assertCountEqual(self, expected_checkpoint_names,
                          named_variables.keys())
     # Check that we've mapped to the right variable objects (not exhaustive)
     self.assertEqual("global_step:0",
                      named_variables["optimizer_step" + suffix].name)
     self.assertEqual("my_model/dense_1/kernel:0",
                      named_variables["model/_second/kernel" + suffix].name)
     self.assertEqual(
         "my_model/dense/kernel:0",
         named_variables["model/_named_dense/kernel" + suffix].name)
     self.assertEqual(
         "beta1_power:0",
         named_variables["optimizer/beta1_power" + suffix].name)
     self.assertEqual(
         "beta2_power:0",
         named_variables["optimizer/beta2_power" + suffix].name)
     # Spot check the generated protocol buffers.
     self.assertEqual("optimizer",
                      serialized_graph.nodes[0].children[1].local_name)
     optimizer_node = serialized_graph.nodes[
         serialized_graph.nodes[0].children[1].node_id]
     self.assertEqual("beta1_power", optimizer_node.children[0].local_name)
     self.assertEqual(
         "beta1_power", serialized_graph.nodes[
             optimizer_node.children[0].node_id].attributes[0].full_name)
     self.assertEqual(
         "my_model/dense/kernel",
         serialized_graph.nodes[optimizer_node.slot_variables[
             0].original_variable_node_id].attributes[0].full_name)
     # We strip off the :0 suffix, as variable.name-based saving does.
     self.assertEqual(
         "my_model/dense/kernel/Adam",
         serialized_graph.nodes[optimizer_node.slot_variables[
             0].slot_variable_node_id].attributes[0].full_name)
     self.assertEqual(
         "my_model/dense/kernel/Adam:0",
         optimizer.get_slot(
             var=named_variables["model/_named_dense/kernel" + suffix],
             name="m").name)
     self.assertEqual(
         "model/_named_dense/kernel" + suffix,
         serialized_graph.nodes[optimizer_node.slot_variables[
             0].original_variable_node_id].attributes[0].checkpoint_key)
     self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
     self.assertEqual(
         "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
         serialized_graph.nodes[optimizer_node.slot_variables[
             0].slot_variable_node_id].attributes[0].checkpoint_key)
Example #35
 def test_abstract_method(self):
   func = abstract.Function("f", self._vm).to_variable(self._vm.root_cfg_node)
   func.data[0].is_abstract = True
   cls = abstract.InterpreterClass("X", [], {"f": func}, None, self._vm)
   six.assertCountEqual(self, cls.abstract_methods, {"f"})
Example #36
 def test_bound_function_repr_no_callself(self):
   f = self._make_pytd_function(params=())
   callself = self._vm.program.NewVariable()
   bound = abstract.BoundFunction(callself, f)
   six.assertCountEqual(self, bound.repr_names(), ["<class>.f"])
Example #37
    def test_variables_to_restore_from_model(self):
        model = modeling.DeepVariantModel('test', 'path')
        # We haven't created a slim model, so the variables_to_restore_from_model
        # should be returning an empty list.
        self.assertEqual([], model.variables_to_restore_from_model())

        # Create three model variables and one regular variable.
        with tf.compat.v1.variable_scope('model'):
            with tf.compat.v1.variable_scope('l1'):
                w1 = slim.model_variable('w1', shape=[10, 3, 3])
            with tf.compat.v1.variable_scope('l2'):
                w2 = slim.model_variable('w2', shape=[10, 3, 3])
                w3 = slim.model_variable('w3', shape=[10, 3, 3])
        v1 = slim.variable('my_var', shape=[20, 1])

        # The only variables in the system are the four we've created.
        six.assertCountEqual(self, [w1, w2, w3, v1], slim.get_variables())

        # We get just the three model variables without any excludes.
        six.assertCountEqual(self, [w1, w2, w3],
                             model.variables_to_restore_from_model())
        # As well as when exclude_scopes is an empty list.
        six.assertCountEqual(
            self, [w1, w2, w3],
            model.variables_to_restore_from_model(exclude_scopes=[]))

        # Excluding model/l1 variables gives us w2 and w3.
        six.assertCountEqual(
            self, [w2, w3],
            model.variables_to_restore_from_model(exclude_scopes=['model/l1']))
        # Excluding model/l2 gives us just w1 back.
        six.assertCountEqual(
            self, [w1],
            model.variables_to_restore_from_model(exclude_scopes=['model/l2']))
        # Excluding multiple scopes works as expected.
        six.assertCountEqual(
            self, [],
            model.variables_to_restore_from_model(
                exclude_scopes=['model/l1', 'model/l2']))
        # Excluding the root model scope also produces no variables.
        six.assertCountEqual(
            self, [],
            model.variables_to_restore_from_model(exclude_scopes=['model']))
 def assert_depth_match_extra(self, markers, extra, *depths_set):
     """Verify that the set of markers resolves to the provided set of
     depths (in any order). Allows extra constraints."""
     solutions = derive_depths(markers, extra)
     results = {tuple(a.depth for a in s) for s in solutions}
     six.assertCountEqual(self, results, {tuple(s) for s in depths_set})
Example #39
    def test_get_ssc_aggregates(self):

        result = self.ssc_library.get_ssc_aggregates()

        six.assertCountEqual(self, list(fake.SSC_AGGREGATES), result)
Example #40
 def test_get_masters_programs(self):
     expected_programs = [self.masters_program_1, self.masters_program_2]
     six.assertCountEqual(self, expected_programs,
                          get_programs_by_type(self.site, 'masters'))
Example #41
    def test_list_blobs(self):
        """Test list_blobs."""
        mtime = datetime.datetime(2019, 1, 1)
        mtime_seconds = utils.utc_datetime_to_timestamp(mtime)

        self.fs.create_file('/local/test-bucket/objects/a',
                            st_size=11).st_mtime = mtime_seconds
        self.fs.create_file('/local/test-bucket/objects/b/c',
                            st_size=22).st_mtime = mtime_seconds
        self.fs.create_file('/local/test-bucket/objects/b/d/e',
                            st_size=33).st_mtime = mtime_seconds
        self.fs.create_file('/local/test-bucket/objects/f',
                            st_size=44).st_mtime = mtime_seconds
        self.fs.create_file(
            '/local/test-bucket/metadata/b/c',
            contents='{"key":"value"}').st_mtime = mtime_seconds

        result = list(self.provider.list_blobs('gs://test-bucket'))
        six.assertCountEqual(self, [{
            'bucket': 'test-bucket',
            'name': 'a',
            'updated': mtime,
            'size': 11,
            'metadata': {}
        }, {
            'bucket': 'test-bucket',
            'name': 'f',
            'updated': mtime,
            'size': 44,
            'metadata': {}
        }, {
            'bucket': 'test-bucket',
            'name': 'b/c',
            'updated': mtime,
            'size': 22,
            'metadata': {
                'key': 'value'
            }
        }, {
            'bucket': 'test-bucket',
            'name': 'b/d/e',
            'updated': mtime,
            'size': 33,
            'metadata': {}
        }], result)

        result = list(self.provider.list_blobs('gs://test-bucket/b'))
        six.assertCountEqual(self, [{
            'bucket': 'test-bucket',
            'name': 'b/c',
            'updated': mtime,
            'size': 22,
            'metadata': {
                'key': 'value'
            }
        }, {
            'bucket': 'test-bucket',
            'name': 'b/d/e',
            'updated': mtime,
            'size': 33,
            'metadata': {}
        }], result)

        result = list(self.provider.list_blobs('gs://test-bucket/b/d'))
        six.assertCountEqual(self, [{
            'bucket': 'test-bucket',
            'name': 'b/d/e',
            'updated': mtime,
            'size': 33,
            'metadata': {}
        }], result)

        result = list(
            self.provider.list_blobs('gs://test-bucket/', recursive=False))
        six.assertCountEqual(self, [{
            'bucket': 'test-bucket',
            'name': 'a',
            'updated': mtime,
            'size': 11,
            'metadata': {}
        }, {
            'bucket': 'test-bucket',
            'name': 'f',
            'updated': mtime,
            'size': 44,
            'metadata': {}
        }, {
            'bucket': 'test-bucket',
            'name': 'b',
        }], result)
Example #42
 def _check_filter(self, query_args, expected):
     response = self._get_ids(self.endpoint.filter(**query_args))
     expected = self._get_ids(expected)
     six.assertCountEqual(self, response, expected)
    def test_build_failures(self):
        """Test run with multiple build failures of different type."""
        def _mock_requests_get(url):
            """Mock requests.get."""
            if url == oss_fuzz_build_status.FUZZING_STATUS_URL:
                return MockResponse(
                    json.dumps({
                        'projects': [
                            # Both fuzzing and coverage build types are successful.
                            {
                                'history': [{
                                    'finish_time':
                                    '2018-02-01T00:00:00.000000Z',
                                    'build_id': 'proj0-id-f',
                                    'success': True
                                }],
                                'name':
                                'proj0',
                            },
                            # Only coverage build type is broken for a while.
                            {
                                'history': [{
                                    'finish_time':
                                    '2018-02-01T00:00:00.000000Z',
                                    'build_id': 'proj5-id-f',
                                    'success': True
                                }],
                                'name':
                                'proj5',
                            },
                            # Only coverage build type broken.
                            {
                                'history': [{
                                    'finish_time':
                                    '2018-02-01T00:00:00.000000Z',
                                    'build_id': 'proj6-id-f',
                                    'success': True
                                }],
                                'name':
                                'proj6',
                            },

                            # New failure (first one).
                            {
                                'history': [{
                                    'finish_time':
                                    '2018-02-01T00:00:00.000000000Z',
                                    'build_id': 'proj1-id-f',
                                    'success': False
                                }],
                                'name':
                                'proj1',
                            },
                            # Seen failure (second consecutive).
                            {
                                'history': [{
                                    'finish_time':
                                    '2018-02-01T00:00:00.000000Z',
                                    'build_id': 'proj2-id-f',
                                    'success': False
                                }],
                                'name':
                                'proj2',
                            },
                            # Seen failure (not updated).
                            {
                                'history': [{
                                    'finish_time':
                                    '2018-01-31T00:00:00.000000Z',
                                    'build_id': 'proj3-id-f',
                                    'success': False
                                }],
                                'name':
                                'proj3',
                            },
                            # Seen failure (third consecutive, bug already filed).
                            {
                                'history': [{
                                    'finish_time':
                                    '2018-02-01T00:00:00.000000Z',
                                    'build_id': 'proj4-id-f',
                                    'success': False
                                }],
                                'name':
                                'proj4',
                            },
                        ]
                    }))

            assert url == oss_fuzz_build_status.COVERAGE_STATUS_URL
            return MockResponse(
                json.dumps({
                    'projects': [
                        # Both fuzzing and coverage build types are successful.
                        {
                            'history': [{
                                'finish_time': '2018-02-01T00:00:00.000000Z',
                                'build_id': 'proj0-id-c',
                                'success': True
                            }],
                            'name':
                            'proj0',
                        },

                        # New failure (first one).
                        {
                            'history': [{
                                'finish_time':
                                '2018-02-01T00:00:00.000000000Z',
                                'build_id': 'proj1-id-c',
                                'success': False
                            }],
                            'name':
                            'proj1',
                        },
                        # Seen failure (second consecutive).
                        {
                            'history': [{
                                'name': 'proj2',
                                'finish_time': '2018-02-01T00:00:00.000000Z',
                                'success': False
                            }],
                            'name':
                            'proj2',
                        },
                        # Seen failure (not updated).
                        {
                            'history': [{
                                'finish_time': '2018-01-31T00:00:00.000000Z',
                                'build_id': 'proj3-id-c',
                                'success': False
                            }],
                            'name':
                            'proj3',
                        },
                        # Seen failure (third consecutive, bug already filed).
                        {
                            'history': [{
                                'finish_time': '2018-02-01T00:00:00.000000Z',
                                'build_id': 'proj4-id-c',
                                'success': False
                            }],
                            'name':
                            'proj4',
                        },
                        # Coverage build type is broken for a while.
                        {
                            'history': [{
                                'finish_time': '2018-02-01T00:00:00.000000Z',
                                'build_id': 'proj5-id-c',
                                'success': False
                            }],
                            'name':
                            'proj5',
                        },
                        # Only coverage build type broken (second consecutive).
                        {
                            'history': [{
                                'finish_time': '2018-02-01T00:00:00.000000Z',
                                'build_id': 'proj6-id-c',
                                'success': False
                            }],
                            'name':
                            'proj6',
                        },
                    ]
                }))

        self.mock.get.side_effect = _mock_requests_get

        data_types.OssFuzzBuildFailure(
            id='proj2',
            project_name='proj2',
            last_checked_timestamp=datetime.datetime(2018, 1, 31),
            consecutive_failures=1,
            build_type='fuzzing').put()

        data_types.OssFuzzBuildFailure(
            id='proj3',
            project_name='proj3',
            last_checked_timestamp=datetime.datetime(2018, 1, 31),
            consecutive_failures=1,
            build_type='fuzzing').put()

        data_types.OssFuzzBuildFailure(
            id='proj4',
            project_name='proj4',
            last_checked_timestamp=datetime.datetime(2018, 1, 31),
            issue_id='1337',
            consecutive_failures=2,
            build_type='fuzzing').put()

        data_types.OssFuzzBuildFailure(
            id='proj5-coverage',
            project_name='proj5',
            last_checked_timestamp=datetime.datetime(2018, 1, 31),
            issue_id='31337',
            consecutive_failures=5,
            build_type='coverage').put()

        data_types.OssFuzzBuildFailure(
            id='proj6-coverage',
            project_name='proj6',
            last_checked_timestamp=datetime.datetime(2018, 1, 31),
            issue_id=None,
            consecutive_failures=1,
            build_type='coverage').put()

        data_types.OssFuzzProject(id='proj2', name='proj2',
                                  ccs=['*****@*****.**']).put()
        data_types.OssFuzzProject(id='proj6', name='proj6',
                                  ccs=['*****@*****.**']).put()

        self.app.get('/build-status')
        six.assertCountEqual(self, [
            {
                'build_type': 'fuzzing',
                'consecutive_failures': 1,
                'issue_id': None,
                'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
                'project_name': u'proj1'
            },
            {
                'build_type': 'fuzzing',
                'consecutive_failures': 2,
                'issue_id': '1',
                'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
                'project_name': u'proj2'
            },
            {
                'build_type': 'fuzzing',
                'consecutive_failures': 1,
                'issue_id': None,
                'last_checked_timestamp': datetime.datetime(2018, 1, 31, 0, 0),
                'project_name': u'proj3'
            },
            {
                'build_type': 'fuzzing',
                'consecutive_failures': 3,
                'issue_id': '1337',
                'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
                'project_name': u'proj4'
            },
            {
                'build_type': 'coverage',
                'consecutive_failures': 6,
                'issue_id': '31337',
                'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
                'project_name': u'proj5'
            },
            {
                'build_type': 'coverage',
                'consecutive_failures': 2,
                'issue_id': '2',
                'last_checked_timestamp': datetime.datetime(2018, 2, 1, 0, 0),
                'project_name': u'proj6'
            },
        ], [
            failure.to_dict()
            for failure in data_types.OssFuzzBuildFailure.query()
        ])

        self.assertEqual(2, len(self.itm.issues))
        issue = self.itm.issues[1]
        six.assertCountEqual(self, ['*****@*****.**'], issue.cc)
        self.assertEqual('New', issue.status)
        self.assertEqual('proj2: Fuzzing build failure', issue.summary)
        self.assertEqual(
            'The last 2 builds for proj2 have been failing.\n'
            '<b>Build log:</b> '
            'https://oss-fuzz-build-logs.storage.googleapis.com/'
            'log-proj2-id-f.txt\n'
            'Build type: fuzzing\n\n'
            'To reproduce locally, please see: '
            'https://google.github.io/oss-fuzz/advanced-topics/reproducing'
            '#reproducing-build-failures\n\n'
            '<b>This bug tracker is not being monitored by OSS-Fuzz team.</b> '
            'If you have any questions, please create an issue at '
            'https://github.com/google/oss-fuzz/issues/new.\n\n'
            '**This bug will be automatically closed within a '
            'day once it is fixed.**', issue.body)

        self.assertTrue(issue.has_label('Proj-proj2'))
        self.assertTrue(issue.has_label('Type-Build-Failure'))

        issue = self.itm.issues[2]
        six.assertCountEqual(self, ['*****@*****.**'], issue.cc)
        self.assertEqual('New', issue.status)
        self.assertEqual('proj6: Coverage build failure', issue.summary)
        self.assertEqual(
            'The last 2 builds for proj6 have been failing.\n'
            '<b>Build log:</b> '
            'https://oss-fuzz-build-logs.storage.googleapis.com/'
            'log-proj6-id-c.txt\n'
            'Build type: coverage\n\n'
            'To reproduce locally, please see: '
            'https://google.github.io/oss-fuzz/advanced-topics/reproducing'
            '#reproducing-build-failures\n\n'
            '<b>This bug tracker is not being monitored by OSS-Fuzz team.</b> '
            'If you have any questions, please create an issue at '
            'https://github.com/google/oss-fuzz/issues/new.\n\n'
            '**This bug will be automatically closed within a '
            'day once it is fixed.**', issue.body)

        self.assertTrue(issue.has_label('Proj-proj6'))
        self.assertTrue(issue.has_label('Type-Build-Failure'))
Example #44
    def _fit_restore_fit_test_template(self, estimator_fn, test_saved_model):
        """Tests restoring previously fit models."""
        temp_dir = self.get_temp_dir()
        model_dir = tempfile.mkdtemp(dir=temp_dir)
        exogenous_feature_columns = (
            tf.feature_column.numeric_column("exogenous"), )
        first_estimator = estimator_fn(model_dir, exogenous_feature_columns)
        train_input_fn = _build_input_fn_with_seed(2)
        eval_input_fn = _build_input_fn_with_seed(3)
        first_estimator.train(input_fn=train_input_fn, steps=1)
        first_evaluation = first_estimator.evaluate(input_fn=eval_input_fn,
                                                    steps=1)
        first_loss_before_fit = first_evaluation["loss"]
        self.assertAllEqual(first_loss_before_fit,
                            first_evaluation["average_loss"])
        self.assertAllEqual([], first_loss_before_fit.shape)
        first_estimator.train(input_fn=train_input_fn, steps=1)
        first_loss_after_fit = first_estimator.evaluate(input_fn=eval_input_fn,
                                                        steps=1)["loss"]
        self.assertAllEqual([], first_loss_after_fit.shape)
        second_estimator = estimator_fn(model_dir, exogenous_feature_columns)
        second_estimator.train(input_fn=train_input_fn, steps=1)
        second_evaluation = second_estimator.evaluate(input_fn=eval_input_fn,
                                                      steps=1)
        exogenous_values_ten_steps = {
            "exogenous": tf.range(10, dtype=tf.dtypes.float32)[None, :, None]
        }
        input_receiver_fn = first_estimator.build_raw_serving_input_receiver_fn(
        )
        export_location = first_estimator.export_saved_model(
            temp_dir, input_receiver_fn)
        if not test_saved_model:
            return
        with tf.Graph().as_default():
            with tf.compat.v1.Session() as sess:
                signatures = tf.compat.v1.saved_model.load(
                    sess, [tf.saved_model.SERVING], export_location)
                # Test that prediction and filtering can continue from evaluation output
                _ = saved_model_utils.predict_continuation(
                    continue_from=second_evaluation,
                    steps=10,
                    exogenous_features=exogenous_values_ten_steps,
                    signatures=signatures,
                    session=sess)
                times, values, _ = _generate_data()
                first_filtering = saved_model_utils.filter_continuation(
                    continue_from=second_evaluation,
                    features={
                        feature_keys.FilteringFeatures.TIMES:
                        times[None, -1] + 2,
                        feature_keys.FilteringFeatures.VALUES:
                        values[None, -1] + 2.,
                        "exogenous": values[None, -1, None] + 12.
                    },
                    signatures=signatures,
                    session=sess)
                # Test that prediction and filtering can continue from filtering output
                second_saved_prediction = saved_model_utils.predict_continuation(
                    continue_from=first_filtering,
                    steps=1,
                    exogenous_features={
                        "exogenous":
                        tf.range(1, dtype=tf.dtypes.float32)[None, :, None]
                    },
                    signatures=signatures,
                    session=sess)
                self.assertEqual(
                    times[-1] + 3,
                    tf.compat.v1.squeeze(second_saved_prediction[
                        feature_keys.PredictionResults.TIMES]))
                saved_model_utils.filter_continuation(
                    continue_from=first_filtering,
                    features={
                        feature_keys.FilteringFeatures.TIMES: times[-1] + 3,
                        feature_keys.FilteringFeatures.VALUES: values[-1] + 3.,
                        "exogenous": values[-1, None] + 13.
                    },
                    signatures=signatures,
                    session=sess)

                # Test cold starting
                six.assertCountEqual(
                    self, [
                        feature_keys.FilteringFeatures.TIMES,
                        feature_keys.FilteringFeatures.VALUES, "exogenous"
                    ],
                    signatures.signature_def[feature_keys.SavedModelLabels.
                                             COLD_START_FILTER].inputs.keys())
                batched_times = tf.tile(
                    tf.range(30, dtype=tf.dtypes.int64)[None, :], (10, 1))
                batched_values = tf.ones([10, 30, 1])
                state = saved_model_utils.cold_start_filter(
                    signatures=signatures,
                    session=sess,
                    features={
                        feature_keys.FilteringFeatures.TIMES: batched_times,
                        feature_keys.FilteringFeatures.VALUES: batched_values,
                        "exogenous": 10. + batched_values
                    })
                predict_times = math_ops.tile(
                    tf.range(30, 45, dtype=tf.dtypes.int64)[None, :], (10, 1))
                predictions = saved_model_utils.predict_continuation(
                    continue_from=state,
                    times=predict_times,
                    exogenous_features={
                        "exogenous":
                        math_ops.tile(tf.range(15, dtype=tf.dtypes.float32),
                                      (10, ))[None, :, None]
                    },
                    signatures=signatures,
                    session=sess)
                self.assertAllEqual([10, 15, 1], predictions["mean"].shape)
Example #45
 def testOrderNodes1(self):
     # n1 --> n2
     n1 = self.prog.NewCFGNode("n1")
     n2 = n1.ConnectNew("n2")
     order = cfg_utils.order_nodes([n1, n2])
     six.assertCountEqual(self, [n1, n2], order)
Example #46
    def test_import_projects(self):
        """Import a set of new projects, stacks and stack groups. This tests
        only the actual import. Retrieving the data to import from different
        sources is not part of this test.
        """
        project_url = 'https://catmaid-test/'
        data_folder = '/tmp/catmaid-test/'
        existing_projects = list(Project.objects.all())
        existing_project_ids = [p.id for p in existing_projects]

        p1_config = {
            'project': {
                'title': 'test-no-stacks',
            }
        }

        p2_config = {
            'project': {
                'title':
                'test-two-stacks',
                'stacks': [
                    {
                        # A basic stack, only with required information
                        'title':
                        'test-stack-1',
                        'dimension':
                        '(7, 17, 23)',
                        'resolution':
                        '(2, 3, 5)',
                        'zoomlevels':
                        -1,
                        'mirrors': [{
                            'title': 'test-mirror-1',
                            'fileextension': 'jpg'
                        }]
                    },
                    {
                        # A basic stack with a little more information
                        'title':
                        'test-stack-2',
                        'dimension':
                        '(7, 17, 23)',
                        'resolution':
                        '(2, 3, 5)',
                        'zoomlevels':
                        -1,
                        'mirrors': [{
                            'title': 'test-mirror-2',
                            'fileextension': 'jpg',
                            'url': 'https://this.is.my.stack/'
                        }]
                    },
                    {
                        # A stack with all optional properties
                        'title':
                        'test-stack-3',
                        'dimension':
                        '(4, 34, 9)',
                        'resolution':
                        '(1, 2, 3)',
                        'metadata':
                        'Test meta data',
                        'zoomlevels':
                        -1,
                        'translation':
                        '(10, 20, 30)',
                        'mirrors': [{
                            'title': 'test-mirror-3',
                            'folder': 'abc/',
                            'fileextension': 'jpg',
                            'tile_width': 123,
                            'tile_height': 456,
                            'tile_source_type': 2,
                        }],
                        'stackgroups': [{
                            # Add a single stack group with only this stack
                            # in it.
                            'title': 'Test group 1',
                            'relation': 'view',
                        }],
                    }
                ]
            }
        }

        pre_projects = [
            importer.PreProject(p1_config, project_url, data_folder),
            importer.PreProject(p2_config, project_url, data_folder),
        ]

        tags = []
        permissions = []
        default_tile_width = 256
        default_tile_height = 512
        default_tile_source_type = 5
        default_position = 0
        cls_graph_ids_to_link = []
        remove_unref_stack_data = False

        imported, not_imported = importer.import_projects(
            self.user, pre_projects, tags, permissions, default_tile_width,
            default_tile_height, default_tile_source_type,
            cls_graph_ids_to_link, remove_unref_stack_data)

        self.assertListEqual(pre_projects, imported)
        self.assertListEqual([], not_imported)

        new_projects = list(
            Project.objects.exclude(
                id__in=existing_project_ids).order_by('title'))
        self.assertEqual(2, len(new_projects))

        # Projects should be ordered by name, so the first project will be based
        # on p1_config. Test p1 first, it is not expected to have any stacks.
        p1 = new_projects[0]
        self.assertEqual(p1_config['project']['title'], p1.title)
        self.assertEqual(0, p1.stacks.all().count())

        # Test p2.
        p2 = new_projects[1]
        self.assertEqual(p2_config['project']['title'], p2.title)
        p2_stacks = p2.stacks.all().order_by('title')
        self.assertEqual(3, len(p2_stacks))
        p2cfg_stacks = p2_config['project']['stacks']
        for n, p2s in enumerate(p2_stacks):
            stack = p2cfg_stacks[n]

            # Test required fields
            self.assertEqual(stack['title'], p2s.title)
            six.assertCountEqual(self, literal_eval(stack['dimension']),
                                 literal_eval(str(p2s.dimension)))
            six.assertCountEqual(self, literal_eval(stack['resolution']),
                                 literal_eval(str(p2s.resolution)))
            self.assertEqual(stack['zoomlevels'], p2s.num_zoom_levels)

            # Test mirrors
            mirrors = p2s.stackmirror_set.all().order_by('title')
            self.assertEqual(len(stack['mirrors']), len(mirrors))
            for m, omirror in enumerate(mirrors):
                mirror = stack['mirrors'][m]

                self.assertEqual(mirror['title'], omirror.title)
                self.assertEqual(mirror['fileextension'],
                                 omirror.file_extension)

                # Test fields with potential default values
                self.assertEqual(mirror.get('position', default_position),
                                 omirror.position)
                self.assertEqual(mirror.get('tile_width', default_tile_width),
                                 omirror.tile_width)
                self.assertEqual(
                    mirror.get('tile_height', default_tile_height),
                    omirror.tile_height)
                self.assertEqual(
                    mirror.get('tile_source_type', default_tile_source_type),
                    omirror.tile_source_type)

                if 'url' in mirror:
                    image_base = mirror['url']
                else:
                    image_base = urljoin(
                        project_url,
                        urljoin(mirror.get('path', ''),
                                mirror.get('folder', '')))

                self.assertEqual(image_base, omirror.image_base)

            # Test project-stack link
            ps = ProjectStack.objects.get(project=p2.id, stack=p2s)
            six.assertCountEqual(
                self, literal_eval(stack.get('translation', '(0,0,0)')),
                literal_eval(str(ps.translation)))

            # Test stack groups
            ostack_group_links = StackStackGroup.objects.filter(
                stack=p2s).order_by('stack__title')
            stack_groups = stack.get('stackgroups', [])
            self.assertEqual(len(ostack_group_links), len(stack_groups))
            for m, sg_cfg in enumerate(stack_groups):
                ostack_group_link = ostack_group_links[m]
                ostack_group = ostack_group_link.stack_group
                self.assertEqual(sg_cfg['title'], ostack_group.title)
                self.assertEqual(sg_cfg['relation'],
                                 ostack_group_link.group_relation.name)
                self.assertEqual(sg_cfg.get('position', default_position),
                                 ostack_group_link.position)
Example #47
    def testComputePredecessors(self):
        # n7      n6
        #  ^      ^
        #  |      |
        #  |      |
        # n1 ---> n20 --> n3 --> n5 -+
        #         | ^            ^   |
        #         | |            |   |
        #         | +------------|---+
        #         v              |
        #         n4 ------------+
        n1 = self.prog.NewCFGNode("n1")
        n20 = n1.ConnectNew("n20")
        n3 = n20.ConnectNew("n3")
        n4 = n20.ConnectNew("n4")
        n5 = n3.ConnectNew("n5")
        n6 = n20.ConnectNew("n6")
        n7 = n1.ConnectNew("n7")
        n3.ConnectTo(n5)
        n4.ConnectTo(n5)
        n5.ConnectTo(n20)

        # Intentionally pick a non-root as nodes[0] to verify that the graph
        # will still be fully explored.
        nodes = [n7, n1, n20, n3, n4, n5, n6]
        r = cfg_utils.compute_predecessors(nodes)
        six.assertCountEqual(self, r[n1], {n1})
        six.assertCountEqual(self, r[n20], {n1, n20, n3, n4, n5})
        six.assertCountEqual(self, r[n3], {n1, n20, n3, n4, n5})
        six.assertCountEqual(self, r[n4], {n1, n20, n3, n4, n5})
        six.assertCountEqual(self, r[n5], {n1, n20, n3, n4, n5})
        six.assertCountEqual(self, r[n6], {n1, n20, n3, n4, n5, n6})
        six.assertCountEqual(self, r[n7], {n1, n7})
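
A note on what this asserts: compute_predecessors evidently maps each node to the set of nodes that can reach it, the node itself included. A minimal, self-contained sketch of that idea, assuming only that nodes expose a list of outgoing edges (the Node class and this reimplementation are hypothetical stand-ins, not pytype's cfg_utils):

class Node:
    """Toy stand-in for a CFG node with forward edges."""

    def __init__(self, name):
        self.name = name
        self.outgoing = []

    def connect(self, other):
        self.outgoing.append(other)


def compute_predecessors_sketch(nodes):
    # r[n] = every node from which n is reachable, plus n itself,
    # computed by propagating reach-sets forward until a fixpoint.
    r = {n: {n} for n in nodes}
    changed = True
    while changed:
        changed = False
        for n in nodes:
            for m in n.outgoing:
                missing = r[n] - r[m]
                if missing:
                    r[m] |= missing
                    changed = True
    return r


n1, n2 = Node("n1"), Node("n2")
n1.connect(n2)
assert compute_predecessors_sketch([n1, n2])[n2] == {n1, n2}
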
Example #48
 def testOrderNodes2(self):
     # n1   n2(dead)
     n1 = self.prog.NewCFGNode("n1")
     n2 = self.prog.NewCFGNode("n2")
     order = cfg_utils.order_nodes([n1, n2])
     six.assertCountEqual(self, [n1], order)
    def test_vocab_iter_respects_cutoff(self):
        vocab_counts = ["a", "b", "c", "d", "e", "f", "g", "w", "z"]
        vocab_items = ["a", "b", "d", "e", "<UNK>"]

        six.assertCountEqual(self, vocab_counts, list(self.vocab.counts.keys()))
        six.assertCountEqual(self, vocab_items, list(self.vocab))
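
The cutoff behavior exercised here matches NLTK's language-model vocabulary: words below the cutoff stay in the raw counts but drop out of iteration, which also yields the unknown-word label. A minimal sketch assuming nltk.lm.Vocabulary (the fixture setup isn't shown above, so the cutoff value here is illustrative):

from nltk.lm import Vocabulary

# 'c' is seen once, below unk_cutoff=2: it stays in .counts but is
# excluded from iteration, which additionally yields the unk label.
vocab = Vocabulary(['a', 'a', 'b', 'b', 'c'], unk_cutoff=2)
assert sorted(vocab.counts) == ['a', 'b', 'c']
assert sorted(vocab) == ['<UNK>', 'a', 'b']
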
Example #50
 def testOrderNodes0(self):
     order = cfg_utils.order_nodes([])
     six.assertCountEqual(self, order, [])
  def test_prune(self):
    """Test pruning."""
    self._setup_env(job_type='libfuzzer_asan_job')
    self.mock._record_cross_pollination_stats.side_effect = (
        self.get_mock_record_compare(
            project_qualified_name='test_fuzzer',
            method='random',
            sources='test2_fuzzer',
            tags='',
            initial_corpus_size=5,
            corpus_size=3,
            initial_edge_coverage=0,
            edge_coverage=0,
            initial_feature_coverage=0,
            feature_coverage=0))

    corpus_pruning_task.execute_task('libFuzzer_test_fuzzer',
                                     'libfuzzer_asan_job')

    corpus_dir = os.path.join(self.temp_dir, 'corpus')
    os.mkdir(corpus_dir)
    self.corpus.rsync_to_disk(corpus_dir)

    six.assertCountEqual(self, [
        '39e0574a4abfd646565a3e436c548eeb1684fb57',
        '7d157d7c000ae27db146575c08ce30df893d3a64',
        '31836aeaab22dc49555a97edb4c753881432e01d',
        '6fa8c57336628a7d733f684dc9404fbd09020543',
    ], os.listdir(corpus_dir))

    quarantine_dir = os.path.join(self.temp_dir, 'quarantine')
    os.mkdir(quarantine_dir)
    self.quarantine_corpus.rsync_to_disk(quarantine_dir)

    six.assertCountEqual(self,
                         ['crash-7acd6a2b3fe3c5ec97fa37e5a980c106367491fa'],
                         os.listdir(quarantine_dir))

    testcases = list(data_types.Testcase.query())
    self.assertEqual(1, len(testcases))
    self.assertEqual('Null-dereference WRITE', testcases[0].crash_type)
    self.assertEqual('Foo\ntest_fuzzer.cc\n', testcases[0].crash_state)
    self.assertEqual(1337, testcases[0].crash_revision)
    self.assertEqual('test_fuzzer',
                     testcases[0].get_metadata('fuzzer_binary_name'))

    self.mock.add_task.assert_has_calls([
        mock.call('minimize', testcases[0].key.id(), u'libfuzzer_asan_job'),
    ])

    today = datetime.datetime.utcnow().date()
    coverage_info = data_handler.get_coverage_information('test_fuzzer', today)
    coverage_info_without_backup = coverage_info.to_dict()
    del coverage_info_without_backup['corpus_backup_location']

    self.assertDictEqual(
        {
            'corpus_location':
                u'gs://{}/libFuzzer/test_fuzzer/'.format(self.corpus_bucket),
            'corpus_size_bytes':
                8,
            'corpus_size_units':
                4,
            'date':
                today,
            # Coverage numbers are expected to be None as they come from fuzzer
            # coverage cron task (see src/go/server/cron/coverage.go).
            'edges_covered':
                None,
            'edges_total':
                None,
            'functions_covered':
                None,
            'functions_total':
                None,
            'fuzzer':
                u'test_fuzzer',
            'html_report_url':
                None,
            'quarantine_location':
                u'gs://{}/libFuzzer/test_fuzzer/'.format(self.quarantine_bucket
                                                        ),
            'quarantine_size_bytes':
                2,
            'quarantine_size_units':
                1,
        },
        coverage_info_without_backup)

    self.assertEqual(
        coverage_info.corpus_backup_location,
        'gs://{}/corpus/libFuzzer/test_fuzzer/'.format(
            self.backup_bucket) + '%s.zip' % today)
Example #52
 def test_delete_name_foo(self):
     # delete name foo.example.com. from zone (and hence all
     # associated nodes for that name)
     self.zone.delete_name('foo.example.com.')
     expected = ['foofoo.example.com.', 'bar.example.com.', 'example.com.']
     assertCountEqual(self, self.zone.names.keys(), expected)
Example #53
    def test_2d5pt_Roofline(self):
        store_file = os.path.join(self.temp_dir, 'test_2d5pt_Roofline.pickle')
        output_stream = StringIO()

        parser = kc.create_parser()
        args = parser.parse_args([
            '-m',
            self._find_file('phinally_gcc.yaml'), '-p', 'Roofline',
            self._find_file('2d-5pt.c'), '-D', 'N', '1024-4096:3log2', '-D',
            'M', '50', '-vvv', '--store', store_file
        ])
        kc.check_arguments(args, parser)
        kc.run(parser, args, output_file=output_stream)

        with open(store_file, 'rb') as store:
            results = pickle.load(store)

        # Check if results contains correct kernel
        self.assertEqual(list(results), ['2d-5pt.c'])

        # Check for correct variations of constants
        six.assertCountEqual(self, [
            sorted(map(str, r)) for r in results['2d-5pt.c']
        ], [
            sorted(map(str, r)) for r in [((sympy.var('M'), 50), (
                sympy.var('N'),
                1024)), ((sympy.var('M'), 50),
                         (sympy.var('N'),
                          2048)), ((sympy.var('M'), 50),
                                   (sympy.var('N'), 4096))]
        ])

        # Output of first result:
        result = results['2d-5pt.c'][[
            k for k in results['2d-5pt.c'] if (sympy.var('N'), 4096) in k
        ][0]]

        six.assertCountEqual(self, result, ['Roofline'])

        roofline = result['Roofline']
        self.assertAlmostEqual(roofline['min performance'],
                               5802500000.0,
                               places=0)
        self.assertEqual(roofline['bottleneck level'], 2)

        expected_btlncks = [{
            u'arithmetic intensity':
            0.11764705882352941,
            u'bandwidth':
            PrefixedUnit(122.97, u'G', u'B/s'),
            u'bw kernel':
            'copy',
            u'level':
            u'L1',
            u'performance':
            PrefixedUnit(14467058823.529411, u'', u'FLOP/s')
        }, {
            u'arithmetic intensity':
            0.1,
            u'bandwidth':
            PrefixedUnit(61.92, u'G', u'B/s'),
            u'bw kernel':
            'copy',
            u'level':
            u'L2',
            u'performance':
            PrefixedUnit(6192000000.0, u'', u'FLOP/s')
        }, {
            u'arithmetic intensity':
            1.0 / 6.0,
            u'bandwidth':
            PrefixedUnit(34815.0, 'M', 'B/s'),
            u'bw kernel':
            'copy',
            u'level':
            u'L3',
            u'performance':
            PrefixedUnit(5802500000.0, u'', u'FLOP/s')
        }, {
            u'arithmetic intensity':
            float('inf'),
            u'bandwidth':
            PrefixedUnit(12.01, u'G', u'B/s'),
            u'bw kernel':
            'load',
            u'level':
            u'MEM',
            u'performance':
            PrefixedUnit(float('inf'), u'', u'FLOP/s')
        }]

        for i, btlnck in enumerate(expected_btlncks):
            for k, v in btlnck.items():
                print(k, roofline['mem bottlenecks'][i][k], v)
                self.assertEqual(roofline['mem bottlenecks'][i][k], v)
Example #54
    def testPivots(self):
        values = {"x": {"0", "1"}, "y": {"0", "1"}}
        # x == 0 || x == 1
        equation = Or([Eq("x", "0"), Eq("x", "1")])
        six.assertCountEqual(self, ["0", "1"],
                             equation.extract_pivots(values)["x"])

        # x == 0 && x == 0
        equation = And([Eq("x", "0"), Eq("x", "0")])
        six.assertCountEqual(self, ["0"], equation.extract_pivots(values)["x"])

        # x == 0 && (x == 0 || x == 1)
        equation = And([Eq("x", "0"), Or([Eq("x", "0"), Eq("x", "1")])])
        six.assertCountEqual(self, ["0"], equation.extract_pivots(values)["x"])

        # x == 0 || x == 0
        equation = Or([Eq("x", "0"), Eq("x", "0")])
        six.assertCountEqual(self, ["0"], equation.extract_pivots(values)["x"])

        # x == 0 || y == 0
        equation = Or([Eq("x", "0"), Eq("y", "0")])
        pivots = equation.extract_pivots(values)
        six.assertCountEqual(self, ["0"], pivots["x"])
        six.assertCountEqual(self, ["0"], pivots["y"])
Example #55
 def test_combine_unique_sets(self):
     a = [1, 2, 3]
     b = [3, 1, 4, -1]
     c = utils._combine_unique_sets(a, b)
     six.assertCountEqual(self, [1, 2, 3, 4, -1], c)
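
For orientation, a stand-in for what such a helper plausibly does; this reimplementation is hypothetical and not the library's _combine_unique_sets:

def combine_unique_sets_sketch(a, b):
    # Union of both inputs with duplicates removed; element order is
    # unspecified, which is why the test compares via assertCountEqual.
    return list(set(a) | set(b))


assert sorted(combine_unique_sets_sketch([1, 2, 3], [3, 1, 4, -1])) == [-1, 1, 2, 3, 4]
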
  def test_prune(self):
    """Basic pruning test."""
    corpus_pruning_task.execute_task('libFuzzer_test_fuzzer',
                                     'libfuzzer_asan_job')

    quarantined = os.listdir(self.quarantine_dir)
    self.assertEqual(1, len(quarantined))
    self.assertEqual(quarantined[0],
                     'crash-7acd6a2b3fe3c5ec97fa37e5a980c106367491fa')

    corpus = os.listdir(self.corpus_dir)
    self.assertEqual(4, len(corpus))
    six.assertCountEqual(self, [
        '39e0574a4abfd646565a3e436c548eeb1684fb57',
        '7d157d7c000ae27db146575c08ce30df893d3a64',
        '31836aeaab22dc49555a97edb4c753881432e01d',
        '6fa8c57336628a7d733f684dc9404fbd09020543',
    ], corpus)

    testcases = list(data_types.Testcase.query())
    self.assertEqual(1, len(testcases))
    self.assertEqual('Null-dereference WRITE', testcases[0].crash_type)
    self.assertEqual('Foo\ntest_fuzzer.cc\n', testcases[0].crash_state)
    self.assertEqual(1337, testcases[0].crash_revision)
    self.assertEqual('test_fuzzer',
                     testcases[0].get_metadata('fuzzer_binary_name'))
    self.assertEqual('label1,label2', testcases[0].get_metadata('issue_labels'))

    today = datetime.datetime.utcnow().date()
    # get_coverage_information on test_fuzzer rather than libFuzzer_test_fuzzer
    # since the libfuzzer_ prefix is removed when saving coverage info.
    coverage_info = data_handler.get_coverage_information('test_fuzzer', today)

    self.assertDictEqual(
        {
            'corpus_backup_location':
                u'backup_link',
            'corpus_location':
                u'gs://bucket/libFuzzer/test_fuzzer/',
            'corpus_size_bytes':
                8,
            'corpus_size_units':
                4,
            'date':
                today,
            # Coverage numbers are expected to be None as they come from fuzzer
            # coverage cron task (see src/go/server/cron/coverage.go).
            'edges_covered':
                None,
            'edges_total':
                None,
            'functions_covered':
                None,
            'functions_total':
                None,
            'fuzzer':
                u'test_fuzzer',
            'html_report_url':
                None,
            'quarantine_location':
                u'gs://bucket-quarantine/libFuzzer/test_fuzzer/',
            'quarantine_size_bytes':
                2,
            'quarantine_size_units':
                1,
        },
        coverage_info.to_dict())

    self.assertEqual(self.mock.unpack_seed_corpus_if_needed.call_count, 1)
Example #57
        def test(self):
            with self.assertRaises(AssertionError):
                six.assertCountEqual(self, (1, 2), [3, 4, 5])

            six.assertCountEqual(self, (1, 2), [2, 1])
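
On Python 3, six.assertCountEqual(self, a, b) simply forwards to unittest's native TestCase.assertCountEqual (on Python 2 it dispatched to assertItemsEqual), so the same order-insensitive but multiplicity-aware check can be written without six:

import unittest


class CountEqualDemo(unittest.TestCase):
    def test_order_insensitive(self):
        # Equal as multisets: order is ignored, element counts are not.
        self.assertCountEqual([1, 2, 2], [2, 1, 2])
        with self.assertRaises(AssertionError):
            self.assertCountEqual([1, 1, 2], [1, 2, 2])


if __name__ == '__main__':
    unittest.main()
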
Example #58
 def test_update_match_are_abstract(self):
     six.assertCountEqual(
         self, {'_update_helper', 'update', 'profile'},
         BaseColumnPrimitiveTypeProfiler.__abstractmethods__)
Example #59
 def test_feature_flags(self, *mocks):
     client = APIClient()
     feature_flags = client.feature_flags
     expected = ('python_3_scripts', 'container_scripts', 'pubnub')
     six.assertCountEqual(self, feature_flags, expected)
Example #60
def step_rpmdb_changes_are(ctx):
    """
    Compare the rpmdb saved by :ref:`When I save rpmdb` with the current rpmdb.

    Requires a table with the following headers:

    ======= ==========
     State   Packages 
    ======= ==========

    *State* is the state of the package:

    ============ ============== ============= ===============================
       State      rpmdb before   rpmdb after        Additional comments      
    ============ ============== ============= ===============================
    installed    Not installed  Installed     Package has been installed     
    removed      Installed      Not installed Package has been removed       
    absent       Not installed  Not installed Package has not been installed 
    unchanged    Installed      Installed     Package was not changed        
    reinstalled  Installed      Installed     Same package was reinstalled   
    updated      Installed      Installed     Package has been updated       
    downgraded   Installed      Installed     Package has been downgraded    
    ignored          -              -         Package will be ignored        
    ============ ============== ============= ===============================

    For each *State* you can specify multiple *Packages*, separated by
    commas.

    For the *ignored* state you can use Unix shell-style wildcards to cover
    multiple package names (see the fnmatch sketch after this step
    definition). Packages whose state is explicitly stated won't be ignored
    even if they match a pattern.

    Examples:

    .. code-block:: gherkin

       Scenario: Detect reinstalled package
           When I save rpmdb
            And I successfully run "dnf -y reinstall util-linux"
           Then rpmdb changes are
             | State       | Packages   |
             | reinstalled | util-linux |

    .. code-block:: gherkin

       Scenario: Detect exact version
           When I save rpmdb
            And I successfully run "dnf -y update util-linux"
           Then rpmdb changes are
             | State   | Packages          |
             | updated | util-linux/2.29.0 |

    .. _automatic rules:

    **Automatic rules that are additionally applied**:

      - Packages not listed in the table must not appear/disappear
      - Packages not listed in the table are classified as *unchanged*
    """
    ctx.assertion.assertIsNotNone(ctx.rpmdb, "Save rpmdb before comparison")
    table = table_utils.parse_kv_table(ctx, HEADINGS_RPMDB, rpm_utils.State)
    ctx.wipe_rpmdb = True
    rpmdb = rpm_utils.get_rpmdb()
    problems = []

    def unexpected_state(pkg, state, expected_state, pkg_pre, pkg_post):
        problems.append("Package {pkg!r} was supposed to be "
                        "{expected_state!r}, but has been {state!r} "
                        "({pkg_pre!r} -> {pkg_post!r})".format(
                            pkg=pkg,
                            state=state.value,
                            expected_state=expected_state.value,
                            pkg_pre=rpm_utils.hdr2nevra(pkg_pre),
                            pkg_post=rpm_utils.hdr2nevra(pkg_post)))

    def pkgs_split(pkgs):
        for pkg in pkgs.split(","):
            yield pkg.strip()

    # Let's check what the user has requested in the table
    ignore_list = ""
    for expected_state, packages in table.items():
        if expected_state == rpm_utils.State.ignored:
            ignore_list = packages
        else:
            for pkg in pkgs_split(packages):
                pkg_pre = rpm_utils.find_pkg(ctx.rpmdb, pkg, only_by_name=True)
                if pkg_pre:
                    ctx.rpmdb.remove(pkg_pre)
                pkg_post = rpm_utils.find_pkg(rpmdb, pkg)
                if pkg_post:
                    rpmdb.remove(pkg_post)
                state = rpm_utils.analyze_state(pkg_pre, pkg_post)
                if state != expected_state:
                    if (state == rpm_utils.State.unchanged and
                            expected_state == rpm_utils.State.reinstalled):
                        # workaround for unchanged rpmdb timestamp
                        text = getattr(ctx.cmd_result, 'stdout')
                        if "Reinstalling     : {}".format(pkg) in text:
                            continue
                    unexpected_state(pkg, state, expected_state, pkg_pre,
                                     pkg_post)

    # Now exclude packages matching any wildcard pattern in the ignore_list
    def filter_rpmdb(pkgs, filters):  # remove packages matching any filter
        remove = []
        for pkg in pkgs:
            for f in filters:
                if f and fnmatch.fnmatchcase(pkg.name, f):
                    remove.append(pkg)
                    break  # no need to process additional filters
        for pkg in remove:
            pkgs.remove(pkg)

    filter_rpmdb(rpmdb, list(pkgs_split(ignore_list)))
    filter_rpmdb(ctx.rpmdb, list(pkgs_split(ignore_list)))

    # Let's check if the NEVRAs are still the same
    def rpmdb2nevra(rpmdb):
        for hdr in rpmdb:
            yield rpm_utils.hdr2nevra(hdr)

    six.assertCountEqual(ctx.assertion, rpmdb2nevra(ctx.rpmdb),
                         rpmdb2nevra(rpmdb))

    # Even if we have the same NEVRAs, packages can differ or be reinstalled
    for pkg_pre, pkg_post in zip(ctx.rpmdb, rpmdb):
        state = rpm_utils.analyze_state(pkg_pre, pkg_post)
        expected_state = rpm_utils.State.unchanged
        # At this point pkg_pre and pkg_post should have the same name
        pkg = pkg_pre["name"].decode()
        if state != expected_state:
            unexpected_state(pkg, state, expected_state, pkg_pre, pkg_post)

    assert not problems, "\n{!s}".format("\n".join(problems))
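
As referenced in the docstring above, the *ignored* state relies on Unix shell-style wildcard matching; a minimal sketch of that mechanism (the package names are made up for illustration):

import fnmatch

# fnmatchcase performs case-sensitive shell-style matching, mirroring
# how filter_rpmdb drops packages whose names match an ignore pattern.
assert fnmatch.fnmatchcase('util-linux', 'util-*')
assert not fnmatch.fnmatchcase('coreutils', 'util-*')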