    def test_shoptimizer_plugin_errors_are_logged(self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        shoptimizer_api_response_with_builtin_optimizer_error = """
    {
      "plugin-results": {
        "my-plugin": {
          "error_msg": "An unexpected error occurred",
          "num_of_products_optimized": 0,
          "result": "failure"
        }
      },
      "optimized-data": {
        "entries": [{
          "batchId": 1111,
          "merchantId": 1234567,
          "method": "insert"
        }]
      }
    }
    """
        mocked_requests.request.return_value = _create_mock_response(
            200, shoptimizer_api_response_with_builtin_optimizer_error)

        with mock.patch('shoptimizer_client.ShoptimizerClient._get_jwt',
                        return_value='jwt data'):
            with self.assertLogs(level='ERROR') as log:
                self.client.shoptimize(original_batch)

                self.assertIn(
                    'ERROR:root:Request for batch #0 with operation upsert encountered '
                    'an error when running optimizer my-plugin. Error: An unexpected error occurred',
                    log.output)

    def test_empty_shoptimizer_url_does_not_call_api_and_returns_original_batch(
            self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)

        with mock.patch('shoptimizer_client.constants') as mocked_constants:
            mocked_constants.SHOPTIMIZER_BASE_URL = ''
            returned_batch = self.client.shoptimize(original_batch)

            self.assertEqual(original_batch, returned_batch)
            mocked_requests.request.assert_not_called()

    def test_config_file_not_found_does_not_call_api_and_returns_original_batch(
            self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        mocked_requests.exceptions.RequestException = requests.exceptions.RequestException

        with mock.patch('builtins.open', side_effect=FileNotFoundError):
            returned_batch = self.client.shoptimize(original_batch)

            self.assertEqual(original_batch, returned_batch)
            mocked_requests.request.assert_not_called()

    def test_create_delete_batch(self, is_mca, num_rows):
        constants.IS_MCA = is_mca
        method = constants.Method.DELETE
        batch_id = test_utils.BATCH_NUMBER
        item_rows, expected_batch, _, _ = test_utils.generate_test_data(
            method, num_rows)

        actual_batch, _, _ = batch_creator.create_batch(
            batch_id, item_rows, method)

        self.assertEqual(expected_batch, actual_batch)

    def test_empty_optimization_params_does_not_call_api_and_returns_original_batch(
            self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        mocked_requests.exceptions.RequestException = requests.exceptions.RequestException

        with mock.patch('builtins.open',
                        mock.mock_open(read_data='')), mock.patch('json.load'):
            returned_batch = self.client.shoptimize(original_batch)

            self.assertEqual(original_batch, returned_batch)
            mocked_requests.request.assert_not_called()
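
Each test above receives a `mocked_requests` argument alongside `self`, which implies the test class patches the `requests` module used by shoptimizer_client at the class level. A minimal sketch of that scaffolding, assuming only names that appear in the tests (the ShoptimizerClient constructor call is hypothetical):

import unittest
from unittest import mock

import shoptimizer_client
import test_utils

METHOD = 'insert'  # assumption: any method name the client accepts


@mock.patch('shoptimizer_client.requests')
class ShoptimizerClientTest(unittest.TestCase):

    def setUp(self):
        super().setUp()
        # Hypothetical construction; the tests only rely on self.client
        # exposing a shoptimize(batch) method.
        self.client = shoptimizer_client.ShoptimizerClient(
            test_utils.BATCH_NUMBER, METHOD)
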
Example #6
    def test_process_items(self, method, num_rows):
        _, batch, batch_id_to_item_id, expected_response = test_utils.generate_test_data(
            method, num_rows)
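        # This stub mirrors the client-library call chain
        # service.products().custombatch(...).execute(): each .return_value
        # stands in for one call, so execute() yields expected_response.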
        self._api_service.products.return_value.custombatch.return_value.execute.return_value = expected_response

        successful_item_ids, item_failures = self._client.process_items(
            batch, DUMMY_BATCH_NUMBER, batch_id_to_item_id, method)

        self._api_service.products.return_value.custombatch.return_value.execute.assert_called()
        self.assertEqual(num_rows, len(successful_item_ids))
        self.assertEqual(0, len(item_failures))

    def test_shoptimizer_request_exception_returns_original_batch(
            self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        mocked_requests.exceptions.RequestException = requests.exceptions.RequestException
        mocked_requests.request.side_effect = requests.exceptions.RequestException(
            'Shoptimizer server connection error')

        with mock.patch('shoptimizer_client.ShoptimizerClient._get_jwt',
                        return_value='jwt data'):
            returned_batch = self.client.shoptimize(original_batch)

            self.assertEqual(original_batch, returned_batch)

    def test_get_jwt_exception_does_not_call_api_and_returns_original_batch(
            self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        mocked_requests.exceptions.RequestException = requests.exceptions.RequestException

        with mock.patch('shoptimizer_client.ShoptimizerClient._get_jwt',
                        side_effect=requests.exceptions.RequestException(
                            'Token server connection error')):
            returned_batch = self.client.shoptimize(original_batch)

            self.assertEqual(original_batch, returned_batch)
            mocked_requests.request.assert_not_called()

    def test_successful_response_returns_optimized_batch(
            self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        mocked_requests.request.return_value = _create_mock_response(
            200, SHOPTIMIZER_API_RESPONSE_SUCCESS)

        with mock.patch('shoptimizer_client.ShoptimizerClient._get_jwt',
                        return_value='jwt data'):
            optimized_batch = self.client.shoptimize(original_batch)
            optimized_product = optimized_batch['entries'][0]['product']

            self.assertNotIn('mpn', optimized_product)
            self.assertNotEqual(original_batch, optimized_batch)

    def test_create_batch_returns_batch_to_item_id_dict(self):
        constants.IS_MCA = True
        method = constants.Method.INSERT
        batch_id = test_utils.BATCH_NUMBER
        item_rows, _, _, _ = test_utils.generate_test_data(
            method, test_utils.MULTIPLE_ITEM_COUNT)

        _, _, batch_to_item_id_dict = batch_creator.create_batch(
            batch_id, item_rows, method)

        self.assertEqual(test_utils.MULTIPLE_ITEM_COUNT,
                         len(batch_to_item_id_dict))
        self.assertEqual('test id', batch_to_item_id_dict.get(0))
        self.assertEqual('test id', batch_to_item_id_dict.get(1))

    def test_create_batch_returns_skipped_items_when_merchant_id_missing(self):
        constants.IS_MCA = True
        method = constants.Method.INSERT
        batch_number = test_utils.BATCH_NUMBER
        remove_merchant_ids = True
        item_rows, _, _, _ = test_utils.generate_test_data(
            method, test_utils.MULTIPLE_ITEM_COUNT, remove_merchant_ids)

        _, skipped_item_ids, _ = batch_creator.create_batch(
            batch_number, item_rows, method)

        self.assertEqual(test_utils.MULTIPLE_ITEM_COUNT, len(skipped_item_ids))
        self.assertEqual('test id', skipped_item_ids[0])
        self.assertEqual('test id', skipped_item_ids[1])
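
Read together with test_create_delete_batch above, these tests pin down create_batch's return value as a 3-tuple of (batch, skipped_item_ids, batch_to_item_id_dict). An illustrative call site using only names that appear in the tests:

item_rows, _, _, _ = test_utils.generate_test_data(
    constants.Method.INSERT, test_utils.MULTIPLE_ITEM_COUNT)
batch, skipped_item_ids, batch_to_item_id_dict = batch_creator.create_batch(
    test_utils.BATCH_NUMBER, item_rows, constants.Method.INSERT)
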
Example #12
    def test_process_items_delete_returns_items_with_errors(self, num_rows):
        _, batch, batch_id_to_item_id, _ = test_utils.generate_test_data(
            constants.Method.DELETE, num_rows)
        response_with_errors = test_utils._generate_delete_response_with_errors(
            num_rows)
        self._api_service.products.return_value.custombatch.return_value.execute.return_value = response_with_errors

        successful_item_ids, item_failures = self._client.process_items(
            batch, DUMMY_BATCH_NUMBER, batch_id_to_item_id,
            constants.Method.DELETE)

        self._api_service.products.return_value.custombatch.return_value.execute.assert_called()
        self.assertEqual(0, len(successful_item_ids))
        self.assertEqual(num_rows, len(item_failures))

    def test_request_includes_configuration_parameters(self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        mocked_requests.request.return_value = _create_mock_response(
            200, SHOPTIMIZER_API_RESPONSE_SUCCESS)

        with mock.patch('shoptimizer_client.ShoptimizerClient._get_jwt',
                        return_value='jwt data'):
            self.client.shoptimize(original_batch)

            self.assertIn('lang',
                          mocked_requests.request.call_args[1]['params'])
            self.assertIn('country',
                          mocked_requests.request.call_args[1]['params'])
            self.assertIn('currency',
                          mocked_requests.request.call_args[1]['params'])

    def test_shoptimizer_response_contains_error_msg_returns_original_batch(
            self, mocked_requests):
        _, original_batch, _, _ = test_utils.generate_test_data(METHOD)
        shoptimizer_api_response_failure_bad_request = """{
        "error-msg": "Request must contain 'entries' as a key.",
        "optimization-results": {},
        "optimized-data": {},
        "plugin-results": {}
    }"""
        mocked_requests.request.return_value = _create_mock_response(
            400, shoptimizer_api_response_failure_bad_request)

        with mock.patch('shoptimizer_client.ShoptimizerClient._get_jwt',
                        return_value='jwt data'):
            returned_batch = self.client.shoptimize(original_batch)

            self.assertEqual(original_batch, returned_batch)
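
The tests in this file rely on a module-level _create_mock_response(status_code, text) helper that is not shown in this excerpt. A minimal sketch of what it could look like, assuming the client reads the response's status code and JSON body:

import json
from unittest import mock


def _create_mock_response(status_code, text):
    """Returns a mock of requests.Response with the given status and body."""
    response = mock.Mock()
    response.status_code = status_code
    response.text = text
    response.json.return_value = json.loads(text)
    return response
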
Example #15
def main(args):
    if args.eval_type == 'joint' and (args.rgb_weights_path is None or args.flow_weights_path is None):
        print('ERROR: when using the `joint` model evaluation type, '
              'you must specify paths to both the RGB and Optical Flow weights.')
        print('Exiting...')
        return
    elif args.eval_type == 'rgb' and args.rgb_weights_path is None:
        print('ERROR: when using the `rgb` model evaluation type, '
              'you must specify a path to the RGB weights.')
        print('Exiting...')
        return
    elif args.eval_type == 'flow' and args.flow_weights_path is None:
        print('ERROR: when using the `flow` model evaluation type, '
              'you must specify a path to the Optical Flow weights.')
        print('Exiting...')
        return

    # define rgb model, load trained weights and evaluate test samples
    if args.eval_type in ['joint', 'rgb']:
        print('\nprocessing RGB video')
        print('path: %s' % rgb_video_path)
        rgb_model = build_model(NUM_FRAMES_PER_EXAMPLE, FRAME_HEIGHT, FRAME_WIDTH, 
                                NUM_RGB_CHANNELS, base_model_weights=None, freeze_base=False)
        rgb_model.load_weights(args.rgb_weights_path)
        # define data generator
        rgb_generator = generate_test_data(rgb_video_path, NUM_FRAMES_PER_EXAMPLE, 
                                           batch_size, two_channels_frame=False)
        # evaluate
        if RGB_NUM_TEST_EXAMPLES % batch_size == 0:
            num_batches = RGB_NUM_TEST_EXAMPLES // batch_size
        else:
            num_batches = (RGB_NUM_TEST_EXAMPLES // batch_size) + 1
        rgb_predictions = []
        step = 0
        while True:
            X_test = next(rgb_generator)
            preds = rgb_model.predict(X_test)
            rgb_predictions += preds.reshape((preds.shape[0],)).tolist()
            step += 1
            sys.stdout.write('num of batches processed: %d of %d\r' % (step, num_batches))
            if step >= num_batches:
                break
        rgb_predictions = np.asarray(rgb_predictions, dtype=np.float32)


    # define optical flow model, load trained weights and evaluate test samples
    if args.eval_type in ['joint', 'flow']:
        print('\n\nprocessing Optical Flow video')
        print('path: %s' % flow_video_path)
        flow_model = build_model(NUM_FRAMES_PER_EXAMPLE, FRAME_HEIGHT, FRAME_WIDTH, 
                                 NUM_FLOW_CHANNELS, base_model_weights=None, freeze_base=False)
        flow_model.load_weights(args.flow_weights_path)
        # define data generator
        flow_generator = generate_test_data(flow_video_path, NUM_FRAMES_PER_EXAMPLE, 
                                            batch_size, two_channels_frame=True)
        # evaluate
        if FLOW_NUM_TEST_EXAMPLES % batch_size == 0:
            num_batches = FLOW_NUM_TEST_EXAMPLES // batch_size
        else:
            num_batches = (FLOW_NUM_TEST_EXAMPLES // batch_size) + 1
        flow_predictions = []
        step = 0
        while True:
            X_test = next(flow_generator)
            preds = flow_model.predict(X_test)
            flow_predictions += preds.reshape((preds.shape[0],)).tolist()
            step += 1
            sys.stdout.write('num of batches processed: %d of %d\r' % (step, num_batches))
            if step >= num_batches:
                break
        flow_predictions = np.asarray(flow_predictions, dtype=np.float32)

    if args.eval_type == 'rgb':
        model_predictions = rgb_predictions
    elif args.eval_type == 'flow':
        # flow examples are one fewer than rgb examples, so duplicate the
        # first flow prediction to keep the number of predictions equal to
        # the number of rgb examples
        model_predictions = np.zeros(flow_predictions.shape[0] + 1, dtype=np.float32)
        model_predictions[1:] = flow_predictions[:]
        model_predictions[0] = flow_predictions[0]
    else:
        model_predictions = np.zeros_like(rgb_predictions, dtype=np.float32)
        # combine rgb prediction and flow prediction
        model_predictions[1:] = (rgb_predictions[1:] + flow_predictions[:]) / 2.0
        model_predictions[0] = (rgb_predictions[0] + flow_predictions[0]) / 2.0
      
    # save predictions
    with open(prediction_save_path, 'w') as f:
        # use the first prediction in `model_predictions` as the prediction
        # for the first 39 frames of the test video, since examples were
        # generated from the 40th frame onwards
        for _ in range(39):
            f.write('{0}\n'.format(model_predictions[0]))

        # save the predictions that start from the 40th frame onwards
        for prediction in model_predictions:
            f.write('{0}\n'.format(prediction))

    print('\nprediction generation completed')
    print('prediction output file: %s' % prediction_save_path)


    return
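
main() expects an args namespace carrying eval_type ('joint', 'rgb', or 'flow'), rgb_weights_path, and flow_weights_path; it also reads module-level names such as rgb_video_path, flow_video_path, batch_size, and prediction_save_path that are defined elsewhere in the script. A possible command-line wiring, assuming those attribute names (flag spellings and defaults are illustrative):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--eval-type', choices=['joint', 'rgb', 'flow'],
                        default='joint')
    parser.add_argument('--rgb-weights-path', default=None)
    parser.add_argument('--flow-weights-path', default=None)
    main(parser.parse_args())
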
Example #16
parser.add_argument("--per-centroid", default=1000, type=int)
parser.add_argument("--batch-size", default=3, type=int)
parser.add_argument("--thresh", default=0.7, type=int)
parser.add_argument("--loops", default=20, type=int)
parser.add_argument("--sort", type=bool, default=False)
parser.add_argument("--test-pytorch-impl", action='store_true')

args = parser.parse_args()

if args.test_pytorch_impl:
    print(
        'Chose to test PyTorch version, so setting batch size to 1 (was %i)' %
        args.batch_size)
    args.batch_size = 1

boxes_np, scores_np = generate_test_data(args.centroids, args.per_centroid,
                                         args.batch_size, args.sort)

t0 = datetime.now()
for _ in range(args.loops):
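    # py_cpu_nms (presumably the reference Fast R-CNN implementation) takes
    # an (N, 5) array of [x1, y1, x2, y2, score] rows, so the score column is
    # appended to each batch element's boxes before the call.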
    [
        py_cpu_nms(
            np.concatenate((boxes_np[i], np.expand_dims(scores_np[i], -1)),
                           -1), args.thresh) for i in range(args.batch_size)
    ]
print("Python version took {} s to do {} loops".format(
    (datetime.now() - t0).total_seconds(), args.loops))

boxes = Variable(torch.Tensor(boxes_np))
scores = Variable(torch.Tensor(scores_np))

t0 = datetime.now()