Example #1
    def calc_performance(self, training_data: TrainingData, merchant_id: str):
        if not CALCULATE_PRODUCT_SPECIFIC_PERFORMANCE and not CALCULATE_UNIVERSAL_PERFORMANCE:
            return
        logging.debug('Calculating performance')
        sales_probabilities_ps = []
        sales_ps = []
        probability_per_offer = []
        sales_probabilities_uni = []
        sales_uni = []

        for joined_market_situations in training_data.joined_data.values():
            for jms in joined_market_situations.values():
                if merchant_id in jms.merchants:
                    for offer_id in jms.merchants[merchant_id].keys():
                        amount_sales = TrainingData.extract_sales(
                            jms.merchants[merchant_id][offer_id].product_id,
                            offer_id, jms.sales)
                        if CALCULATE_PRODUCT_SPECIFIC_PERFORMANCE:
                            features_ps = extract_features(
                                offer_id, TrainingData.create_offer_list(jms),
                                False, training_data.product_prices)
                        if CALCULATE_UNIVERSAL_PERFORMANCE:
                            features_uni = extract_features(
                                offer_id, TrainingData.create_offer_list(jms),
                                True, training_data.product_prices)
                        # Guard each call with its flag: features_ps and
                        # features_uni only exist when the matching flag is set.
                        if amount_sales == 0:
                            if CALCULATE_PRODUCT_SPECIFIC_PERFORMANCE:
                                self.__add_product_specific_probabilities(
                                    features_ps, jms, offer_id,
                                    sales_probabilities_ps, sales_ps, 0,
                                    probability_per_offer)
                            if CALCULATE_UNIVERSAL_PERFORMANCE:
                                self.__add_universal_probabilities(
                                    features_uni, sales_probabilities_uni,
                                    sales_uni, 0)
                        else:
                            for _ in range(amount_sales):
                                if CALCULATE_PRODUCT_SPECIFIC_PERFORMANCE:
                                    self.__add_product_specific_probabilities(
                                        features_ps, jms, offer_id,
                                        sales_probabilities_ps, sales_ps, 1,
                                        probability_per_offer)
                                if CALCULATE_UNIVERSAL_PERFORMANCE:
                                    self.__add_universal_probabilities(
                                        features_uni, sales_probabilities_uni,
                                        sales_uni, 1)
        if CALCULATE_PRODUCT_SPECIFIC_PERFORMANCE:
            self.__process_performance_calculation(
                sales_probabilities_ps, sales_ps,
                NUM_OF_PRODUCT_SPECIFIC_FEATURES, "Product-specific")
        if CALCULATE_UNIVERSAL_PERFORMANCE:
            self.__process_performance_calculation(sales_probabilities_uni,
                                                   sales_uni,
                                                   NUM_OF_UNIVERSAL_FEATURES,
                                                   "Universal")
Example #2
    def calculate_sales_probality_per_offer(self):
        probability_per_offer = []

        for joined_market_situations in self.testing_data.joined_data.values():
            for jms in joined_market_situations.values():
                if self.settings["initial_merchant_id"] in jms.merchants:
                    for offer_id in jms.merchants[self.settings["initial_merchant_id"]].keys():
                        features_ps = extract_features(
                            offer_id, TrainingData.create_offer_list(jms),
                            False, self.testing_data.product_prices)
                        probability = self.ml_engine.predict(
                            jms.merchants[self.settings["initial_merchant_id"]][offer_id].product_id,
                            [features_ps])
                        probability_per_offer.append(
                            (int(offer_id), probability[0]))
        write_calculations_to_file(probability_per_offer, self.settings['output_file'])
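
The write_calculations_to_file helper is only visible through its call site above. A minimal sketch inferred from that call site; the body is an assumption, not the project's implementation:

import csv

def write_calculations_to_file(probability_per_offer, output_file):
    # Hypothetical body: dump (offer_id, sale probability) pairs as CSV rows.
    with open(output_file, 'w', newline='') as f:
        csv.writer(f).writerows(probability_per_offer)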
Example #3
    def __create_prediction_data(self, own_offer: Offer,
                                 current_offers: List[Offer],
                                 potential_prices: List[int], price: float,
                                 universal_features: bool):
        lst = []
        for potential_price in potential_prices:
            # Interpret each integer step as tenths of a price unit and add
            # it to the base price to form the candidate price.
            candidate_price = price + potential_price / 10.0

            # Overwrite our own offer's price in the current market snapshot
            # before extracting features for this candidate.
            setattr(
                next(offer for offer in current_offers
                     if offer.offer_id == own_offer.offer_id), "price",
                candidate_price)
            prediction_data = extract_features(
                own_offer.offer_id, current_offers, universal_features,
                self.training_data.product_prices)
            lst.append(prediction_data)
        return lst
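
For orientation, a standalone sketch of the candidate price grid this loop builds; the concrete numbers below are illustrative only, not taken from the project:

price = 10.0
potential_prices = list(range(-5, 6))  # integer steps of 0.1 around the base price
candidates = [price + p / 10.0 for p in potential_prices]
# candidates == [9.5, 9.6, ..., 10.5]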
Example #4
    def append_to_vectors_from_features(
            self, features_vector, sales_vector,
            joined_market_situation: JoinedMarketSituation, offer_list,
            product_id, universal_features, timestamp):
        if self.merchant_id in joined_market_situation.merchants:
            for offer_id in joined_market_situation.merchants[
                    self.merchant_id].keys():
                amount_sales = self.extract_sales(
                    product_id, offer_id, joined_market_situation.sales)
                features = extract_features(offer_id, offer_list,
                                            universal_features,
                                            self.product_prices)
                if amount_sales == 0:
                    self.append_n_times(features_vector, sales_vector,
                                        features, 0, timestamp)
                else:
                    for _ in range(amount_sales):
                        self.append_n_times(features_vector, sales_vector,
                                            features, 1, timestamp)
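
The append_n_times helper is not shown on this page. A minimal sketch of what the call sites above imply; the body is an assumption (for instance, how timestamp is actually used is not visible here):

    def append_n_times(self, features_vector, sales_vector, features,
                       sale, timestamp):
        # Hypothetical body: append one feature row and its 0/1 sale label.
        features_vector.append(features)
        sales_vector.append(sale)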
Example #5
import json
import os


def extract_features_from_rawdata(chunk, header, period, features):
    path = os.path.join(os.path.dirname(__file__),
                        "resources/channel_info.json")
    with open(path) as channel_info_file:
        channel_info = json.load(channel_info_file)
    data = [convert_to_dict(X, header, channel_info) for X in chunk]
    return extract_features(data, period, features)
Example #6
def extract_features(images,
                     model_options,
                     weight_decay=0.0001,
                     reuse=None,
                     is_training=False,
                     fine_tune_batch_norm=False,
                     nas_training_hyper_parameters=None):
  """Extracts features by the particular model_variant.

  Args:
    images: A tensor of size [batch, height, width, channels].
    model_options: A ModelOptions instance to configure models.
    weight_decay: The weight decay for model variables.
    reuse: Reuse the model variables or not.
    is_training: Is training or not.
    fine_tune_batch_norm: Fine-tune the batch norm parameters or not.
    nas_training_hyper_parameters: A dictionary storing hyper-parameters for
      training nas models. Its keys are:
      - `drop_path_keep_prob`: Probability to keep each path in the cell when
        training.
      - `total_training_steps`: Total training steps to help drop path
        probability calculation.

  Returns:
    concat_logits: A tensor of size [batch, feature_height, feature_width,
      feature_channels], where feature_height/feature_width are determined by
      the images height/width and output_stride.
    end_points: A dictionary from components of the network to the corresponding
      activation.
  """
  features, end_points = feature_extractor.extract_features(
      images,
      output_stride=model_options.output_stride,
      multi_grid=model_options.multi_grid,
      model_variant=model_options.model_variant,
      depth_multiplier=model_options.depth_multiplier,
      divisible_by=model_options.divisible_by,
      weight_decay=weight_decay,
      reuse=reuse,
      is_training=is_training,
      preprocessed_images_dtype=model_options.preprocessed_images_dtype,
      fine_tune_batch_norm=fine_tune_batch_norm,
      nas_stem_output_num_conv_filters=(
          model_options.nas_stem_output_num_conv_filters),
      nas_training_hyper_parameters=nas_training_hyper_parameters,
      use_bounded_activation=model_options.use_bounded_activation)

  if not model_options.aspp_with_batch_norm:
    return features, end_points
  else:
    if model_options.dense_prediction_cell_config is not None:
      tf.logging.info('Using dense prediction cell config.')
      dense_prediction_layer = dense_prediction_cell.DensePredictionCell(
          config=model_options.dense_prediction_cell_config,
          hparams={
              'conv_rate_multiplier': 16 // model_options.output_stride,
          })
      concat_logits = dense_prediction_layer.build_cell(
          features,
          output_stride=model_options.output_stride,
          crop_size=model_options.crop_size,
          image_pooling_crop_size=model_options.image_pooling_crop_size,
          weight_decay=weight_decay,
          reuse=reuse,
          is_training=is_training,
          fine_tune_batch_norm=fine_tune_batch_norm)
      return concat_logits, end_points
    else:
      # The following code employs the DeepLabv3 ASPP module. Note that we
      # could express the ASPP module as one particular dense prediction
      # cell architecture, but we leave this code as-is for backward
      # compatibility.
      batch_norm_params = {
          'is_training': is_training and fine_tune_batch_norm,
          'decay': 0.9997,
          'epsilon': 1e-5,
          'scale': True,
      }

      activation_fn = (
          tf.nn.relu6 if model_options.use_bounded_activation else tf.nn.relu)

      with slim.arg_scope(
          [slim.conv2d, slim.separable_conv2d],
          weights_regularizer=slim.l2_regularizer(weight_decay),
          activation_fn=activation_fn,
          normalizer_fn=slim.batch_norm,
          padding='SAME',
          stride=1,
          reuse=reuse):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
          depth = 256
          branch_logits = []

          if model_options.add_image_level_feature:
            if model_options.crop_size is not None:
              image_pooling_crop_size = model_options.image_pooling_crop_size
              # If image_pooling_crop_size is not specified, use crop_size.
              if image_pooling_crop_size is None:
                image_pooling_crop_size = model_options.crop_size
              pool_height = scale_dimension(
                  image_pooling_crop_size[0],
                  1. / model_options.output_stride)
              pool_width = scale_dimension(
                  image_pooling_crop_size[1],
                  1. / model_options.output_stride)
              image_feature = slim.avg_pool2d(
                  features, [pool_height, pool_width],
                  model_options.image_pooling_stride, padding='VALID')
              resize_height = scale_dimension(
                  model_options.crop_size[0],
                  1. / model_options.output_stride)
              resize_width = scale_dimension(
                  model_options.crop_size[1],
                  1. / model_options.output_stride)
            else:
              # If crop_size is None, we simply do global pooling.
              pool_height = tf.shape(features)[1]
              pool_width = tf.shape(features)[2]
              image_feature = tf.reduce_mean(
                  features, axis=[1, 2], keepdims=True)
              resize_height = pool_height
              resize_width = pool_width
            image_feature = slim.conv2d(
                image_feature, depth, 1, scope=IMAGE_POOLING_SCOPE)
            image_feature = _resize_bilinear(
                image_feature,
                [resize_height, resize_width],
                image_feature.dtype)
            # Pin resize_height/resize_width in the static shape only when
            # they are Python ints; dynamic Tensor dimensions become None.
            if isinstance(resize_height, tf.Tensor):
              resize_height = None
            if isinstance(resize_width, tf.Tensor):
              resize_width = None
            image_feature.set_shape([None, resize_height, resize_width, depth])
            branch_logits.append(image_feature)

          # Employ a 1x1 convolution.
          branch_logits.append(slim.conv2d(features, depth, 1,
                                           scope=ASPP_SCOPE + str(0)))

          if model_options.atrous_rates:
            # Employ 3x3 convolutions with different atrous rates.
            for i, rate in enumerate(model_options.atrous_rates, 1):
              scope = ASPP_SCOPE + str(i)
              if model_options.aspp_with_separable_conv:
                aspp_features = split_separable_conv2d(
                    features,
                    filters=depth,
                    rate=rate,
                    weight_decay=weight_decay,
                    scope=scope)
              else:
                aspp_features = slim.conv2d(
                    features, depth, 3, rate=rate, scope=scope)
              branch_logits.append(aspp_features)

          # Merge branch logits.
          concat_logits = tf.concat(branch_logits, 3)
          concat_logits = slim.conv2d(
              concat_logits, depth, 1, scope=CONCAT_PROJECTION_SCOPE)
          concat_logits = slim.dropout(
              concat_logits,
              keep_prob=0.9,
              is_training=is_training,
              scope=CONCAT_PROJECTION_SCOPE + '_dropout')

          return concat_logits, end_points
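
A minimal usage sketch, assuming this is DeepLab's model.extract_features and that common.ModelOptions is importable as in the TensorFlow research repository; the input sizes are placeholders:

import tensorflow as tf
from deeplab import common
from deeplab import model

# Illustrative single-image batch; 513x513 is a common DeepLab crop size.
images = tf.placeholder(tf.float32, shape=[1, 513, 513, 3])
model_options = common.ModelOptions(
    outputs_to_num_classes={'semantic': 21},
    crop_size=[513, 513],
    output_stride=16)
features, end_points = model.extract_features(
    images, model_options, is_training=False)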
Example #7
    def test_extract_universal_features(self):
        expected = [1, 1, 0, 0, 0]

        actual = feature_extractor.extract_features('1', self.generate_offer_list(), True, {'1': 10.0})

        self.assertListEqual(expected, actual)
Example #8
    def test_extract_product_specific_features(self):
        expected = [1, 1, 0, 0, 0, 0.0, 0, 3, 0, 0.0, 0, 0, 0, 0]

        actual = feature_extractor.extract_features('1', self.generate_offer_list(), False, {'1': 10.0})

        self.assertListEqual(expected, actual)
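
Note the vector lengths in these tests: the universal extractor returns 5 features and the product-specific one 14, which presumably correspond to NUM_OF_UNIVERSAL_FEATURES and NUM_OF_PRODUCT_SPECIFIC_FEATURES used in Example #1.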