Example #1
        def wrapped(*args, **kw):
            try:
                return f(*args, **kw)
            except Exception as e:
                # Save exception since it can be clobbered during processing
                # below before we can re-raise
                exc_info = sys.exc_info()

                if notifier:
                    payload = dict(args=args, exception=e)
                    payload.update(kw)

                    # Use temp vars so we don't shadow
                    # our outer definitions.
                    temp_level = level
                    if not temp_level:
                        temp_level = notifier.ERROR

                    temp_type = event_type
                    if not temp_type:
                        # If f has multiple decorators, they must use
                        # six.wraps to ensure the name is
                        # propagated.
                        temp_type = f.__name__

                    notifier.notify(publisher_id, temp_type, temp_level,
                                    payload)

                # re-raise original exception since it may have been clobbered
                raise_(exc_info[0], exc_info[1], exc_info[2])
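These snippets all lean on a `raise_` helper with the three-argument signature of `six.reraise` / `future.utils.raise_`. A minimal sketch of such a helper on Python 3, together with the save-then-re-raise idiom from Example #1 (the shim is an assumption about the helper's semantics, not any library's actual source):

import sys

def raise_(tp, value=None, tb=None):
    # Python 3 sketch of six.reraise / future.utils.raise_: `tp` is an
    # exception class or instance, `value` its argument(s) or instance,
    # `tb` the traceback to attach before re-raising.
    if isinstance(tp, BaseException):
        exc = tp
    elif isinstance(value, BaseException):
        exc = value
    elif value is None:
        exc = tp()
    else:
        exc = tp(value)
    raise exc.with_traceback(tb)

def notify(payload):
    # Stand-in for notifier.notify(); work done here can clobber the
    # implicit "current exception" state, which is why exc_info is
    # saved first.
    pass

try:
    try:
        1 / 0
    except Exception as e:
        exc_info = sys.exc_info()          # save before further processing
        notify(dict(exception=e))
        raise_(exc_info[0], exc_info[1], exc_info[2])  # original traceback intact
except ZeroDivisionError as final:
    assert final is exc_info[1]            # same exception object, same traceback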
Example #2
    def __init__(self, parent, value, is_name=False, name=None):
        RingElement.__init__(self, parent)
        self._create = value
        if parent is None: return     # means "invalid element"
        # idea: Joe Wetherell -- try to find out if the output
        # is too long and if so get it using file, otherwise
        # don't.
        if isinstance(value, six.string_types) and parent._eval_using_file_cutoff and \
           parent._eval_using_file_cutoff < len(value):
            self._get_using_file = True

        if is_name:
            self._name = value
        else:
            try:
                self._name = parent._create(value, name=name)
            # Convert ValueError and RuntimeError to TypeError for
            # coercion to work properly.
            except (RuntimeError, ValueError) as x:
                self._session_number = -1
                raise_(TypeError, x, sys.exc_info()[2])
            except BaseException:
                self._session_number = -1
                raise
        self._session_number = parent._session_number
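On plain Python 3 the same conversion (coercing RuntimeError/ValueError to TypeError while keeping the original traceback) can be written with `with_traceback`. A minimal sketch, with `create` as a hypothetical stand-in for `parent._create`:

import sys

def coerce_to_type_error(create, value, name=None):
    # Wrap RuntimeError/ValueError in a TypeError so the coercion
    # machinery sees the expected type, but keep the traceback that
    # points at the real failure.
    try:
        return create(value, name=name)
    except (RuntimeError, ValueError) as x:
        raise TypeError(x).with_traceback(sys.exc_info()[2])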
Example #3
def _resolve_dependent_style(style_path):
    """Get the independent style of a dependent style.

    :param path style_path: Path to a dependent style.
    :returns: Name of the independent style of the passed dependent style.
    :raises StyleDependencyError: If no style could be found/parsed.

    CSL Styles are split into two categories, Independent and Dependent.
    Independent styles, as their name says, are self-sustained and contain all
    the necessary information in order to format a citation. Dependent styles
    on the other hand, depend on Independent styles, and actually just pose as
    aliases for them. For example 'nature-digest' is a dependent style that
    just points to the 'nature' style.

    .. seealso::

        `CSL Specification
         <http://docs.citationstyles.org/en/stable/specification.html#file-types>`_
    """
    try:
        # The independent style is mentioned inside a link element of
        # the form 'http://www.stylesite.com/stylename'.
        for _, el in iterparse(style_path, tag='{%s}link' % xml_namespace):
            if el.attrib.get('rel') == 'independent-parent':
                url = el.attrib.get('href')
                return url.rsplit('/', 1)[1]
    except Exception:
        # Invalid XML, missing info, etc. Preserve the original exception.
        stacktrace = sys.exc_info()[2]
    else:
        stacktrace = None

    raise_(StyleDependencyError('Dependent style {0} could not be parsed'
                                .format(style_path)), None, stacktrace)
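The try/except/else split above keeps the parser's traceback only when parsing actually failed; on the clean path (valid XML but no `independent-parent` link) there is nothing worth chaining. A self-contained sketch of that shape, with `parse` standing in for the `iterparse` loop:

import sys

class StyleDependencyError(Exception):
    """Stand-in for the error type raised above."""

def resolve(style_path, parse):
    try:
        result = parse(style_path)      # may raise on invalid input
    except Exception:
        stacktrace = sys.exc_info()[2]  # keep the parser's traceback
    else:
        if result is not None:
            return result
        stacktrace = None               # parsed fine, just found nothing
    raise StyleDependencyError(
        'Dependent style {0} could not be parsed'.format(style_path)
    ).with_traceback(stacktrace)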
Example #4
  def get_topologies(self, callback=None):
    """ get topologies """
    isWatching = False

    # Temp dict used to return result
    # if callback is not provided.
    ret = {
        "result": None
    }
    if callback:
      isWatching = True
    else:
      def callback(data):
        """Custom callback to get the topologies right now."""
        ret["result"] = data

    try:
      # Ensure the topology path exists. If a topology has never been deployed
      # then the path will not exist so create it and don't crash.
      # (fixme) add a watch instead of creating the path?
      self.client.ensure_path(self.get_topologies_path())

      self._get_topologies_with_watch(callback, isWatching)
    except NoNodeError:
      self.client.stop()
      path = self.get_topologies_path()
      raise_(StateException("Error required topology path '%s' not found" % (path),
                            StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])

    # The topologies are now populated with the data.
    return ret["result"]
Example #5
    def __call__(self):
        """Return a co-routine which runs the task group."""
        raised_exceptions = []
        while any(six.itervalues(self._runners)):
            try:
                for k, r in self._ready():
                    r.start()
                    if not r:
                        del self._graph[k]

                yield

                for k, r in self._running():
                    if r.step():
                        del self._graph[k]
            except Exception:
                exc_info = sys.exc_info()
                if self.aggregate_exceptions:
                    self._cancel_recursively(k, r)
                else:
                    self.cancel_all(grace_period=self.error_wait_time)
                raised_exceptions.append(exc_info)
            except:  # noqa
                with excutils.save_and_reraise_exception():
                    self.cancel_all()

        if raised_exceptions:
            if self.aggregate_exceptions:
                raise ExceptionGroup(v for t, v, tb in raised_exceptions)
            else:
                exc_type, exc_val, traceback = raised_exceptions[0]
                raise_(exc_type, exc_val, traceback)
Example #6
	def sess(self):
		"""get session"""
		if self._sess:
			return self._sess

		# check if email server specified
		if not getattr(self, 'server', None):
			err_msg = _('Email Account not setup. Please create a new Email Account from Setup > Email > Email Account')
			frappe.msgprint(err_msg)
			raise frappe.OutgoingEmailError(err_msg)

		try:
			if self.use_tls and not self.port:
				self.port = 587

			self._sess = smtplib.SMTP((self.server or "").encode('utf-8'),
				cint(self.port) or None)

			if not self._sess:
				err_msg = _('Could not connect to outgoing email server')
				frappe.msgprint(err_msg)
				raise frappe.OutgoingEmailError(err_msg)

			if self.use_tls:
				self._sess.ehlo()
				self._sess.starttls()
				self._sess.ehlo()

			if self.login and self.password:
				ret = self._sess.login((self.login or "").encode('utf-8'),
					(self.password or "").encode('utf-8'))

				# check if logged correctly
				if ret[0]!=235:
					frappe.msgprint(ret[1])
					raise frappe.OutgoingEmailError(ret[1])

			return self._sess

		except _socket.error as e:
			# Invalid mail server -- due to refusing connection
			frappe.msgprint(_('Invalid Outgoing Mail Server or Port'))
			traceback = sys.exc_info()[2]
			raise_(frappe.ValidationError, e, traceback)

		except smtplib.SMTPAuthenticationError as e:
			frappe.msgprint(_("Invalid login or password"))
			traceback = sys.exc_info()[2]
			raise_(frappe.ValidationError, e, traceback)

		except smtplib.SMTPException:
			frappe.msgprint(_('Unable to send emails at this time'))
			raise
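The socket and SMTP handlers above share one shape: map a low-level error onto a domain error while keeping the traceback of the real failure. A stripped-down Python 3 sketch, with a hypothetical `ValidationError` standing in for `frappe.ValidationError`:

import smtplib
import socket
import sys

class ValidationError(Exception):
    """Hypothetical stand-in for frappe.ValidationError."""

def connect_smtp(server, port):
    try:
        return smtplib.SMTP(server, port)
    except socket.error as e:
        # Connection refused, bad host, etc. -- re-raise as a domain
        # error that still points at the failing socket call.
        raise ValidationError(e).with_traceback(sys.exc_info()[2])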
Example #7
  def create_execution_state(self, topologyName, executionState):
    """ create execution state """
    if not executionState or not executionState.IsInitialized():
      raise_(StateException("Execution State protobuf not init properly",
                            StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])

    path = self.get_execution_state_path(topologyName)
    LOG.info("Adding topology: {0} to path: {1}".format(
        topologyName, path))
    executionStateString = executionState.SerializeToString()
    try:
      self.client.create(path, value=executionStateString, makepath=True)
      return True
    except NoNodeError:
      raise_(StateException("NoNodeError while creating execution state",
                            StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
    except NodeExistsError:
      raise_(StateException("NodeExistsError while creating execution state",
                            StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])
    except ZookeeperError:
      raise_(StateException("Zookeeper while creating execution state",
                            StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
    except Exception:
      # Just re-raise the exception.
      raise
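Each handler above converts a specific kazoo error into a `StateException` carrying a machine-readable type code, so callers can branch without importing ZooKeeper internals. A runnable sketch of that translation expressed as a table, assuming exception classes and codes analogous to the snippet:

import sys

class StateException(Exception):
    """Sketch of the domain error above: a message plus a type code."""
    EX_TYPE_NO_NODE_ERROR = 1
    EX_TYPE_NODE_EXISTS_ERROR = 2
    EX_TYPE_ZOOKEEPER_ERROR = 3

    def __init__(self, message, ex_type):
        super(StateException, self).__init__(message)
        self.ex_type = ex_type

def translate_errors(action, error_map):
    # Run `action`; if it raises a mapped error type, re-raise it as a
    # StateException with the matching code and the original traceback.
    try:
        return action()
    except tuple(error_map) as e:
        raise StateException(
            "%s while creating execution state" % type(e).__name__,
            error_map[type(e)]).with_traceback(sys.exc_info()[2])

class NoNodeError(Exception):
    """Stand-in for kazoo.exceptions.NoNodeError."""

def failing_create():
    raise NoNodeError("no such node")

try:
    translate_errors(failing_create,
                     {NoNodeError: StateException.EX_TYPE_NO_NODE_ERROR})
except StateException as exc:
    assert exc.ex_type == StateException.EX_TYPE_NO_NODE_ERROR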
Example #8
  def create_pplan(self, topologyName, pplan):
    """ create physical plan """
    if not pplan or not pplan.IsInitialized():
      raise_(StateException("Physical Plan protobuf not init properly",
                            StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])

    path = self.get_pplan_path(topologyName)
    LOG.info("Adding topology: {0} to path: {1}".format(
        topologyName, path))
    pplanString = pplan.SerializeToString()
    try:
      self.client.create(path, value=pplanString, makepath=True)
      return True
    except NoNodeError:
      raise_(StateException("NoNodeError while creating pplan",
                            StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
    except NodeExistsError:
      raise_(StateException("NodeExistsError while creating pplan",
                            StateException.EX_TYPE_NODE_EXISTS_ERROR), sys.exc_info()[2])
    except ZookeeperError:
      raise_(StateException("Zookeeper while creating pplan",
                            StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
    except Exception:
      # Just re-raise the exception.
      raise
Example #9
    def __init__(self, **kwargs):
        self.kwargs = kwargs

        try:
            self.message = self.msg_fmt % kwargs
        except KeyError:
            exc_info = sys.exc_info()
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_LE('Exception in string format operation'))
            for name, value in six.iteritems(kwargs):
                LOG.error("%s: %s" % (name, value))  # noqa

            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise_(exc_info[0], exc_info[1], exc_info[2])
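The constructor above degrades gracefully when `msg_fmt` references a kwarg that was not supplied: it logs the mismatch and re-raises the `KeyError` only when a module-level flag marks formatting errors as fatal. A compact runnable sketch of that policy; `_FATAL_EXCEPTION_FORMAT_ERRORS` and `msg_fmt` here are assumptions mirroring the snippet:

import logging
import sys

LOG = logging.getLogger(__name__)
_FATAL_EXCEPTION_FORMAT_ERRORS = False   # flip to True to surface bugs

class SketchError(Exception):
    msg_fmt = "Operation on %(resource)s failed"

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        try:
            self.message = self.msg_fmt % kwargs
        except KeyError:
            exc_info = sys.exc_info()
            LOG.exception('Exception in string format operation')
            for name, value in kwargs.items():
                LOG.error("%s: %s", name, value)
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise exc_info[1].with_traceback(exc_info[2])
            self.message = self.msg_fmt   # fall back to the raw template
        super(SketchError, self).__init__(self.message)

# With the flag off, a mismatched kwarg still yields a usable exception:
assert SketchError(wrong_name='db').message == SketchError.msg_fmt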
Example #10
    def __call__(self):
        """Return a co-routine which runs the task group."""
        raised_exceptions = []
        thrown_exceptions = []

        while any(six.itervalues(self._runners)):
            try:
                for k, r in self._ready():
                    r.start()
                    if not r:
                        del self._graph[k]

                if self._graph:
                    try:
                        yield
                    except Exception:
                        thrown_exceptions.append(sys.exc_info())
                        raise

                for k, r in self._running():
                    if r.step():
                        del self._graph[k]
            except Exception:
                exc_info = sys.exc_info()
                if self.aggregate_exceptions:
                    self._cancel_recursively(k, r)
                else:
                    self.cancel_all(grace_period=self.error_wait_time)
                raised_exceptions.append(exc_info)
                del exc_info
            except:  # noqa
                with excutils.save_and_reraise_exception():
                    self.cancel_all()

        if raised_exceptions:
            try:
                if self.aggregate_exceptions:
                    raise ExceptionGroup(v for t, v, tb in raised_exceptions)
                else:
                    if thrown_exceptions:
                        raise_(*thrown_exceptions[-1])
                    else:
                        raise_(*raised_exceptions[0])
            finally:
                del raised_exceptions
                del thrown_exceptions
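Compared with Example #5, this version also records exceptions thrown into the coroutine at the `yield`, prefers re-raising the most recent of those, and `del`s the saved tuples: a traceback references every frame on its stack, so keeping `exc_info` tuples alive in a frame they point back to creates reference cycles. A small sketch of the collect-then-re-raise portion:

import sys

def run_all(tasks):
    # Run every task, remember each failure's exc_info, then re-raise
    # the first failure with its original traceback.
    raised = []
    for task in tasks:
        try:
            task()
        except Exception:
            raised.append(sys.exc_info())
    if raised:
        try:
            tp, value, tb = raised[0]
            raise value.with_traceback(tb)
        finally:
            # Drop the frame-holding traceback references, as the
            # `del raised_exceptions` above does.
            del raised

def boom():
    raise RuntimeError('task failed')

try:
    run_all([lambda: None, boom])
except RuntimeError as e:
    assert str(e) == 'task failed'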
Example #11
def import_string(import_name, silent=False):
    """Imports an object based on a string.  This is useful if you want to
    use import paths as endpoints or something similar.  An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    For better debugging we recommend the new :func:`import_module`
    function to be used instead.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object

    :copyright: (c) 2011 by the Werkzeug Team
    """
    # force the import name to automatically convert to strings
    if isinstance(import_name, text_type):
        import_name = str(import_name)
    try:
        if ':' in import_name:
            module, obj = import_name.split(':', 1)
        elif '.' in import_name:
            module, obj = import_name.rsplit('.', 1)
        else:
            return __import__(import_name)
        # __import__ is not able to handle unicode strings in the fromlist
        # if the module is a package
        try:
            obj = obj.decode('utf-8')
        except (AttributeError, UnicodeError):
            pass
        try:
            return getattr(__import__(module, None, None, [obj]), obj)
        except (ImportError, AttributeError):
            # support importing modules not yet set up by the parent module
            # (or package for that matter)
            modname = module + '.' + obj
            __import__(modname)
            return sys.modules[modname]
    except ImportError as e:
        if not silent:
            raise_(ImportStringError(import_name, e), None, sys.exc_info()[2])
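On Python 3 the same contract can be expressed with `importlib`. A simplified sketch (this `ImportStringError` is a hypothetical stand-in for werkzeug's richer class, and the submodule fallback of the original is omitted):

import importlib
import sys

class ImportStringError(ImportError):
    """Hypothetical simplified stand-in for werkzeug's error type."""
    def __init__(self, import_name, exception):
        super(ImportStringError, self).__init__(
            'import_string() failed for %r: %s' % (import_name, exception))

def import_string_py3(import_name, silent=False):
    module, _, obj = import_name.replace(':', '.').rpartition('.')
    try:
        if not module:
            return importlib.import_module(import_name)
        return getattr(importlib.import_module(module), obj)
    except (ImportError, AttributeError) as e:
        if silent:
            return None
        raise ImportStringError(import_name, e).with_traceback(
            sys.exc_info()[2])

assert import_string_py3('xml.sax.saxutils:escape')('<') == '&lt;'
assert import_string_py3('no.such.module', silent=True) is None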
Example #12
    def __init__(self, **kwargs):
        self.kwargs = kwargs

        try:
            self.message = self.msg_fmt % kwargs

            if self.error_code:
                self.message = "HEAT-E%s %s" % (self.error_code, self.message)
        except KeyError:
            exc_info = sys.exc_info()
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_LE("Exception in string format operation"))
            for name, value in six.iteritems(kwargs):
                LOG.error(_LE("%(name)s: %(value)s"), {"name": name, "value": value})  # noqa

            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise_(exc_info[0], exc_info[1], exc_info[2])
Example #13
    def create_cursors(self, sess, distort, shuffle):
        # start the preloading threads.

        # tdata = self.read_and_decode(self.train_queue, self.conf)
        # vdata = self.read_and_decode(self.val_queue, self.conf)
        # self.train_data = tdata
        # self.val_data = vdata
        self.coord = tf.train.Coordinator()
        scale = self.scale

        train_threads = []
        val_threads = []

        if self.for_training == 0:
            # for training
            n_threads = 10
        elif self.for_training == 1:
            # for prediction
            n_threads = 0
        elif self.for_training == 2:
            # for cross validation
            n_threads = 1
        else:
            # No exception is active here, so sys.exc_info()[2] is None.
            traceback = sys.exc_info()[2]
            raise_(ValueError, "Incorrect value for for_training", traceback)

        for _ in range(n_threads):

            train_t = threading.Thread(target=self.read_image_thread,
                                 args=(sess, self.DBType.Train, distort, shuffle, scale))
            train_t.start()
            train_threads.append(train_t)

            val_t = threading.Thread(target=self.read_image_thread,
                                       args=(sess, self.DBType.Val, False, False, scale))
            val_t.start()
            val_threads.append(val_t)

        # self.threads = tf.train.start_queue_runners(sess=sess, coord=self.coord)
        # self.val_threads1 = self.val_qr.create_threads(sess, coord=self.coord, start=True)
        # self.train_threads1 = self.train_qr.create_threads(sess, coord=self.coord, start=True)
        self.train_threads = train_threads
        self.val_threads = val_threads
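Note that in the `else` branch above no exception is active, so `sys.exc_info()` returns `(None, None, None)` and the `traceback` handed to `raise_` is always `None`. When validating input outside any `except` block, a plain `raise` carries exactly the same information; a sketch:

def thread_count(for_training):
    # Map the mode flag to a preloading thread count and reject anything
    # else up front -- there is no prior traceback worth preserving.
    counts = {0: 10,   # training
              1: 0,    # prediction
              2: 1}    # cross validation
    try:
        return counts[for_training]
    except KeyError:
        raise ValueError("Incorrect value for for_training")

assert thread_count(2) == 1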
Example #14
    def __init__(self, **kwargs):
        self.kwargs = kwargs

        try:
            if self.error_code in ERROR_CODE_MAP:
                self.msg_fmt = ERROR_CODE_MAP[self.error_code]

            self.message = self.msg_fmt % kwargs

            if self.error_code:
                self.message = 'KING-E%s %s' % (self.error_code, self.message)
        except KeyError:
            exc_info = sys.exc_info()
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_LE('Exception in string format operation'))
            for name, value in six.iteritems(kwargs):
                LOG.error(_LE("%(name)s: %(value)s"),
                          {'name': name, 'value': value})  # noqa

            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise_(exc_info[0], exc_info[1], exc_info[2])
Example #15
 def delete_execution_state(self, topologyName):
   """ delete execution state """
   path = self.get_execution_state_path(topologyName)
   LOG.info("Removing topology: {0} from path: {1}".format(
       topologyName, path))
   try:
     self.client.delete(path)
     return True
   except NoNodeError:
     raise_(StateException("NoNodeError while deleting execution state",
                           StateException.EX_TYPE_NO_NODE_ERROR), sys.exc_info()[2])
   except NotEmptyError:
     raise_(StateException("NotEmptyError while deleting execution state",
                           StateException.EX_TYPE_NOT_EMPTY_ERROR), sys.exc_info()[2])
   except ZookeeperError:
     raise_(StateException("Zookeeper while deleting execution state",
                           StateException.EX_TYPE_ZOOKEEPER_ERROR), sys.exc_info()[2])
   except Exception:
     # Just re-raise the exception.
     raise
Example #16
def past_search_per(what, filter_col=None):
    """
    Section 7: Seller Analytics
    >>> past_search_per('Location')
    >>> past_search_per('SearchPriceBracket')
    >>> past_search_per('SearchKeywordsType')
    >>> past_search_per('SearchKeywordsFeature')

    >>> past_search_per('Location', filter_col=('VisitedBy', 3))
    >>> past_search_per('SearchPriceBracket', filter_col=('VisitedBy', 3))
    >>> past_search_per('SearchKeywordsType', filter_col=('VisitedBy', 3))
    >>> past_search_per('SearchKeywordsFeature', filter_col=('VisitedBy', 3))
    """
    if what not in [
            'Location', 'SearchPriceBracket', 'SearchKeywordsType',
            'SearchKeywordsFeature'
    ]:
        if Constants.DEBUG:
            err_msg = 'Given what is not supported'
            raise_(AttributeError, AttributeError(err_msg))
        return False, None

    data_temp = None
    data = data_search
    if filter_col and len(filter_col) == 2:
        try:
            fc = filter_col[0]
            fv = filter_col[1]
            data_temp = data[data[fc] == fv].copy(deep=True)
        except KeyError:
            if Constants.DEBUG:
                msg = 'filter column {} is not present'.format(filter_col[0])
                raise_(AttributeError, AttributeError(msg))
            else:
                return False, None
    if data_temp is None:
        data_temp = data.copy(deep=True)

    if data_temp.shape[0] == 0:
        if Constants.DEBUG:
            print("The respective filter_col returns not result")
        dwrap = dict_wrap(override=None)
        output = lambda: {}
        return True, dwrap.wrap(functor=output)
    if what in ['Location', 'SearchPriceBracket']:
        ret = data_temp.groupby(what).VisitedBy.count().sort_values(
            ascending=False)
        output = lambda: ret.reset_index().set_index(what)
        if Constants.DEBUG:
            import sys
            jwrap = json_wrap(override=sys.stdout)
            print(jwrap.wrap(functor=output, indent=4))
            return True, None
        else:
            jwrap = json_wrap(override=None)
            return True, jwrap.wrap(functor=output, indent=4)
    else:
        typeSet = set()

        def _resolve_type_keys(data):
            allTypes = data.split('|')
            for each in allTypes:
                typeSet.add(each)
            return allTypes

        types = data_temp[what].apply(_resolve_type_keys)
        for each in typeSet:
            data_temp['type_' + str(each)] = 0

        ul = []
        for each in types:
            ul.append(each)
        from collections import Counter
        ret = dict(Counter(sum(ul, [])))

        output = lambda: ret
        if Constants.DEBUG:
            import sys
            dwrap = dict_wrap(override=sys.stdout)
            print(dwrap.wrap(functor=output))
            return True, None
        else:
            dwrap = dict_wrap(override=None)
            return True, dwrap.wrap(functor=output)
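This and the following analytics helpers share one control-flow convention: in `DEBUG` mode invalid input raises immediately (via `raise_` with a freshly built instance), while in production the function returns a `(status, payload)` pair instead. A condensed sketch of the convention, with `Constants` as a stand-in for the real settings object:

class Constants(object):
    DEBUG = False   # stand-in for the project-wide settings flag

def validated(what, allowed):
    # Raise loudly for developers, fail softly for API callers.
    if what not in allowed:
        if Constants.DEBUG:
            raise AttributeError('Given what is not supported')
        return False, None
    return True, what

assert validated('Location', ['Location']) == (True, 'Location')
assert validated('Bogus', ['Location']) == (False, None)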
Example #17
def national_price_tally(n, national_price, filter_col=None, plot_type='bar'):
    """
    Compares property prices against the ```national_price``` and returns
    the top ```n``` properties above it (or, for negative ```n```, the
    top properties below it), optionally screened by ```filter_col```.
    
    ~~~~~~~~~~  Examples  ~~~~~~~~~~
    Currents:
        # Top n properties above the NATIONAL PRICE among all properties
        >>> national_price_tally(n, national_price)
        
        # Top n properties below the NATIONAL PRICE among all properties
        >>> national_price_tally(-n, national_price)

        # Top n properties below the NATIONAL PRICE based on filter_col
        >>> national_price_tally(-n, national_price, filter_col=('added_by_id', 23))
        >>> national_price_tally(n, national_price, filter_col=('added_by_id', 23))

        NOTE: The exact column name may change for the filter_col
        NOTE2: The national price needs to be passed thoughtfully
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :param plot_type: Plotting type
    :param n: number of properties above or below NP
    :param national_price: The national_price of the location
    :param filter_col: group of values (column and value) -> tuple
    """

    # This is to see if below NP or above NP properties are taken
    price_above_na = True
    if n < 0:
        price_above_na = False
    data_temp = None
    if filter_col and len(filter_col) == 2:
        try:
            fc = filter_col[0]
            fv = filter_col[1]
            data_temp = data[data[fc] == fv].copy(deep=True)
        except KeyError:
            if Constants.DEBUG:
                msg = 'filter column {} is not present'.format(filter_col[0])
                raise_(AttributeError, AttributeError(msg))
            else:
                return False, None

    if data_temp is None:
        data_temp = data.copy(deep=True)
    preprocess.generic_operations(data_temp, 'listing_price', 'size',
                                  'square_meter_price', '/')
    data_temp['more_than_NA'] = data_temp['square_meter_price'].apply(
        lambda x: x > national_price)
    preprocess.generic_operations(data_temp, 'square_meter_price',
                                  national_price, 'per_below_above_NA_temp',
                                  '-')
    preprocess.generic_operations(data_temp, 'per_below_above_NA_temp',
                                  national_price * 100, 'per_below_above_NA',
                                  '/')
    data_temp.drop(['per_below_above_NA_temp'], axis=1, inplace=True)
    if price_above_na:
        ret_data = data_temp[data_temp['more_than_NA'] == 1]
    else:
        ret_data = data_temp[data_temp['more_than_NA'] == 0]

    if ret_data.shape[0] == 0:
        if Constants.DEBUG:
            print('Operation returned no values')
        dwrap = dict_wrap(override=None)
        output = lambda: {}
        return True, dwrap.wrap(functor=output)
    ascend = not price_above_na
    ret_data.sort_values(by='per_below_above_NA',
                         inplace=True,
                         ascending=ascend)
    ret_data = ret_data[['id', 'per_below_above_NA']].head(abs(n))

    if plot_type == 'bar':
        ret_data.set_index('id', inplace=True)
        output = lambda: ret_data.reset_index(drop=True)
    else:
        if Constants.DEBUG:
            print('Plot type {} is not supported'.format(plot_type))
        return False, None
    if not Constants.DEBUG:
        jwrap = json_wrap(override=None)
        return True, jwrap.wrap(functor=output, indent=4)
    else:
        import sys
        jwrap = json_wrap(override=sys.stdout)
        print(jwrap.wrap(functor=output, indent=4))
        return True, None
Example #18
def property_discounts(n,
                       filter_col=None,
                       typ='aggregated',
                       focus='location',
                       plot_type='bar'):
    """
    Calculates the discounts offered for different properties
    and returns the top ```n``` properties. It can also take
    ```filter_col``` as a tuple, filter the data according
    to the mentioned column and given value, and then perform the
    discount calculations. This can be used to filter seller-specific
    data.

    It supports the discount analysis focused on either **property_id**
    or on **location** as mentioned by ```focus```.

    For focus on location, it can perform either an **aggregated**
    per-location analysis or an **individual** discount-oriented analysis,
    as mentioned by ```typ```.
    
    ~~~~~~~~~~  Examples  ~~~~~~~~~~
    Currents:
        # Top n locations based on the discounts of all properties
        >>> property_discounts(n, focus='location', typ='aggregated')
        
        # Top n properties based on the discounts with their location of all properties
        >>> property_discounts(n, focus='location', typ='individual')

        # Top n properties based on the discounts of all properties
        >>> property_discounts(n=10, focus='property')
        
        # Top n locations based on the discounts of filter_col properties
        >>> property_discounts(n, focus='location', typ='aggregated', filter_col=('added_by_id', 23))

        # Top n properties based on the discounts with their location of filter_col properties
        >>> property_discounts(n, focus='location', typ='individual', filter_col=('added_by_id', 23))
        
        # Top n properties based on the discounts of all properties
        >>> property_discounts(n=10, focus='property', filter_col=('added_by_id', 23))

        NOTE: The exact column name may change for the filter_col
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :param n: Top n values. -> int
    :param filter_col: Filtering criteria -> tuple
    :param typ: Analysis type for focus on location -> str
    :param focus: Focus for either location or property -> str
    :param plot_type: Plotting type data munging -> str
    """
    if typ not in ['aggregated', 'individual']:
        if Constants.DEBUG:
            msg = '{} type is not implemented yet'.format(typ)
            raise_(NotImplementedError, NotImplementedError(msg))
        return False, None

    if focus not in ['location', 'property']:
        if Constants.DEBUG:
            msg = '{} focus is not implemented'.format(focus)
            raise_(NotImplementedError, NotImplementedError(msg))
        return False, None

    if n <= 0:
        if Constants.DEBUG:
            msg = '{} n value is not valid'.format(n)
            raise_(ValueError, ValueError(msg))
        return False, None
    data_temp = None
    if filter_col and len(filter_col) == 2:
        try:
            fc = filter_col[0]
            fv = filter_col[1]
            data_temp = data[data[fc] == fv].copy(deep=True)
        except KeyError:
            if Constants.DEBUG:
                msg = 'filter column {} is not present'.format(filter_col[0])
                raise_(AttributeError, AttributeError(msg))
            return False, None

    if data_temp is None:
        data_temp = data.copy(deep=True)

    if focus == 'location':
        preprocess.generic_operations(data_temp, 'bank_price', 'listing_price',
                                      'Sale', '-')
        if typ == 'aggregated':
            data_temp['Sale'] = data_temp['Sale'].apply(
                lambda x: 0 if x < 0 else x / data_temp.Sale.max())
            discount = data_temp.groupby('location').Sale.sum()
            discount.sort_values(ascending=False, inplace=True)
            data_temp = discount.reset_index()
            data_temp.columns = ['Location', 'Discount Rate']
            data_temp = data_temp.reset_index(drop=True).set_index('Location')
        elif typ == 'individual':
            data_temp['Sale'] = data_temp['Sale'].apply(lambda x: 0
                                                        if x < 0 else x)
            data_temp = data_temp[['id', 'location', 'Sale']]
            data_temp.sort_values(by='Sale', ascending=False, inplace=True)
            data_temp.columns = ['property_id', 'Location', 'Discount Price']
            data_temp = data_temp.reset_index(
                drop=True).set_index('property_id')
        else:
            if Constants.DEBUG:
                msg = 'Type {} is not implemented'.format(typ)
                raise_(NotImplementedError, NotImplementedError(msg))
            return False, None
    elif focus == 'property':
        data_temp = data_temp[data_temp.bank_certification == 1]
        preprocess.generic_operations(data_temp, 'bank_price', 'listing_price',
                                      'price_gap', '-')
        data_temp.sort_values('price_gap', ascending=False, inplace=True)
        data_temp = data_temp[['id', 'price_gap']]
        data_temp = data_temp.reset_index(drop=True).set_index('id')
    else:
        if Constants.DEBUG:
            msg = 'Focus {} is not supported yet'.format(focus)
            raise_(NotImplementedError, NotImplementedError(msg))
        return False, None

    if data_temp.shape[0] == 0:
        if Constants.DEBUG:
            print('Operation returned no data')
        dwrap = dict_wrap(override=None)
        output = lambda: {}
        return True, dwrap.wrap(functor=output)
    # elif data_temp.shape[0] < n:
    #     if Constants.DEBUG:
    #         msg = '{} n is too high'.format(n)
    #         raise_(AttributeError, AttributeError(msg))
    #     return False, None

    if plot_type == 'bar':
        output = lambda: data_temp.head(n=n)
        if Constants.DEBUG:
            import sys
            jwrap = json_wrap(override=sys.stdout)
            print(jwrap.wrap(functor=output, indent=4))
            return True, None
        else:
            jwrap = json_wrap(override=None)
            return True, jwrap.wrap(functor=output, indent=4)
    else:
        if Constants.DEBUG:
            msg = 'Plot type {} is not supported yet'.format(plot_type)
            raise_(NotImplementedError, NotImplementedError(msg))
        return False, None
Example #19
    def read_image_thread(self, sess, db_type, distort, shuffle, scale):
        # Thread that does the pre processing.

        if self.train_type == 0:
            if db_type == self.DBType.Val:
                filename = os.path.join(self.conf.cachedir, self.conf.valfilename) + '.tfrecords'
            elif db_type == self.DBType.Train:
                filename = os.path.join(self.conf.cachedir, self.conf.trainfilename) + '.tfrecords'
            else:
                # No exception is active here, so sys.exc_info()[2] is None.
                traceback = sys.exc_info()[2]
                raise_(IOError, "Unspecified DB Type", traceback)

        else:
            filename = os.path.join(self.conf.cachedir, self.conf.trainfilename) + '.tfrecords'

        cur_db = multiResData.tf_reader(self.conf, filename, shuffle)
        placeholders = self.q_placeholders

        print('Starting preloading thread of type ... {}'.format(db_type))
        batch_np = {}
        while not self.coord.should_stop():
            batch_in = cur_db.next()
            batch_np['orig_images'] = batch_in[0]
            batch_np['orig_locs'] = batch_in[1]
            batch_np['info'] = batch_in[2]
            batch_np['extra_info'] = batch_in[3]
            xs, locs = PoseTools.preprocess_ims(batch_np['orig_images'], batch_np['orig_locs'], self.conf,
                                                distort, scale)

            batch_np['images'] = xs
            batch_np['locs'] = locs

            for fn in self.q_fns:
                fn(batch_np)

            food = {pl: batch_np[name] for (name, pl) in placeholders}

            success = False
            run_options = tf.RunOptions(timeout_in_ms=30000)
            try:
                while not success:

                    if sess._closed or self.coord.should_stop():
                        return

                    try:
                        if db_type == self.DBType.Val:
                            sess.run(self.val_enqueue_op, feed_dict=food,options=run_options)
                        elif db_type == self.DBType.Train:
                            sess.run(self.train_enqueue_op, feed_dict=food, options=run_options)
                        success = True

                    except tf.errors.DeadlineExceededError:
                        pass

            except (tf.errors.CancelledError,) as e:
                return
            except Exception as e:
                logging.exception('Error in preloading thread')
                self.close_cursors()
                sys.exit(1)
                return
Example #20
    def sess(self):
        """get session"""
        if self._sess:
            return self._sess

        # check if email server specified
        if not getattr(self, 'server', None):
            err_msg = _(
                'Email Account not setup. Please create a new Email Account from Setup > Email > Email Account'
            )
            frappe.msgprint(err_msg)
            raise frappe.OutgoingEmailError(err_msg)

        try:
            if self.use_ssl:
                if not self.port:
                    self.port = 465

                self._sess = smtplib.SMTP_SSL((self.server
                                               or "").encode('utf-8'),
                                              cint(self.port) or None)
            else:
                if self.use_tls and not self.port:
                    self.port = 587

                self._sess = smtplib.SMTP(cstr(self.server or ""),
                                          cint(self.port) or None)

            if not self._sess:
                err_msg = _('Could not connect to outgoing email server')
                frappe.msgprint(err_msg)
                raise frappe.OutgoingEmailError(err_msg)

            if self.use_tls:
                self._sess.ehlo()
                self._sess.starttls()
                self._sess.ehlo()

            if self.login and self.password:
                ret = self._sess.login(str(self.login or ""),
                                       str(self.password or ""))

                # check if logged correctly
                if ret[0] != 235:
                    frappe.msgprint(ret[1])
                    raise frappe.OutgoingEmailError(ret[1])

            return self._sess

        except _socket.error as e:
            # Invalid mail server -- due to refusing connection
            frappe.msgprint(_('Invalid Outgoing Mail Server or Port'))
            traceback = sys.exc_info()[2]
            raise_(frappe.ValidationError, e, traceback)

        except smtplib.SMTPAuthenticationError as e:
            frappe.msgprint(_("Invalid login or password"))
            traceback = sys.exc_info()[2]
            raise_(frappe.ValidationError, e, traceback)

        except smtplib.SMTPException:
            frappe.msgprint(_('Unable to send emails at this time'))
            raise
Example #21
    def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=True):
        """
        Evaluate a line of commands.

        REMARK:

        By default, a long command (length exceeding ``self._eval_using_file_cutoff``)
        is evaluated using :meth:`_eval_line_using_file`.

        If the command cannot be evaluated since the interface
        has crashed, it is automatically restarted and tried
        again *once*.

        If the optional ``wait_for_prompt`` is ``False`` then even a very
        long line will not be evaluated by :meth:`_eval_line_using_file`,
        since this does not support the ``wait_for_prompt`` option.

        INPUT:

        - ``line`` -- (string) a command.
        - ``allow_use_file`` (optional bool, default ``True``) --
          allow to evaluate long commands using :meth:`_eval_line_using_file`.
        - ``wait_for_prompt`` (optional bool, default ``True``) --
          wait until the prompt appears in the sub-process' output.
        - ``restart_if_needed`` (optional bool, default ``True``) --
          If it is ``True``, the command is evaluated
          a second time after restarting the interface, if an
          ``EOFError`` occurred.

        TESTS::

            sage: singular._eval_line('def a=3;')
            ''
            sage: singular('a')
            3
            sage: singular.eval('quit;')
            ''
            sage: singular._eval_line('def a=3;')
            Singular crashed -- automatically restarting.
            ''
            sage: singular('a')
            3
            sage: singular.eval('kill a')
            ''

        We are now sending a command that would run forever. But since
        we declare that we are not waiting for a prompt, we can interrupt
        it without a KeyboardInterrupt. At the same time, we test that
        the line is not forwarded to :meth:`_eval_line_using_file`, since
        that method would not support the ``wait_for_prompt`` option.
        For reasons which are currently not understood, the ``interrupt``
        test usually returns immediately, but sometimes it takes a very
        long time on the same system. ::

            sage: cutoff = singular._eval_using_file_cutoff
            sage: singular._eval_using_file_cutoff = 4
            sage: singular._eval_line('for(int i=1;i<=3;i++){i=1;};', wait_for_prompt=False)
            ''
            sage: singular.interrupt()
            True
            sage: singular._eval_using_file_cutoff = cutoff

        The interface still works after this interrupt::

            sage: singular('2+3')
            5

        Last, we demonstrate that by default the execution of a command
        is tried twice if it fails the first time due to a crashed
        interface::

            sage: singular.eval('quit;')
            ''
            sage: singular._eval_line_using_file('def a=3;', restart_if_needed=False)
            Traceback (most recent call last):
            ...
            RuntimeError: Singular terminated unexpectedly while reading in a large line...

        Since the test of the next method would fail, we re-start
        Singular now. ::

            sage: singular('2+3')
            Singular crashed -- automatically restarting.
            5

        """
        if allow_use_file and wait_for_prompt and self._eval_using_file_cutoff and len(line) > self._eval_using_file_cutoff:
            return self._eval_line_using_file(line)
        try:
            if self._expect is None:
                self._start()
            E = self._expect
            try:
                if len(line) >= 4096:
                    raise RuntimeError("Sending more than 4096 characters with %s on a line may cause a hang and you're sending %s characters"%(self, len(line)))
                E.sendline(line)
                if wait_for_prompt == False:
                    return ''

            except OSError as msg:
                if restart_if_needed:
                    # The subprocess most likely crashed.
                    # If it's really still alive, we fall through
                    # and raise RuntimeError.
                    if sys.platform.startswith('sunos'):
                        # On (Open)Solaris, we might need to wait a
                        # while because the process might not die
                        # immediately. See Trac #14371.
                        for t in [0.5, 1.0, 2.0]:
                            if E.isalive():
                                time.sleep(t)
                            else:
                                break
                    if not E.isalive():
                        try:
                            self._synchronize()
                        except (TypeError, RuntimeError):
                            pass
                        return self._eval_line(line,allow_use_file=allow_use_file, wait_for_prompt=wait_for_prompt, restart_if_needed=False)
                raise_(RuntimeError, "%s\nError evaluating %s in %s"%(msg, line, self), sys.exc_info()[2])

            if len(line)>0:
                try:
                    if isinstance(wait_for_prompt, six.string_types):
                        E.expect(wait_for_prompt)
                    else:
                        E.expect(self._prompt)
                except pexpect.EOF as msg:
                    try:
                        if self.is_local():
                            tmp_to_use = self._local_tmpfile()
                        else:
                            tmp_to_use = self._remote_tmpfile()
                        if self._read_in_file_command(tmp_to_use) in line:
                            raise pexpect.EOF(msg)
                    except NotImplementedError:
                        pass
                    if self._quit_string() in line:
                        # we expect to get an EOF if we're quitting.
                        return ''
                    elif restart_if_needed==True: # the subprocess might have crashed
                        try:
                            self._synchronize()
                            return self._eval_line(line,allow_use_file=allow_use_file, wait_for_prompt=wait_for_prompt, restart_if_needed=False)
                        except (TypeError, RuntimeError):
                            pass
                    raise RuntimeError("%s\n%s crashed executing %s"%(msg,self, line))
                if self._terminal_echo:
                    out = E.before
                else:
                    out = E.before.rstrip('\n\r')
            else:
                if self._terminal_echo:
                    out = '\n\r'
                else:
                    out = ''
        except KeyboardInterrupt:
            self._keyboard_interrupt()
            raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
        if self._terminal_echo:
            i = out.find("\n")
            j = out.rfind("\r")
            return out[i+1:j].replace('\r\n','\n')
        else:
            return out.replace('\r\n','\n')
Example #22
    def raise_with_traceback(self, exception):

        raise_(type(exception), exception, self.traceback)
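This helper assumes the object captured a traceback earlier (for instance `self.traceback = sys.exc_info()[2]` inside an `except` block) and re-attaches it to a new exception. A self-contained Python 3 sketch of the capture/re-raise pair:

import sys

class TracebackCarrier(object):
    def __init__(self):
        self.traceback = None

    def capture(self):
        # Call from inside an except block to remember where it failed.
        self.traceback = sys.exc_info()[2]

    def raise_with_traceback(self, exception):
        raise exception.with_traceback(self.traceback)

carrier = TracebackCarrier()
try:
    1 / 0
except ZeroDivisionError:
    carrier.capture()

try:
    carrier.raise_with_traceback(RuntimeError('wrapped failure'))
except RuntimeError as e:
    # The re-raised error's traceback still includes the 1/0 frame.
    assert e.__traceback__ is not None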
Example #23
def get_investment_hotspots(location, window, n, typ='N'):
    """
    Get the top investment spots in the mentioned
    ```location``` for last ```n``` months only
    restricted to the properties which are of type
    mentioned by ```typ```.

    :param location: Location mentioned
    :param window: Month window to count
    :param n: Count of last n months
    :param typ: Type of property
    """
    if typ not in ['Y', 'N', 'P']:
        if Constants.DEBUG:
            err_msg = 'Mentioned typ is not valid'
            raise_(AttributeError, AttributeError(err_msg))
        return False, None
    if n < 2 or n > 12:
        if Constants.DEBUG:
            err_msg = 'n value should be in range of 2 to 12'
            raise_(ValueError, ValueError(err_msg))
        return False, None
    if window < 2 or window > 8:
        if Constants.DEBUG:
            err_msg = 'Window value should be in range of 2 to 8'
            raise_(ValueError, ValueError(err_msg))
        return False, None
    data_invest.drop('id', axis=1, inplace=True)

    def _monotonic(row):
        col_list = [
            'priceOnMonth1',
            'priceOnMonth2',
            'priceOnMonth3',
            'priceOnMonth4',
            'priceOnMonth5',
            'priceOnMonth6',
            'priceOnMonth7',
            'priceOnMonth8',
            'priceOnMonth9',
            'priceOnMonth10',
            'priceOnMonth11',
            'priceOnMonth12']
        k = row[col_list]
        k.dropna(inplace=True)
        investment_graded = []
        if len(k.tolist()) < window:
            return False
        for i in range(len(k)):
            temp = k[i: i + window]
            if len(temp) == window:
                investment_graded.append(temp.is_monotonic_increasing)
        return any(investment_graded[-n:])
    mask1 = data_invest.City == location
    mask2 = data_invest.Completed == typ
    data_loc = data_invest[mask1 & mask2].copy()
    data_loc['invest_hotspot'] = data_loc.apply(_monotonic, axis=1)
    ret = data_loc[data_loc.invest_hotspot == 1].PropertyId.tolist()
    output = lambda: {'investment_hotspot': ret}
    if Constants.DEBUG:
        import sys
        dwrap = dict_wrap(override=sys.stdout)
        print(dwrap.wrap(functor=output))
        return True, None
    else:
        dwrap = dict_wrap(override=None)
        return True, dwrap.wrap(functor=output)
Example #24
def seller_portfolio(seller_id, metric):
    """
    Section 6: Seller Analytics
    >>> seller_portfolio(seller_id, 'price_range')
    >>> seller_portfolio(seller_id, 'number_of_features')
    >>> seller_portfolio(seller_id, 'property_type')
    >>> seller_portfolio(seller_id, 'buyer_engagement_time')
    """
    metrics = [
        'price_range', 'number_of_features', 'property_type',
        'buyer_engagement_time'
    ]
    if metric not in metrics:
        if Constants.DEBUG:
            err_msg = 'Mentioned metric {} is not implemented'.format(metric)
            raise_(NotImplementedError, NotImplementedError(err_msg))
        return False, None
    if metric == 'price_range':
        if metric not in data_property.columns.values:
            success = preprocess.create_col(data=data_property,
                                            col_name=metric,
                                            used_cols=['listing_price'],
                                            operation='CREATE_RANGE',
                                            interval=1000000)
            if not success:
                if Constants.DEBUG:
                    print('Create Column operation was not successful')
                return False, None
        data_temp = data_property[data_property.added_by_id ==
                                  seller_id].copy()
        if data_temp.shape[0] == 0:
            if Constants.DEBUG:
                err_msg = 'seller_id: {} returned no entries'.format(seller_id)
                raise_(ValueError, ValueError(err_msg))
            return False, None
        ret = data_temp.groupby('price_range').id.count().reset_index()
        ret.columns = ['price_range', 'total_count']
        output = lambda: ret.set_index('price_range')
        if Constants.DEBUG:
            import sys
            jwrap = json_wrap(override=sys.stdout)
            print(jwrap.wrap(functor=output, indent=4))
            return True, None
        else:
            jwrap = json_wrap(override=None)
            return True, jwrap.wrap(functor=output, indent=4)
    elif metric == 'number_of_features':
        data_temp = data_property[data_property.added_by_id ==
                                  seller_id].copy()
        if data_temp.shape[0] == 0:
            if Constants.DEBUG:
                err_msg = 'seller_id: {} returned no entries'.format(seller_id)
                raise_(ValueError, ValueError(err_msg))
            return False, None
        ret = data_temp.sort_values(metric, ascending=False)
        output = lambda: ret[['id', metric]].set_index('id')
        if Constants.DEBUG:
            import sys
            jwrap = json_wrap(override=sys.stdout)
            print(jwrap.wrap(functor=output, indent=4))
            return True, None
        else:
            jwrap = json_wrap(override=None)
            return True, jwrap.wrap(functor=output, indent=4)
    elif metric == 'property_type':
        data_temp = data_property[data_property.added_by_id ==
                                  seller_id].copy()
        if data_temp.shape[0] == 0:
            if Constants.DEBUG:
                err_msg = 'seller_id: {} returned no entries'.format(seller_id)
                raise_(ValueError, ValueError(err_msg))
            return False, None
        ret = data_temp.groupby(metric).id.count().reset_index()
        ret.columns = ['property_type', 'total_count']
        output = lambda: ret.set_index('property_type')
        if Constants.DEBUG:
            import sys
            jwrap = json_wrap(override=sys.stdout)
            print(jwrap.wrap(functor=output, indent=4))
            return True, None
        else:
            jwrap = json_wrap(override=None)
            return True, jwrap.wrap(functor=output, indent=4)
    else:
        preprocess.extract_timeseries(data=data_visits,
                                      ts_col='VisitingTimeStart')
        preprocess.extract_timeseries(data=data_visits,
                                      ts_col='VisitingTimeEnd')
        preprocess.generic_operations(data_visits, 'VisitingTimeEnd',
                                      'VisitingTimeStart', 'spent', '-')
        preprocess.extract_timeseries(data=data_visits,
                                      into='minutes',
                                      ts_col='spent')
        ret = data_visits.groupby('PropertyID').minutes.mean()
        ret = ret.reset_index()
        ret.sort_values(by='minutes', ascending=False, inplace=True)
        output = lambda: ret.set_index('PropertyID')
        if Constants.DEBUG:
            import sys
            jwrap = json_wrap(override=sys.stdout)
            print(jwrap.wrap(functor=output, indent=4))
            return True, None
        else:
            jwrap = json_wrap(override=None)
            return True, jwrap.wrap(functor=output, indent=4)
Example #25
def number_of_properties(group, category=None, plot_type='heatmap'):
    """
    Finds number of properties for
    each category grouped by the group
    specified here.

    ~~~~~~~~~~  Examples  ~~~~~~~~~~
    Currents:
        # Properties with different price_range
        >>> number_of_properties(group='price_range')

        # Properties with different property_type
        >>> number_of_properties(group='property_type')

        # Properties with different price_range in different category
        >>> number_of_properties(group='price_range', category='location')

        # Properties with different property_type in different category
        >>> number_of_properties(group='property_type', category='location')

    Mile-stones:
        Planned: support for other types of ```category``` values
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    :param group: Feature to group the dataset by
    :param category: Split category of the dataset
    :param plot_type: Plot type for which dataset is generated
    :return: Status and Wrapper subclass type
    """
    if group == 'price_range':
        # First check if the price_range
        # column is already present or not
        if group not in data.columns.values:
            success = preprocess.create_col(data=data,
                                            col_name=group,
                                            used_cols=['listing_price'],
                                            operation='CREATE_RANGE',
                                            interval=1000000)
            if not success:
                if Constants.DEBUG:
                    print('Create Column operation was not successful')
                return False, None
    # Check if the category is already present or not
    elif group not in ['price_range', 'property_type']:
        if Constants.DEBUG:
            msg = 'Bad mentioned group {}. Try using price_range'.format(group)
            raise_(ValueError, ValueError(msg))
        return False, None
    if category and category not in data.columns.values:
        if Constants.DEBUG:
            print('Category column is not present in the table')
        return False, None

    # Currently only 'heatmap' is supported
    if plot_type == 'heatmap':
        if category:
            searches = data.groupby([group, category]).count().reset_index()
            search_data = searches.iloc[:, 0:3]
            search_data.columns = [group, 'Locations', 'count']
            output = lambda: search_data.pivot('Locations', group, 'count'
                                               ).fillna(0).astype(int)
        else:
            searches = data.groupby([group]).count().reset_index()
            search_data = searches.iloc[:, 0:2]
            search_data.columns = [group, 'count']
            output = lambda: search_data.set_index(group)
        # Wrap into a wrapper class and return with status

        # Use file to dump the data
        # fil = open('test.json', 'w')
        # jwrap = json_wrap(override=fil)

        # Return data for API
        if not Constants.DEBUG:
            jwrap = json_wrap(override=None)
            return True, jwrap.wrap(functor=output, indent=4)
        # pretty print data for debug
        else:
            import sys
            jwrap = json_wrap(override=sys.stdout)
            print(jwrap.wrap(functor=output, indent=4))
            return True, None
    else:
        if Constants.DEBUG:
            print('Plot type {} is not supported'.format(plot_type))
        return False, None
Example #26
 def on_exception(self, event, exc_type, exc_obj, exc_tb):
     if self.post_mortem and isinstance(exc_obj, self.post_mortem):
         pdb.post_mortem()
     if self.reraise and isinstance(exc_obj, self.reraise):
         raise_(exc_type, exc_obj, exc_tb)
     return
Example #27
def timely_visits(typ, time_aggregation, filter_col=None):
    """
    Section 3: Seller_Analytics

    >>> timely_visits(typ='aggregated', time_aggregation='month')
    >>> timely_visits(typ='aggregated', time_aggregation='quarter')

    >>> timely_visits(typ='split', time_aggregation='month')
    >>> timely_visits(typ='split', time_aggregation='quarter')

    >>> timely_visits(typ='aggregated', time_aggregation='month', filter_col=('VisitedBy', 3))
    >>> timely_visits(typ='aggregated', time_aggregation='quarter', filter_col=('VisitedBy', 3))

    >>> timely_visits(typ='split', time_aggregation='month', filter_col=('VisitedBy', 3))
    >>> timely_visits(typ='split', time_aggregation='quarter', filter_col=('VisitedBy', 3))

    >>> timely_visits(typ='aggregated', time_aggregation='month', filter_col=('AddedBy', 3))
    >>> timely_visits(typ='aggregated', time_aggregation='quarter', filter_col=('AddedBy', 3))

    >>> timely_visits(typ='split', time_aggregation='month', filter_col=('AddedBy', 3))
    >>> timely_visits(typ='split', time_aggregation='quarter', filter_col=('AddedBy', 3))
    """
    if typ not in ['aggregated', 'split']:
        if Constants.DEBUG:
            err_msg = 'The mentioned typ is not supported!'
            raise_(NotImplementedError, NotImplementedError(err_msg))
        return False, None

    if time_aggregation not in ['month', 'quarter']:
        if Constants.DEBUG:
            err_msg = 'Given time_aggregation is not supported'
            raise_(NotImplementedError, NotImplementedError(err_msg))
        return False, None

    data_temp = None
    data = data_visits
    if filter_col and len(filter_col) == 2:
        try:
            fc = filter_col[0]
            fv = filter_col[1]
            data_temp = data[data[fc] == fv].copy(deep=True)
        except KeyError:
            if Constants.DEBUG:
                msg = 'filter column {} is not present'.format(filter_col[0])
                raise_(AttributeError, AttributeError(msg))
            else:
                return False, None
    if data_temp is None:
        data_temp = data.copy(deep=True)

    if data_temp.shape[0] == 0:
        if Constants.DEBUG:
            print("The respective filter_col returns not result")
        dwrap = dict_wrap(override=None)
        output = lambda: {}
        return True, dwrap.wrap(functor=output)

    preprocess.extract_timeseries(data=data_temp,
                                  into=time_aggregation,
                                  ts_col='VisitingTimeStart',
                                  encode=True)
    unique_properties = data_temp['PropertyID'].unique()
    count_matrix = data_temp.groupby(
        ['PropertyID', str(time_aggregation) + '_enc'])['VisitedBy'].count()
    # print(count_matrix)
    from copy import deepcopy
    from collections import OrderedDict
    if time_aggregation == 'quarter':
        dtable = OrderedDict({
            'Quarter 1': 0,
            'Quarter 2': 0,
            'Quarter 3': 0,
            'Quarter 4': 0
        })
    elif time_aggregation == 'month':
        dtable = OrderedDict({
            'Jan': 0,
            'Feb': 0,
            'Mar': 0,
            'Apr': 0,
            'May': 0,
            'Jun': 0,
            'Jul': 0,
            'Aug': 0,
            'Sep': 0,
            'Oct': 0,
            'Nov': 0,
            'Dec': 0
        })
    ret = {}
    for each in unique_properties:
        dt = deepcopy(dtable)
        values = [count_matrix[each].get(key, 0) for key in dt]
        dt = OrderedDict(zip(dt.keys(), values))
        ret[each] = dt
    ret = preprocess.make_frame(ret)
    if typ == 'split':
        output = lambda: ret.transpose()[list(dtable.keys())]
    else:
        ret = ret.mean().reset_index()
        ret.columns = ['property_id', 'visit_count']
        output = lambda: ret.set_index('property_id')
    if Constants.DEBUG:
        import sys
        jwrap = json_wrap(override=sys.stdout)
        print(jwrap.wrap(functor=output, indent=4))
        return True, None
    else:
        jwrap = json_wrap(override=None)
        return True, jwrap.wrap(functor=output, indent=4)
Example #28
def visitor_stats(n, typ, filter_col=None, plot_type='bar'):
    """
    This first filters the data according to the ```filter_col``` column
    and then finds the individual property statistics of type ```typ```
    for the top ```n``` values to yield the plot type of ```plot_type```.

    ~~~~~~~~~~  Examples  ~~~~~~~~~~
    Currents:
        # Top n properties in terms of unique visitors of all properties
        >>> visitor_stats(n, typ='unique')

        # Top n properties in terms of total leads generated of all properties
        >>> visitor_stats(n, typ='leads')

        # Top n properties in terms of unique visitors of all properties
        # for the mentioned filter_col like specific seller or buyer id.
        >>> visitor_stats(n, typ='unique', filter_col=('seller_id', id))
        >>> visitor_stats(n, typ='leads', filter_col=('buyer_id', id))

        NOTE: The exact column name may change for the filter_col
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :param n: number of top values -> int
    :param typ: Type of aggregation (Unique, leads) -> str
    :param filter_col: group of values (column and value) -> tuple
    :param plot_type: Type of the plot
    :return: Status and Wrapper subclass type
    """
    data_temp = None
    if filter_col and len(filter_col) == 2:
        try:
            fc = filter_col[0]
            fv = filter_col[1]
            data_temp = data[data[fc] == fv].copy(deep=True)
        except KeyError:
            if Constants.DEBUG:
                msg = 'filter column {} is not present'.format(filter_col[0])
                raise_(AttributeError, AttributeError(msg))
            else:
                return False, None

    if typ.lower() not in ['unique', 'leads']:
        if Constants.DEBUG:
            msg = "{} is a invalid aggregate function".format(typ)
            raise_(ValueError, ValueError(msg))
        else:
            return False, None
    else:
        typ = typ.lower()

    if data_temp is None:
        data_temp = data.copy(deep=True)

    if typ == 'unique':
        data_temp.sort_values(by='searched_by_user',
                              inplace=True,
                              ascending=False)
        data_temp.reset_index(inplace=True, drop=True)
        data_temp = data_temp[['id', 'searched_by_user']]
        data_temp.columns = ['property_id', 'unique_visits']
    elif typ == 'leads':
        data_temp.sort_values(by='user_taken_action',
                              inplace=True,
                              ascending=False)
        data_temp.reset_index(inplace=True, drop=True)
        data_temp = data_temp[['id', 'user_taken_action']]
        data_temp.columns = ['property_id', 'user_actions']

    # if n > data_temp.shape[0] or n < 0:
    #     if Constants.DEBUG:
    #         msg = "{} is a invalid number of rows to return".format(n)
    #         raise_(ValueError, ValueError(msg))
    #     else:
    #         return False, None
    # else:
    data_temp = data_temp.head(n=n)

    if data_temp.shape[0] == 0:
        if Constants.DEBUG:
            print('The return operation has no entries')
        dwrap = dict_wrap(override=None)
        output = lambda: {}
        return True, dwrap.wrap(functor=output)

    if plot_type == 'bar':
        data_temp.set_index('property_id', inplace=True)
        output = lambda: data_temp.reset_index(drop=True)
    else:
        if Constants.DEBUG:
            print('Plotting type {} is not supported'.format(plot_type))
        return False, None
    # Wrap into a wrapper class and return with status

    # Use file to dump the data
    # fil = open('test.json', 'w')
    # jwrap = json_wrap(override=fil)

    # Return data for API
    if not Constants.DEBUG:
        jwrap = json_wrap(override=None)
        return True, jwrap.wrap(functor=output, indent=4)
    else:
        import sys
        jwrap = json_wrap(override=sys.stdout)
        print(jwrap.wrap(functor=output, indent=4))
        return True, None