Example No. 1
    def get_topstats(self):
        """
        Generate JSON with Top Scorers & Assists for a specific league/competition
        :return: JSON string
        """
        if self.scorer:
            html = requests.get(self.PRE_LINK + self.league_name + self.POST_LINK + "scorers")
        elif self.assist:
            html = requests.get(self.PRE_LINK + self.league_name + self.POST_LINK + "assists")

        # log("Used Cache: {0}".format(html.from_cache))
        soup = BeautifulSoup(html.content, "lxml")

        try:
            c = soup.find('div', {'class': 'stats-top-scores'}).table.find_all('td')
            data = collections.OrderedDict({})

            for x, i in enumerate(range(0, len(c), 4)):
                if i < 40:                                      # Gets the top 10 players (4 td's per row)
                    # update the dict with scraped content
                    data.update({str(x + 1): [{
                            "rank": x + 1,
                            "name": c[i + 1].text.strip(),
                            "club": c[i + 2].text.strip(),
                            "total": c[i + 3].text.strip()
                        }]
                    })

            return json.dumps(data)

        except AttributeError as e:
            log('No Table found in TopStats ==> ' + str(e))
            return False
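The loop above walks the scraped <td> cells four at a time (rank, name, club, total) and stops after ten rows. A minimal stand-alone sketch of that indexing scheme, with made-up cell values:

cells = ['1', 'Player A', 'Club A', '20',      # hypothetical <td> texts, four per table row
         '2', 'Player B', 'Club B', '18']
top = {}
for x, i in enumerate(range(0, len(cells), 4)):
    top[str(x + 1)] = [{"rank": x + 1,
                        "name": cells[i + 1],
                        "club": cells[i + 2],
                        "total": cells[i + 3]}]
print(top)  # {'1': [{'rank': 1, 'name': 'Player A', ...}], '2': [...]}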
Example No. 2
    def execute_action(cls, context, input):
        util.log('Execute action: %s.%s' %
                 (context.model.__name__, context.action.key_id_str))
        util.log('Arguments: %s' % (context.input))

        def execute_plugins(plugins):
            for plugin in plugins:
                util.log('Running plugin: %s.%s' %
                         (plugin.__module__, plugin.__class__.__name__))
                plugin.run(context)

        if hasattr(context.model, 'get_plugin_groups') and callable(
                context.model.get_plugin_groups):
            try:
                plugin_groups = context.model.get_plugin_groups(context.action)
                if len(plugin_groups):
                    for group in plugin_groups:
                        if len(group.plugins):
                            if group.transactional:
                                orm.transaction(
                                    lambda: execute_plugins(group.plugins),
                                    xg=True)
                            else:
                                execute_plugins(group.plugins)
            except orm.TerminateAction as e:
                pass
            except Exception as e:
                raise
Example No. 3
def load_ajax():
    """
    AJAX requests to retrieve highlights from Reddit API
    :return: JSON response with the list of highlights content
    """
    cache_db = "highlights_cache.sqlite"
    requests_cache.install_cache('highlights_cache', expire_after=600)          # Cached for 10 mins

    if os.path.isfile(cache_db) and len(request.get_data()) > 0:
        log("Replacing Highlights Cache at: " + str(gmt_time()))
        os.remove(cache_db)

    html = requests.get("https://footylinks.herokuapp.com/rest-api/highlights")     # cache DB is created on requests
    log("Used Cache for Highlights: {0}".format(html.from_cache))

    try:
        json_highlights = html.json()                              # Retrieve JSON string from GET request
    except json.JSONDecodeError:                                   # If url/rest-api/highlights is down for some reason
        highlights_data, size = highlights.get_highlights()        # Provide content without caching
        json_highlights = {
            'highlights': highlights_data,
            'size': size
        }

    # Create Flask response and add headers to optimize delivery
    response = make_response(jsonify({
            'success': 200,
            'list': json_highlights['highlights'],
            'size': json_highlights['size']
        }))
    response.headers['Cache-Control'] = 'public, max-age=600'

    return response
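The cache-busting pattern used here (and in the other AJAX views below) boils down to a few lines; the cache name below is only illustrative:

import os
import requests
import requests_cache

requests_cache.install_cache('example_cache', expire_after=600)  # responses cached for 10 minutes

if os.path.isfile('example_cache.sqlite'):
    os.remove('example_cache.sqlite')  # drop the sqlite file to force a fresh fetch

resp = requests.get('https://footylinks.herokuapp.com/rest-api/highlights')  # repopulates the cache
print(resp.from_cache)  # False on the first call after the file was removed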
Example No. 4
def parse_stream_title(title, has_time):
    """
    Parses the post title to extract game time and name (if exists)
    :param title: submission.title
    :param has_time: true if "GMT" in title, false otherwise
    :return: formatted game time and game name
    """
    try:
        # Parses the submission.title, try to account for all human errors (not reading subreddit rules...)
        if has_time:
            game_name = title[title.index(']') + 1:].strip()
            game_time = title[:title.index(']') + 1].strip().replace(
                "[", "").replace("]", "")
            print(game_time, game_name)
        else:
            # has_time is False: there is no "GMT" marker, so only the game name is available
            game_name = title
            game_time = ''

        return game_time, game_name

    except ValueError as e:
        # Something went wrong in parsing the title (malformed or not valid) -> skip to the next title
        log("Error msg: " + str(e))
        return False, False
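Hypothetical usage, assuming the post title follows a "[<kick-off> GMT] <match>" pattern:

parse_stream_title("[17:30 GMT] Arsenal vs Chelsea", has_time=True)   # -> ("17:30 GMT", "Arsenal vs Chelsea")
parse_stream_title("Arsenal vs Chelsea", has_time=False)              # -> ("", "Arsenal vs Chelsea")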
Example No. 5
def standings_ajax():
    """
    AJAX requests to find Standings
    :return: JSON response with the list of fixtures content
    """
    cache_db = "highlights_cache.sqlite"
    requests_cache.install_cache('standings_cache',
                                 expire_after=1800)  # Cached for 30 mins

    # request.form.get('scheduler')            # Make this true
    if os.path.isfile(cache_db) and request.form.get('scheduler'):
        log("Replacing Standings Cache")
        os.remove(cache_db)

    league = request.form['league']
    html = requests.get(
        "https://footylinks.herokuapp.com/stats/rest-api/standings?league=" +
        league)
    log("Used Cache for Standings " + league + ": {0}".format(html.from_cache))

    try:
        standing = html.json()  # Retrieve JSON string from GET request
    except json.JSONDecodeError:  # If stats/rest-api/standings is down for some reason
        stats = Standings(league=league)  # Provide content without caching
        standing = stats.get_standings()

    # Create Flask response and add headers to optimize delivery
    response = make_response(json.dumps(standing))
    response.headers[
        'Cache-Control'] = 'public, max-age=1800'  # Cached for 30 min

    return response
Example No. 6
def fixtures_ajax():
    """
    AJAX requests to find fixtures
    :return: JSON response with the list of fixtures content
    """
    cache_db = "fixtures_cache.sqlite"
    requests_cache.install_cache('fixtures_cache',
                                 expire_after=1800)  # Cached for 30 mins

    if os.path.isfile(cache_db) and request.form.get(
            'scheduler'
    ):  # and value of league = epl? first one in the worker's recaching call
        log("Replacing Fixtures Cache")
        os.remove(cache_db)

    league = request.form['league']
    html = requests.get(
        "https://footylinks.herokuapp.com/stats/rest-api/fixtures?league=" +
        league)
    log("Used Cache for Fixtures " + league + ": {0}".format(html.from_cache))

    try:
        fixtures = html.json()  # Retrieve JSON string from GET request
    except json.JSONDecodeError:  # If stats/rest-api/fixtures is down for some reason
        fixtures = Fixtures(league=league)  # Provide content without caching
        fixtures = fixtures.get_fixtures()

    # Create Flask response and add headers to optimize delivery
    response = make_response(json.dumps(fixtures))
    response.headers[
        'Cache-Control'] = 'public, max-age=1800'  # Cached for 30 min

    return response
Example No. 7
def parse_highlight_title(game_name, t):
    try:
        # Parses the submission.title, try to account for all human errors (not reading subreddit rules...)
        if "–" in game_name:
            highlight_league = t[t.index('–') + 1:t.index(',')].strip()
            name = format_match_names(game_name[:game_name.index('–')])

        elif "-" in game_name:
            highlight_league = t[t.index('-') + 1:t.index(',')].strip()
            name = format_match_names(game_name[:game_name.index('-')])

        elif "," in game_name:
            first = t[t.index(',') + 1:]
            highlight_league = first[:first.index(',')].strip()
            name = format_match_names(game_name[:game_name.index(',')])

        else:
            # Post is not formatted properly so skip it
            name, highlight_league = '', ''

        return name, highlight_league

    except ValueError as e:
        log("Error msg: " + str(e))
        # Something went wrong in parsing the title (malformed or not valid) -> skip to the next title
        return False, False
Example No. 8
 def process_action_input(cls, context, input):
     input_error = {}
     for key, argument in context.action.arguments.items():
         value = input.get(key, util.Nonexistent)
         if argument and hasattr(argument, 'value_format'):
             try:
                 value = argument.value_format(value)
                 if value is util.Nonexistent:
                     continue  # If the formatter returns util.Nonexistent, that means we have to skip setting context.input[key] = value.
                 if hasattr(
                         argument, '_validator'
                 ) and argument._validator:  # _validator is a custom function provided by orm.
                     argument._validator(argument, value)
                 context.input[key] = value
             except orm.PropertyError as e:
                 if e.message not in input_error:
                     input_error[e.message] = []
                 input_error[e.message].append(
                     key
                 )  # We group argument exceptions based on exception messages.
             except Exception as e:
                 # Initialise the group first, otherwise the append below would raise a KeyError.
                 if 'non_property_error' not in input_error:
                     input_error['non_property_error'] = []
                 input_error['non_property_error'].append(
                     key
                 )  # Or perhaps 'non_specific_error', or something similar.
                 util.log(e, 'exception')
     if len(input_error):
         raise InputError(input_error)
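For illustration, the grouped input_error mapping raised above is keyed by exception message, with the offending argument names collected underneath; the argument names and messages here are hypothetical:

input_error = {}
for key, message in [('name', 'required'), ('email', 'required'), ('state', 'invalid_choice')]:
    if message not in input_error:
        input_error[message] = []
    input_error[message].append(key)
print(input_error)  # {'required': ['name', 'email'], 'invalid_choice': ['state']}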
Example No. 9
 def respond(self):
   models = io.Engine.get_schema()
   kinds = ['0', '6', '83', '5', '35', '36', '62', '61', '39', '38', '60', '8', '57', '77', '10', '15', '16', '17', '18', '19', '49', '47']
   namespaces = metadata.get_namespaces()
   indexes = []
   keys_to_delete = []
   if self.request.get('kinds'):
     kinds = self.request.get('kinds').split(',')
   
   util.log('DELETE KINDS %s' % kinds)
   
   ignore = ['15', '16', '17', '18', '19']
   @orm.tasklet
   def wipe(kind):
     util.log(kind)
     @orm.tasklet
     def generator():
       model = models.get(kind)
       if model and not kind.startswith('__'):
         keys = yield model.query().fetch_async(keys_only=True)
         keys_to_delete.extend(keys)
         indexes.append(search.Index(name=kind))
         for namespace in namespaces:
           util.log(namespace)
           keys = yield model.query(namespace=namespace).fetch_async(keys_only=True)
           keys_to_delete.extend(keys)
           indexes.append(search.Index(name=kind, namespace=namespace))
     yield generator()
   if self.request.get('delete'):
     futures = []
     for kind in kinds:
       if kind not in ignore:
         futures.append(wipe(kind))
     orm.Future.wait_all(futures)
   if self.request.get('and_system'):
     futures = []
     for kind in kinds:
       if kind in ignore:
         futures.append(wipe(kind))
     orm.Future.wait_all(futures)
   if keys_to_delete:
     datastore.Delete([key.to_old_key() for key in keys_to_delete])
   indexes.append(search.Index(name='catalogs'))
   # empty catalog index!
   for index in indexes:
     while True:
       document_ids = [document.doc_id for document in index.get_range(ids_only=True)]
       if not document_ids:
         break
       try:
         index.delete(document_ids)
       except:
         pass
   mem.flush_all()
Example No. 10
def sportyhl(href):
    # Link to highlight request gets cached for 1 hour
    requests_cache.install_cache('sportyhl_cache', expire_after=3600)

    html = requests.get(href)
    log("Highlights Cache - " + href + ": {0}".format(html.from_cache))
    soup = BeautifulSoup(html.content, "lxml")

    vid = soup.find('iframe').get('src')

    return vid
Example No. 11
def fullmatchesandshows(href):
    build_link1 = "https://cdn.video.playwire.com/"
    build_link2 = "/videos/"
    build_link3 = "/video-sd.mp4"

    # Link to highlight request gets cached for 1 hour
    requests_cache.install_cache('fullmatchandshows_cache', expire_after=3600)

    html = requests.get(href)
    log("Highlights Cache - " + href + ": {0}".format(html.from_cache))
    soup = BeautifulSoup(html.content, "lxml")

    # Extracting the Video source and removing as many Ads as possible
    try:
        video_link = soup.find(
            "script", {"src": "//cdn.playwire.com/bolt/js/zeus/embed.js"})
        link_content = video_link.get('data-config').split("/")
        full_link = build_link1 + link_content[3] + build_link2 + link_content[
            6] + build_link3

        return full_link
    except AttributeError:
        pass
        # log("Not from Playwire.com")

    try:
        return soup.find('div', {
            'class': 'spoiler'
        }).find_next_siblings('div')[0].iframe.get('data-lazy-src')
    except AttributeError:
        pass
        # log("Not from Streamable.com")

    try:
        vid = soup.find('div', {
            'class': 'acp_content'
        }).video.source.get('src')
        if "drive.google.com" not in vid:
            return vid
        else:
            return None
    except AttributeError:
        pass
        # log("Not an embedded Playwire URL")

    try:
        return soup.find('div', {
            'class': 'acp_content'
        }).find('iframe').get('data-lazy-src')
    except AttributeError:
        pass
        # log("Not an embedded link into an iframe...")

    return None  # Couldn't figure out how to get the video source, this shouldn't happen
Example No. 12
 def generator():
   model = models.get(kind)
   if model and not kind.startswith('__'):
     keys = yield model.query().fetch_async(keys_only=True)
     keys_to_delete.extend(keys)
     indexes.append(search.Index(name=kind))
     for namespace in namespaces:
       util.log(namespace)
       keys = yield model.query(namespace=namespace).fetch_async(keys_only=True)
       keys_to_delete.extend(keys)
       indexes.append(search.Index(name=kind, namespace=namespace))
Example No. 13
def ajax_get_links():
    if request.args.get('post_id', False) is not False:
        reddit_link = request.args.get('post_id')
        final_links = highlights.parse_submission(reddit_link)
        log(final_links)

    else:
        final_links = False

    return jsonify({
        'links': final_links
    })
Example No. 14
 def process_blob_output(cls):
     blobs = mem.temp_get(settings.BLOBKEYMANAGER_KEY, None)
     if blobs is not None:
         save_blobs = blobs.get('collect', None)
         delete_blobs = blobs.get('delete', None)
         if delete_blobs:
             if save_blobs:
                 for blob in save_blobs:
                     if blob in delete_blobs:
                         delete_blobs.remove(blob)
             if delete_blobs:
                 util.log('DELETED %s BLOBS.' % len(delete_blobs))
                 blobstore.delete(delete_blobs)
Example No. 15
def motdtvblogspot(href):
    requests_cache.install_cache('motdtvblogspot_cache', expire_after=3600)

    html = requests.get(href)
    log("Used Cache: {0}".format(html.from_cache))
    soup = BeautifulSoup(html.content, "lxml")
    try:
        full_link = soup.find("iframe").get('src')
        return full_link
    except AttributeError:
        # log("MOTDTV: No video link found")
        return None
Example No. 16
File: views.py Project: lxynew1/DMS
def get_user_json():
    return_dict = {"return_info": '', 'return_code': '200'}
    if not request.args:  # request.args is never None in Flask; check for an empty query instead
        return_dict['return_code'] = '50004'
        return_dict['return_info'] = '传入参数为空'  # "no parameters were passed in"
        return json.dumps(return_dict,
                          ensure_ascii=False)  # ensure_ascii=False is needed to output Chinese characters
    # Get the incoming parameters
    GET_UID = request.args.to_dict()
    UID = GET_UID.get('UID')
    # Operate on the parameters
    # CALENDARS = USER_CALENDAR.query.filter_by(UID=UID).all()
    log(comments(USER_CALENDAR, UID))
    return_dict['return_info'] = comments(USER_CALENDAR, UID)
    return json.dumps(return_dict, ensure_ascii=False)
Example No. 17
def vidme(href):
    requests_cache.install_cache('vidme_cache', expire_after=3600)

    html = requests.get(href)
    log("Used Cache: {0}".format(html.from_cache))
    soup = BeautifulSoup(html.content, "lxml")
    try:
        full_link = soup.find("meta", {
            "name": "twitter:player:stream"
        }).get("content")
        return full_link
    except AttributeError:
        # log("Vidme: No video link found")
        return None
Example No. 18
def get_wsgi_config():
    '''Config for wsgi instance. Prepares all variables and routes for webapp2 WSGI constructor'''

    global __WSGI_CONFIG

    if __WSGI_CONFIG is not None:
        return __WSGI_CONFIG

    TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'template'), )

    for a in webclient_settings.ACTIVE_CONTROLLERS:
        importlib.import_module('webclient.controller.%s' % a)

    # It won't change, so convert it to a tuple to save memory.
    ROUTES = tuple(get_routes())

    JINJA_GLOBALS.update({
        'uri_for': webapp2.uri_for,
        'ROUTES': ROUTES,
        'settings': settings,
        'webclient_settings': webclient_settings
    })
    TEMPLATE_LOADER = FileSystemLoader(TEMPLATE_DIRS)

    util.log('Webapp2 started, compiling stuff')

    WSGI_CONFIG = {}
    WSGI_CONFIG.update(webclient_settings.WEBAPP2_EXTRAS)
    WSGI_CONFIG['webapp2_extras.jinja2'] = {
        'template_path': 'templates',
        'globals': JINJA_GLOBALS,
        'filters': JINJA_FILTERS,
        'environment_args': {
            'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.loopcontrols'],
            'autoescape': True,
            'loader': TEMPLATE_LOADER,
            'cache_size': webclient_settings.TEMPLATE_CACHE
        }
    }

    __WSGI_CONFIG = dict(WSGI_CONFIG=WSGI_CONFIG,
                         ROUTES=ROUTES,
                         JINJA_GLOBALS=JINJA_GLOBALS,
                         JINJA_FILTERS=JINJA_FILTERS,
                         TEMPLATE_DIRS=TEMPLATE_DIRS,
                         TEMPLATE_LOADER=TEMPLATE_LOADER)
    return __WSGI_CONFIG
Example No. 19
File: views.py Project: lxynew1/DMS
def store_user_calendar():
    DATA = request.args.to_dict()
    ID = str(uuid.uuid4())
    TITLE = str(DATA.get('title'))
    START = str(DATA.get('start'))
    END = str(DATA.get('end'))
    UID = str(DATA.get('UID'))
    URL = str(DATA.get('URL')) if len(str(DATA.get('URL'))) > 0 else '#'
    log(ID, TITLE, START, END, UID, URL)
    UC = USER_CALENDAR(id=ID,
                       UID=UID,
                       TITLE=TITLE,
                       START=START,
                       END=END,
                       URL=URL)
    db.session.add(UC)
    db.session.commit()
    # log(id, TITLE, START, END, UID)
    return_dict = {'return_code': '200', 'return_info': '', 'result': False}
    return json.dumps(return_dict, ensure_ascii=False)
Example No. 20
def load_ajax():
    """
    AJAX requests to retrieve livestreams from Reddit API
    :return: JSON response with the list of livestreams matches
    """
    cache_db = "livestreams_cache.sqlite"
    requests_cache.install_cache('livestreams_cache', expire_after=600)          # Cached for 10 mins

    if os.path.isfile(cache_db) and len(request.get_data()) > 0:
        log("Replacing Livestreams Cache at: " + str(gmt_time()))
        os.remove(cache_db)

    html = requests.get("https://footylinks.herokuapp.com/rest-api/livestreams")     # cache DB is created on requests
    log("Used Cache for Livestreams: {0}".format(html.from_cache))

    try:
        json_livestreams = html.json()                              # Retrieve JSON string from GET request
    except json.JSONDecodeError as e:
        log("JSON DECODE ERROR Caught - Not cached")
        streams_data, size = livestreams.get_streams()
        json_livestreams = {
            'livestreams': streams_data,
            'size': size
        }

    # Create Flask response and add headers to optimize delivery
    response = make_response(jsonify({
            'success': 200,
            'list': json_livestreams['livestreams'],
            'size': json_livestreams['size'],
            'gmt_time': gmt_time()
    }))
    response.headers['Cache-Control'] = 'public, max-age=600'

    return response
Example No. 21
 def run(cls, input):
     util.log('Payload: %s' % input)
     context = Context()
     cls.process_blob_input(
         input
     )  # This is the most efficient strategy to handle blobs we can think of!
     try:
         cls.init()
         cls.get_models(context)
         cls.get_model(context, input)
         cls.get_action(context, input)
         cls.process_action_input(context, input)
         cls.execute_action(context, input)
         cls.process_blob_state(
             'success'
         )  # Delete and/or save all blobs that have to be deleted and/or saved on success.
     except Exception as e:
         cls.process_blob_state(
             'error'
         )  # Delete and/or save all blobs that have to be deleted and/or saved on error.
         throw = True
         if isinstance(e.message, dict):
             # Here we handle our exceptions.
             for key, value in e.message.iteritems():
                 context.error(key, value)
                 throw = False
         if isinstance(e, datastore_errors.Timeout):
             context.error('transaction', 'timeout')
             throw = False
         if isinstance(e, datastore_errors.TransactionFailedError):
             context.error('transaction', 'failed')
             throw = False
         if throw:
             raise  # Here we raise all other unhandled exceptions!
     finally:
         cls.process_blob_output(
         )  # Delete all blobs that are marked to be deleted no matter what happens!
     return context.output
Example No. 22
def ourmatch(href):
    build_link1 = "https://cdn.video.playwire.com/"
    build_link2 = "/videos/"
    build_link3 = "/video-sd.mp4"

    requests_cache.install_cache('ourmatch_cache', expire_after=3600)

    html = requests.get(href)
    log("Used Cache: {0}".format(html.from_cache))
    soup = BeautifulSoup(html.content, "lxml")
    try:
        video_link = soup.find("div", {
            "class": "video-tabs-labels"
        }).script.text
        video_link = video_link[video_link.index("cdn.video.playwire.com/"):
                                video_link.index("/video-sd")].split("/")
        video_id1 = video_link[1]
        video_id2 = video_link[3]
        full_link = build_link1 + video_id1 + build_link2 + video_id2 + build_link3
        return full_link
    except (AttributeError, ValueError):
        # log("OurMatch: No video link found")
        return None
Example No. 23
def landSellSearchTableAdder():
    column_order = ["FID",
                    "REGION_NAME",
                    "NOTICE_NUM",
                    "LAND_LOCATION",
                    "TOTAL_AREA",
                    "CONSTRUCTION_AREA",
                    "PLAN_BUILD_AREA",
                    # "USE_NAME",
                    "PLAN_USE_CUSTOM",
                    "FLOOR_AREA_RATIO",
                    "GREENING_RATE",
                    "BUSSINESS_PROPORTION",
                    "BUILDING_DENSITY",
                    "ASSIGNMENT_METHOD",
                    "ASSIGNMENT_LIMIT",
                    "DATE_BEGIN",
                    "DATE_END",
                    "PRICE_BEGIN",
                    "SECURITY_DEPOSIT",
                    "NOTICE_USE"
                    ]
    if request.method == 'POST':
        land_search_data = json.loads(request.get_data())
        # log("land_search_data:", land_search_data)
        draw = land_search_data['draw']
        start = land_search_data['start']
        length = land_search_data['length']
        page = land_search_data['page']
        s_day = land_search_data['s_day']
        search_value = land_search_data['search_value']
        if s_day == '':
            s_day = '1900-01-01'
        else:
            mon = s_day[0:2]
            day = s_day[3:5]
            year = s_day[6:10]
            s_day = year + '-' + mon + '-' + day
        e_day = land_search_data['e_day']
        if e_day == '':
            e_day = '2300-01-01'
        else:
            mon = e_day[0:2]
            day = e_day[3:5]
            year = e_day[6:10]
            e_day = year + '-' + mon + '-' + day
        # log(e_day, s_day)
        region_name_list = [w.REGION_CODE for w in DICT_REGION.query.filter(
            DICT_REGION.REGION_NAME.in_(land_search_data['region_name_list'])).all()]
        land_location = '%' + land_search_data['land_location'] + '%'
        assignment_method_list = land_search_data['assignment_method_list']  # assignment method
        assignment_limit_list = land_search_data['assignment_limit_list']  # assignment term (years)
        # plan_use_list = [w.USE_CODE for w in DICT_LAND_USE.query.filter(
        #     DICT_LAND_USE.USE_NAME.in_(land_search_data['plan_use_list'])).all()]  # land-use classification
        plan_use_list = land_search_data['plan_use_list']
        log(plan_use_list)
        order = land_search_data['order']  # sort order

        # Query condition definitions
        # Region + notice period + land location
        if region_name_list == [] or str(region_name_list).__contains__('全部'):  # '全部' means "all"
            region_rules = and_(*[LAND_SELL_INFO.REGION_CODE.like(w) for w in ['%']],
                                LAND_SELL_INFO.LAND_LOCATION.like(land_location), LAND_SELL_INFO.DATE_BEGIN >= s_day,
                                LAND_SELL_INFO.DATE_END <= e_day)
        else:
            region_rules = and_(*[LAND_SELL_INFO.REGION_CODE.in_(w) for w in [region_name_list]],
                                LAND_SELL_INFO.LAND_LOCATION.like(land_location), LAND_SELL_INFO.DATE_BEGIN >= s_day,
                                LAND_SELL_INFO.DATE_END <= e_day)

        # Assignment method
        if assignment_method_list == [] or str(assignment_method_list).__contains__('全部'):
            assignment_method_rules = and_(*[LAND_SELL_INFO.ASSIGNMENT_METHOD.like(w) for w in ['%']])
        else:
            assignment_method_rules = and_(*[LAND_SELL_INFO.ASSIGNMENT_METHOD.in_(w) for w in [assignment_method_list]])

        # Assignment term (years)
        if assignment_limit_list == [] or str(assignment_limit_list).__contains__('全部'):
            assignment_limit_rules = and_(*[LAND_SELL_INFO.ASSIGNMENT_LIMIT.like(w) for w in ['%']])
        else:
            assignment_limit_rules = and_(*[LAND_SELL_INFO.ASSIGNMENT_LIMIT.in_(w) for w in [assignment_limit_list]])

        # Land-use classification
        if plan_use_list == [] or str(plan_use_list).__contains__('全部'):
            plan_use_rules = and_(*[LAND_SELL_INFO.PLAN_USE.like(w) for w in ['%']])
        else:
            plan_use_rules = and_(*[LAND_SELL_INFO.PLAN_USE_CUSTOM.in_(w) for w in [plan_use_list]])
            log(plan_use_rules)
        if (order[0].get('dir') == 'desc'):  # determine the sort direction
            recordsFiltered = LAND_SELL_INFO.query \
                .filter(region_rules,
                        assignment_method_rules,
                        assignment_limit_rules,
                        plan_use_rules).count()  # record count
            # Use paginate to fetch the data for the requested page
            pagination = LAND_SELL_INFO.query \
                .join(DICT_REGION,
                      LAND_SELL_INFO.REGION_CODE == DICT_REGION.REGION_CODE) \
                .with_entities(
                LAND_SELL_INFO.FID,
                LAND_SELL_INFO.NOTICE_NUM,
                LAND_SELL_INFO.LAND_LOCATION,
                LAND_SELL_INFO.TOTAL_AREA,
                LAND_SELL_INFO.CONSTRUCTION_AREA,
                LAND_SELL_INFO.PLAN_BUILD_AREA,
                LAND_SELL_INFO.NOTICE_USE,
                # DICT_LAND_USE.USE_NAME,
                LAND_SELL_INFO.PLAN_USE_CUSTOM,
                LAND_SELL_INFO.PLAN_USE_CUSTOM,
                LAND_SELL_INFO.FLOOR_AREA_RATIO,
                LAND_SELL_INFO.GREENING_RATE,
                LAND_SELL_INFO.BUSSINESS_PROPORTION,
                LAND_SELL_INFO.BUILDING_DENSITY,
                LAND_SELL_INFO.ASSIGNMENT_METHOD,
                LAND_SELL_INFO.ASSIGNMENT_LIMIT,
                LAND_SELL_INFO.DATE_BEGIN,
                LAND_SELL_INFO.DATE_END,
                DICT_REGION.REGION_NAME,
                LAND_SELL_INFO.PRICE_BEGIN,
                LAND_SELL_INFO.SECURITY_DEPOSIT,
                LAND_SELL_INFO.CREATE_BY,
                LAND_SELL_INFO.CREATE_TIME,
                LAND_SELL_INFO.MODIFIER_BY,
                LAND_SELL_INFO.MODIFIER_TIME
            ).filter(region_rules,
                     assignment_method_rules,
                     assignment_limit_rules,
                     plan_use_rules) \
                .order_by(desc(column_order[order[0].get('column')])) \
                .paginate(page=page, per_page=length, error_out=True)
        else:
            recordsFiltered = LAND_SELL_INFO.query \
                .filter(region_rules,
                        assignment_method_rules,
                        assignment_limit_rules,
                        plan_use_rules).count()  # record count
            # Use paginate to fetch the data for the requested page
            pagination = LAND_SELL_INFO.query \
                .join(DICT_REGION,
                      LAND_SELL_INFO.REGION_CODE == DICT_REGION.REGION_CODE) \
                .with_entities(
                LAND_SELL_INFO.FID,
                LAND_SELL_INFO.NOTICE_NUM,
                LAND_SELL_INFO.LAND_LOCATION,
                LAND_SELL_INFO.TOTAL_AREA,
                LAND_SELL_INFO.CONSTRUCTION_AREA,
                LAND_SELL_INFO.PLAN_BUILD_AREA,
                LAND_SELL_INFO.NOTICE_USE,
                # DICT_LAND_USE.USE_NAME,
                LAND_SELL_INFO.PLAN_USE_CUSTOM,
                LAND_SELL_INFO.PLAN_USE_CUSTOM,
                LAND_SELL_INFO.FLOOR_AREA_RATIO,
                LAND_SELL_INFO.GREENING_RATE,
                LAND_SELL_INFO.BUSSINESS_PROPORTION,
                LAND_SELL_INFO.BUILDING_DENSITY,
                LAND_SELL_INFO.ASSIGNMENT_METHOD,
                LAND_SELL_INFO.ASSIGNMENT_LIMIT,
                LAND_SELL_INFO.DATE_BEGIN,
                LAND_SELL_INFO.DATE_END,
                DICT_REGION.REGION_NAME,
                LAND_SELL_INFO.PRICE_BEGIN,
                LAND_SELL_INFO.SECURITY_DEPOSIT,
                LAND_SELL_INFO.CREATE_BY,
                LAND_SELL_INFO.CREATE_TIME,
                LAND_SELL_INFO.MODIFIER_BY,
                LAND_SELL_INFO.MODIFIER_TIME
            ).filter(region_rules,
                     assignment_method_rules,
                     assignment_limit_rules,
                     plan_use_rules) \
                .order_by(asc(column_order[order[0].get('column')])) \
                .paginate(page=page, per_page=length, error_out=True)

        recordsTotal = recordsFiltered
        objs = pagination.items

        # Turn the page objects into an array to use as the data source
        data = []
        num = 0
        for obj in objs:
            num = num + 1
            objSearchValue(num, search_value, data, obj)
        res = {
            # Per the docs these four parameters are required; an 'error' field is optional
            'draw': draw,
            'recordsTotal': recordsTotal,
            'recordsFiltered': recordsFiltered,
            'data': data,
            'search_value': search_value
        }
        # log(jsonify(res))
        return jsonify(res)
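The date handling above re-slices the incoming "MM-DD-YYYY" strings into "YYYY-MM-DD" so they compare correctly against DATE_BEGIN/DATE_END; a tiny illustration with a made-up value:

s_day = "07-15-2023"  # hypothetical incoming value, MM-DD-YYYY
s_day = s_day[6:10] + '-' + s_day[0:2] + '-' + s_day[3:5]
print(s_day)  # 2023-07-15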
Example No. 24
 def execute_plugins(plugins):
     for plugin in plugins:
         util.log('Running plugin: %s.%s' %
                  (plugin.__module__, plugin.__class__.__name__))
         plugin.run(context)
Example No. 25
def highlightsfootball(href):
    html = requests.get(href)
    soup = BeautifulSoup(html.content, "lxml")
    log(soup.prettify())
Example No. 26
 def respond(self):
     util.log('Begin IOEngineRun execute')
     input = self.get_input()
     io.Engine.run(input)
     util.log('End IOEngineRun execute')
Example No. 27
 def respond(self, action_id):
     util.log('Begin IOEngineCronRun execute')
     io.Engine.run({'action_model': '83', 'action_id': action_id})
     util.log('End IOEngineCronRun execute')