コード例 #1
0
ファイル: reporting.py プロジェクト: Smarsh/norc
def save_csv(csv, fn):
    """Write rows of *csv* (an iterable of row iterables) to file *fn*.

    Each row's values are stringified with str() and joined by commas.
    No quoting/escaping is performed, so values containing commas or
    newlines will corrupt the output.

    Note: the parameter is named `csv` by the original API (it shadows
    the stdlib module name, but renaming would break callers).
    """
    log.info("Savin' CSV to '%s'" % (fn))
    # 'with' guarantees the file is closed even if a write raises,
    # unlike the previous open()/close() pair.
    with open(fn, 'w') as fh:
        for line in csv:
            fh.write(','.join(map(str, line)))
            fh.write('\n')
コード例 #2
0
ファイル: actions.py プロジェクト: codenrhoden/rdopkg
def copr_build(srpm_url, release, dist, package, version,
               update_file=None, copr_owner='jruzicka', skip_build=False):
    """Build the SRPM at *srpm_url* in copr for (release, dist) and emit
    an update entry for the resulting build.

    With *skip_build*, the copr build is skipped but the update entry is
    still produced.  When *update_file* is given, the build entry is also
    dumped there.

    NOTE(review): *package* and *version* are accepted but unused here,
    and the Build source hard-codes 'copr-jruzicka' regardless of
    *copr_owner* -- confirm against callers before changing either.
    """
    if skip_build:
        log.info("\nSkipping copr build due to -s/--skip-build")
    else:
        copr = _copr.RdoCoprs()
        copr_name = _copr.rdo_copr_name(release, dist)
        repo_url = copr.get_repo_url(release, dist)
        web_url = copr.get_builds_url(release, dist)
        # Summary of where the build goes; log.term provides the styling.
        log.info("\n{t.bold}copr:{t.normal} {owner} / {copr}\n"
                 "{t.bold}SRPM:{t.normal} {srpm}\n"
                 "{t.bold}repo:{t.normal} {repo}\n"
                 "{t.bold}web: {t.normal} {web}".format(
                 owner=copr_owner,
                 copr=copr_name,
                 srpm=srpm_url,
                 repo=repo_url,
                 web=web_url,
                 t=log.term))
        # watch=True: presumably blocks until the copr build finishes -- verify.
        copr.new_build(srpm_url, release, dist, watch=True)
    # The update entry is produced whether or not the build was skipped.
    build = rdoupdate.core.Build(id=_copr.copr_fetcher_id(srpm_url),
                                 repo=release,
                                 dist=dist,
                                 source='copr-jruzicka')
    _show_update_entry(build)
    if update_file:
        _update.dump_build(build, update_file)
コード例 #3
0
ファイル: views.py プロジェクト: knv/NewsBlur
def opml_upload(request):
    """Import feeds/folders from an OPML document uploaded via POST.

    Expects the OPML under the 'file' key of request.FILES.  On success
    the payload contains the imported folder structure plus the user's
    current subscriptions; on a missing file, code is -1 with an error
    message.  Returns a text/plain HTTP response with JSON-encoded data.
    """
    xml_opml = None
    message = "OK"
    code = 1
    payload = {}
    
    if request.method == 'POST':
        if 'file' in request.FILES:
            logging.info(" ---> [%s] ~FR~SBOPML upload starting..." % request.user)
            # Renamed from 'file' to avoid shadowing the builtin.
            uploaded = request.FILES['file']
            xml_opml = uploaded.read()
            opml_importer = OPMLImporter(xml_opml, request.user)
            folders = opml_importer.process()

            feeds = UserSubscription.objects.filter(user=request.user).values()
            payload = dict(folders=folders, feeds=feeds)
            logging.info(" ---> [%s] ~FR~SBOPML Upload: ~SK%s~SN~SB~FR feeds" % (request.user, len(feeds)))
            
            request.session['import_from_google_reader'] = False
        else:
            message = "Attach an .opml file."
            code = -1
            
    data = json.encode(dict(message=message, code=code, payload=payload))
    return HttpResponse(data, mimetype='text/plain')
コード例 #4
0
ファイル: rename_lang_xls.py プロジェクト: esozh/eso_zh_ui
def rename_file(filename, suffix):
    """Rename an exported spreadsheet to ESO_<category>_<suffix>.xlsx,
    keeping it in its original directory."""
    category = get_category(filename)
    directory = os.path.dirname(filename)
    target = os.path.join(
        directory, 'ESO_%s_%s.xlsx' % (category_names[category], suffix))
    log.info('%s -> %s' % (filename, target))
    shutil.move(filename, target)
コード例 #5
0
ファイル: views.py プロジェクト: dkeskar/NewsBlur
def exception_change_feed_link(request):
    """Fix a feed stuck in an exception state by pointing it at a new page link.

    Autodiscovers the feed address from the submitted link; on success the
    exception flags are cleared and the feed is rescheduled.  Returns
    {'code': 1} on success, {'code': -1} if no feed was found at the link.
    """
    feed_id = request.POST['feed_id']
    feed = get_object_or_404(Feed, pk=feed_id)
    feed_link = request.POST['feed_link']
    code = -1
    
    # Only feeds actually in an exception state may be changed this way.
    if not feed.has_page_exception and not feed.has_feed_exception:
        logging.info(" ***********> [%s] Incorrect feed link change: %s" % (request.user, feed))
        # This Forbidden-403 throws an error, which sounds pretty good to me right now
        return HttpResponseForbidden()
    
    # Discover the actual feed URL behind the page link.
    feed_address = feedfinder.feed(feed_link)
    if feed_address:
        code = 1
        feed.has_page_exception = False
        feed.active = True
        feed.fetched_once = False
        feed.feed_link = feed_link
        feed.feed_address = feed_address
        feed.next_scheduled_update = datetime.datetime.now()
        try:
            feed.save()
        except IntegrityError:
            # Another feed already owns this address: revive that feed and
            # merge this one into it instead of keeping a duplicate.
            original_feed = Feed.objects.get(feed_address=feed_address)
            original_feed.next_scheduled_update = datetime.datetime.now()
            original_feed.has_page_exception = False
            original_feed.active = True
            original_feed.save()
            merge_feeds(original_feed.pk, feed.pk)
    
    return {'code': code}
    
    
コード例 #6
0
ファイル: views.py プロジェクト: tosh/NewsBlur
def mark_story_as_read(request):
    """Mark one or more stories in a single feed as read for request.user.

    Creates a MUserStory read-receipt per story; duplicates are tolerated.
    Returns dict(code=0, payload=<story_ids>).
    """
    story_ids = request.REQUEST.getlist('story_id')
    feed_id = int(request.REQUEST['feed_id'])
    
    # Flag the subscription so unread counts get recalculated lazily.
    usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
    if not usersub.needs_unread_recalc:
        usersub.needs_unread_recalc = True
        usersub.save()
        
    data = dict(code=0, payload=story_ids)
    
    if len(story_ids) > 1:
        logging.debug(" ---> [%s] Read %s stories in feed: %s" % (request.user, len(story_ids), usersub.feed))
    else:
        logging.debug(" ---> [%s] Read story in feed: %s" % (request.user, usersub.feed))
        
    for story_id in story_ids:
        # NOTE(review): [0] raises IndexError if the story no longer exists --
        # confirm story_ids always refer to live stories.
        story = MStory.objects(story_feed_id=feed_id, story_guid=story_id)[0]
        now = datetime.datetime.utcnow()
        m = MUserStory(story=story, user_id=request.user.pk, feed_id=feed_id, read_date=now)
        try:
            m.save()
        except OperationError:
            # Already marked as read (duplicate receipt) -- log and move on.
            logging.info(' ---> [%s] *** Marked story as read: Duplicate Story -> %s' % (request.user, story_id))
    
    return data
コード例 #7
0
ファイル: route.py プロジェクト: bk7477890/cobra
    def __init__(self):
        """Register the HTTP routes on the web app held in self.web.

        Route handlers are defined as closures here; their names double as
        endpoint names, so they must not be renamed casually.
        """
        log.info('Initialization HTTP Server')

        @self.web.route('/', methods=['GET'])
        def homepage():
            # Landing page.
            log.debug('In homepage Route')
            return render_template('index.html')

        @self.web.route('/blank')
        def blank():
            # Blank placeholder page.
            log.debug('In blank Route')
            return render_template('blank.html')

        @self.web.route('/add', methods=['POST'])
        def add():
            # NOTE(review): returns a hard-coded id -- looks like a stub.
            log.debug('In add Route')
            return jsonify(code=1001, msg='success', id=123)

        @self.web.route('/status/<int:id>', methods=['GET'])
        def status(id):
            # NOTE(review): status is hard-coded to 'running' -- stub?
            log.debug('In status Route')
            return jsonify(code=1001, msg='success', status='running')

        @self.web.errorhandler(404)
        def page_not_found(e):
            # Custom 404 page.
            log.debug('In 404 Route')
            return render_template('404.html'), 404
コード例 #8
0
ファイル: views.py プロジェクト: francois2metz/NewsBlur
def save_classifier(request):
    """Save like/dislike training (classifiers) for a feed from POST data.

    For each content type (author/tag/title/feed) the POST may carry
    'like_*' / 'dislike_*' keys (score 1 / -1) or 'remove_like_*' /
    'remove_dislike_*' keys (score 0, which deletes the classifier).
    Returns dict(code=0, message='OK', payload={}).
    """
    post = request.POST
    logging.info(" ---> [%s] ~FGSaving classifier: ~FW%s" % (request.user, post))
    feed_id = int(post['feed_id'])
    feed = get_object_or_404(Feed, pk=feed_id)
    code = 0
    message = 'OK'
    payload = {}

    # Mark subscription as dirty, so unread counts can be recalculated
    usersub = UserSubscription.objects.get(user=request.user, feed=feed)
    if not usersub.needs_unread_recalc or not usersub.is_trained:
        usersub.needs_unread_recalc = True
        usersub.is_trained = True
        usersub.save()

    def _save_classifier(ClassifierCls, content_type):
        # Map every possible POST key for this content type to its score.
        classifiers = {
            'like_'+content_type: 1, 
            'dislike_'+content_type: -1,
            'remove_like_'+content_type: 0,
            'remove_dislike_'+content_type: 0,
        }
        for opinion, score in classifiers.items():
            if opinion in post:
                post_contents = post.getlist(opinion)
                for post_content in post_contents:
                    if not post_content: continue
                    classifier_dict = {
                        'user_id': request.user.pk,
                        'feed_id': feed_id,
                        'defaults': {
                            'score': score
                        }
                    }
                    # Feed classifiers key only on user/feed; the others
                    # also key on the actual author/tag/title value.
                    if content_type in ('author', 'tag', 'title'):
                        classifier_dict.update({content_type: post_content})

                    classifier, _created = ClassifierCls.objects.get_or_create(**classifier_dict)
                    if score == 0:
                        # 'remove_*' opinion: drop the classifier entirely.
                        classifier.delete()
                    elif classifier.score != score:
                        # BUGFIX: removed an unreachable inner `score == 0`
                        # branch here -- score 0 is fully handled above, so
                        # this arm only ever updates the stored score.
                        classifier.score = score
                        classifier.save()

    _save_classifier(MClassifierAuthor, 'author')
    _save_classifier(MClassifierTag, 'tag')
    _save_classifier(MClassifierTitle, 'title')
    _save_classifier(MClassifierFeed, 'feed')

    logging.info(" ---> [%s] ~FGFeed training: ~SB%s" % (request.user, feed))

    response = dict(code=code, message=message, payload=payload)
    return response
コード例 #9
0
ファイル: views.py プロジェクト: dkeskar/NewsBlur
def exception_change_feed_address(request):
    """Fix a feed stuck in an exception state by assigning a new feed address.

    Clears the exception flags and reschedules the feed; on an address
    collision the existing feed is revived and this one merged into it.
    Returns {'code': 1}.
    """
    feed_id = request.POST['feed_id']
    feed = get_object_or_404(Feed, pk=feed_id)
    feed_address = request.POST['feed_address']
    
    # Only feeds actually in an exception state may be changed this way.
    if not feed.has_feed_exception and not feed.has_page_exception:
        logging.info(" ***********> [%s] Incorrect feed address change: %s" % (request.user, feed))
        return HttpResponseForbidden()
        
    feed.has_feed_exception = False
    feed.active = True
    feed.fetched_once = False
    feed.feed_address = feed_address
    feed.next_scheduled_update = datetime.datetime.now()
    try:
        feed.save()
    except IntegrityError:
        # Another feed already owns this address: revive it and merge
        # this feed into it instead of keeping a duplicate.
        original_feed = Feed.objects.get(feed_address=feed_address)
        original_feed.next_scheduled_update = datetime.datetime.now()
        original_feed.has_feed_exception = False
        original_feed.active = True
        original_feed.save()
        merge_feeds(original_feed.pk, feed.pk)
    
    return {'code': 1}
コード例 #10
0
ファイル: forms.py プロジェクト: 76/NewsBlur
 def clean(self):
     """Validate login credentials, resolving the account by username or email.

     Sets self.user_cache on success; raises ValidationError on a wrong
     password or an unknown username.
     """
     username = self.cleaned_data.get('username', '').lower()
     password = self.cleaned_data.get('password', '')
     
     # Match on username (case-insensitive) or exact email address.
     user = User.objects.filter(Q(username__iexact=username) | Q(email=username))
     if user:
         user = user[0]
     if username and user:
         self.user_cache = authenticate(username=user.username, password=password)
         if self.user_cache is None:
             # presumably blank_authenticate() detects accounts with a blank
             # password; such accounts get their password reset to their
             # username before retrying -- TODO confirm its semantics.
             blank = blank_authenticate(user.username)
             if blank:
                 user.set_password(user.username)
                 user.save()
             self.user_cache = authenticate(username=user.username, password=user.username)
         if self.user_cache is None:
             # Retry against the account matched by email only.
             email_user = User.objects.filter(email=username)
             if email_user:
                 email_user = email_user[0]
                 self.user_cache = authenticate(username=email_user.username, password=password)
                 if self.user_cache is None:
                     # Same blank-password repair path for the email match.
                     blank = blank_authenticate(email_user.username)
                     if blank:
                         email_user.set_password(email_user.username)
                         email_user.save()
                     self.user_cache = authenticate(username=email_user.username, password=email_user.username)
         if self.user_cache is None:
             logging.info(" ***> [%s] Bad Login" % username)
             raise forms.ValidationError(_("Whoopsy-daisy, wrong password. Try again."))
     elif username and not user:
         raise forms.ValidationError(_("That username is not registered. Please try again."))
         
     return self.cleaned_data
コード例 #11
0
ファイル: views.py プロジェクト: mrcrabby/NewsBlur
def exception_change_feed_address(request):
    """Fix a feed stuck in an exception state by assigning a new address,
    then refetch it and return the refreshed subscription.

    Returns {'code': 1, 'feeds': {<feed_id>: <canonical subscription>}}.
    """
    feed_id = request.POST['feed_id']
    feed = get_object_or_404(Feed, pk=feed_id)
    feed_address = request.POST['feed_address']
    
    # Only feeds actually in an exception state may be changed this way.
    if not feed.has_feed_exception and not feed.has_page_exception:
        logging.info(" ***> [%s] ~BRIncorrect feed address change: ~SB%s" % (request.user, feed))
        return HttpResponseForbidden()
        
    feed.has_feed_exception = False
    feed.active = True
    feed.fetched_once = False
    feed.feed_address = feed_address
    feed.next_scheduled_update = datetime.datetime.utcnow()
    retry_feed = feed
    # feed.save() apparently signals an address collision by returning the
    # id of the feed that already owns the address -- confirm in Feed.save.
    duplicate_feed_id = feed.save()
    if duplicate_feed_id:
        # Revive the existing feed and merge this one into it; the merged
        # feed is the one we retry fetching.
        original_feed = Feed.objects.get(pk=duplicate_feed_id)
        retry_feed = original_feed
        original_feed.next_scheduled_update = datetime.datetime.utcnow()
        original_feed.has_feed_exception = False
        original_feed.active = True
        original_feed.save()
        merge_feeds(original_feed.pk, feed.pk)
    
    logging.user(request, "~FRFixing feed exception by address: ~SB%s" % (retry_feed.feed_address))
    retry_feed.update()
    
    # Recompute the user's unread scores against the (possibly merged) feed.
    usersub = UserSubscription.objects.get(user=request.user, feed=retry_feed)
    usersub.calculate_feed_scores(silent=False)
    
    feeds = {feed.pk: usersub.canonical(full=True)}
    return {'code': 1, 'feeds': feeds}
コード例 #12
0
ファイル: views.py プロジェクト: vvarp/NewsBlur
def mark_story_as_unread(request):
    """Mark a single story as unread for request.user.

    Returns dict(code=0, payload={'story_id': ...}) on success, or
    dict(code=-1) when no subscription exists for the feed (or for a
    feed it was merged into).
    """
    story_id = request.POST["story_id"]
    feed_id = int(request.POST["feed_id"])

    try:
        usersub = UserSubscription.objects.select_related("feed").get(user=request.user, feed=feed_id)
    except Feed.DoesNotExist:
        # The feed may have been merged away; look for a duplicate record.
        duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
        if duplicate_feed:
            try:
                usersub = UserSubscription.objects.get(user=request.user, feed=duplicate_feed[0].feed)
            except Feed.DoesNotExist:
                return dict(code=-1)
        else:
            # BUGFIX: previously fell through with 'usersub' unbound and
            # raised NameError below. No subscription at all -> error code.
            return dict(code=-1)

    # Flag the subscription so unread counts get recalculated lazily.
    if not usersub.needs_unread_recalc:
        usersub.needs_unread_recalc = True
        usersub.save()

    data = dict(code=0, payload=dict(story_id=story_id))
    logging.info(" ---> [%s] ~FY~SBUnread~SN story in feed: %s" % (request.user, usersub.feed))

    # Delete the read-receipt so the story counts as unread again.
    story = MStory.objects(story_feed_id=feed_id, story_guid=story_id)[0]
    m = MUserStory.objects(story=story, user_id=request.user.pk, feed_id=feed_id)
    m.delete()

    return data
コード例 #13
0
ファイル: views.py プロジェクト: vvarp/NewsBlur
def save_feed_chooser(request):
    """Activate the user's approved feeds (capped at 64), deactivate the
    rest, and queue fetches for the newly activated ones."""
    from apps.feed_import.models import queue_new_feeds

    chosen = [int(pk) for pk in request.POST.getlist("approved_feeds")][:64]
    subscriptions = UserSubscription.objects.filter(user=request.user)
    activated = 0

    for subscription in subscriptions:
        try:
            if subscription.feed.pk in chosen:
                subscription.active = True
                activated += 1
                subscription.save()
            elif subscription.active:
                subscription.active = False
                subscription.save()
        except Feed.DoesNotExist:
            # Broken subscription pointing at a deleted feed -- skip it.
            pass

    queue_new_feeds(request.user)

    logging.info(
        " ---> [%s] ~BB~FW~SBActivated standard account: ~FC%s~SN/~SB%s" % (request.user, activated, subscriptions.count())
    )
    return {"activated": activated}
コード例 #14
0
ファイル: action.py プロジェクト: djipko/rdopkg
 def run_action(self, action, args=None):
     """Invoke *action*'s function with arguments assembled from *args*.

     The action's const_args override caller-supplied *args*; each
     positional parameter of the action function is then filled from
     *args* by name, falling back to the function's own defaults.

     Raises ActionFunctionNotAvailable when no function can be resolved,
     and RequiredActionArgumentNotAvailable for a missing required arg.
     NOTE(review): mutates the caller's *args* dict with const_args.
     """
     if not args:
         args = {}
     if not action.atomic:
         log.info(log.term.bold("## %s" % action.name))
     # Const args take precedence over caller-supplied args.
     for carg in action.const_args:
         args[carg] = action.const_args[carg]
     action_fun = action.action_fun
     if not action_fun:
         action_fun, action_module = self._get_action_fun(action.name)
         if not action_fun:
             raise exception.ActionFunctionNotAvailable(action=action.name)
     # Map the function's signature onto the available args, positionally.
     argspec = inspect.getargspec(action_fun)
     fun_args = []
     if argspec.defaults:
         n_defaults = len(argspec.defaults)
     else:
         n_defaults = 0
     # Parameters without defaults are required.
     n_required = len(argspec.args) - n_defaults
     for i, arg in enumerate(argspec.args):
         if arg in args:
             fun_args.append(args[arg])
         else:
             if i < n_required:
                 raise exception.RequiredActionArgumentNotAvailable(
                     action=action.name, arg=arg)
             else:
                 # Use the function's own default for this position.
                 fun_args.append(argspec.defaults[i - n_required])
     return action_fun(*fun_args)
コード例 #15
0
ファイル: forms.py プロジェクト: AnushPrem/NewsBlur
 def clean(self):
     """Validate login credentials, including a one-off repair path for
     passwords that had the letters 'j' and 'k' accidentally stripped.

     Sets self.user_cache on success; raises ValidationError otherwise.
     """
     username = self.cleaned_data.get('username', '').lower()
     password = self.cleaned_data.get('password', '')
     
     # Match on username (case-insensitive) or exact email address.
     user = User.objects.filter(Q(username__iexact=username) | Q(email=username))
     if username and user:
         self.user_cache = authenticate(username=user[0].username, password=password)
         if self.user_cache is None:
             # Retry against the account matched by email only.
             email_username = User.objects.filter(email=username)
             if email_username:
                 self.user_cache = authenticate(username=email_username[0].username, password=password)
             if self.user_cache is None:
                 # logging.info(" ***> [%s] Bad Login: TRYING JK-LESS PASSWORD" % username)
                 jkless_password = password.replace('j', '').replace('k', '')
                 self.user_cache = authenticate(username=username, password=jkless_password)
                 if self.user_cache is None:
                     logging.info(" ***> [%s] Bad Login" % username)
                     raise forms.ValidationError(_("Whoopsy-daisy. Try again."))
                 else:
                     # Supreme f**k-up. Accidentally removed the letters J and K from
                     # all user passwords. Re-save with correct password.
                     logging.info(" ***> [%s] FIXING JK-LESS PASSWORD" % username)
                     self.user_cache.set_password(password)
                     self.user_cache.save()
             elif not self.user_cache.is_active:
                 raise forms.ValidationError(_("This account is inactive."))
     elif username and not user:
         raise forms.ValidationError(_("That username is not registered. Create an account with it instead."))
         
     return self.cleaned_data
コード例 #16
0
ファイル: resolver.py プロジェクト: berenm/gentulu
    def cache(system_url):
        """Return the path of a locally cached copy of *system_url*,
        downloading it on a cache miss.

        Bare paths that already point inside the cache are mapped back to
        their file:// source first.  Schemes other than http/https/file
        yield None.
        """
        url = urlparse(system_url)
        # A scheme-less path under the cache dir refers back to a file:// URL.
        if url.scheme == "" and url.path.startswith(resolver.CACHE_PATH):
            system_url = system_url.replace(resolver.CACHE_PATH, "file://")
            url = urlparse(system_url)

        # Anything we can't fetch is not cacheable.
        if url.scheme not in ["http", "https", "file"]:
            return None

        directory, basename = os.path.split(url.path)
        directory = directory.replace("/", os.sep)

        # Host-less URLs (file://) are filed under "localhost".
        if url.hostname is None:
            directory = "localhost" + directory
        else:
            directory = url.hostname + directory

        cache_dir = os.path.join(resolver.CACHE_PATH, directory)
        cached_file = os.path.join(cache_dir, basename)

        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)

        # Directory-style URLs are cached under a synthetic INDEX file.
        if cached_file.endswith(os.sep):
            cached_file += "INDEX"

        if not os.path.exists(cached_file):
            log.info("retrieving %s..." % system_url)
            urlretrieve(system_url, cached_file)

        return cached_file
コード例 #17
0
ファイル: actions.py プロジェクト: ktdreyer/rdopkg
def get_package_env(version=None, release=None, dist=None,
                    patches_branch=None, local_patches_branch=None):
    """Collect package/branch metadata for the current distgit checkout,
    guessing any value not supplied by the caller."""
    branch = git.current_branch()
    # When sitting on a *-patches branch, hop over to its distgit branch.
    if branch.endswith('-patches'):
        branch = branch[:-len('-patches')]
        if not git.branch_exists(branch):
            raise exception.InvalidUsage(
                why="This action must be run on a distgit branch.")
        log.info("This looks like -patches branch. Assuming distgit branch: "
                "%s" % branch)
        git.checkout(branch)

    args = {
        'package': guess.package(),
        'branch': branch,
    }
    if not (release and dist):
        _release, _dist = guess.osreleasedist(branch, default=(None, None))
        if _release and not release:
            args['release'] = _release
        if _dist and not dist:
            args['dist'] = _dist

    osdist = guess.osdist()
    if osdist == 'RHOS':
        log.info("RHOS package detected.")
        # RHOS packages are built with rhpkg instead of fedpkg.
        args['fedpkg'] = ['rhpkg']

    if not patches_branch:
        patches_branch = guess.patches_branch(branch, pkg=args['package'],
                                              osdist=osdist)
    args['patches_branch'] = patches_branch
    if not local_patches_branch:
        # Strip the remote prefix ("remote/branch" -> "branch").
        args['local_patches_branch'] = patches_branch.partition('/')[2]
    if not version:
        args['version'] = guess.current_version()
    return args
コード例 #18
0
ファイル: views.py プロジェクト: francois2metz/NewsBlur
def save_feed_chooser(request):
    """Activate the user's approved feeds (capped at 64), deactivate the
    rest, then queue fetches and refresh stale feeds for the user."""
    chosen = [int(pk) for pk in request.POST.getlist('approved_feeds')][:64]
    subscriptions = UserSubscription.objects.filter(user=request.user)
    activated = 0

    for subscription in subscriptions:
        try:
            if subscription.feed.pk in chosen:
                subscription.active = True
                activated += 1
                subscription.save()
                subscription.feed.count_subscribers()
            elif subscription.active:
                subscription.active = False
                subscription.save()
        except Feed.DoesNotExist:
            # Broken subscription pointing at a deleted feed -- skip it.
            pass

    logging.info(' ---> [%s] ~BB~FW~SBActivated standard account: ~FC%s~SN/~SB%s' % (
        request.user, activated, subscriptions.count()))
    request.user.profile.queue_new_feeds()
    request.user.profile.refresh_stale_feeds(exclude_new=True)

    return {'activated': activated}
コード例 #19
0
ファイル: tasks.py プロジェクト: bodegard/NewsBlur
    def run(self, feed_pks, **kwargs):
        """Task entry point: fetch/update each feed in *feed_pks*.

        Queue entries whose id no longer resolves to the same feed are
        removed from the 'tasked_feeds' redis zset.  Accepts a single pk
        or a list of pks.
        """
        from apps.rss_feeds.models import Feed
        from apps.statistics.models import MStatistics
        r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)

        # Skip score computation while mongo replication lags too far behind.
        mongodb_replication_lag = int(MStatistics.get('mongodb_replication_lag', 0))
        compute_scores = bool(mongodb_replication_lag < 10)
        
        # Optionally profile DB traffic for this celery task.
        profiler = DBProfilerMiddleware()
        profiler_activated = profiler.process_celery()
        if profiler_activated:
            mongo_middleware = MongoDumpMiddleware()
            mongo_middleware.process_celery(profiler)
            redis_middleware = RedisDumpMiddleware()
            redis_middleware.process_celery(profiler)
        
        # Per-run fetch options sourced from site-wide statistics.
        options = {
            'quick': float(MStatistics.get('quick_fetch', 0)),
            'updates_off': MStatistics.get('updates_off', False),
            'compute_scores': compute_scores,
            'mongodb_replication_lag': mongodb_replication_lag,
        }
        
        # Allow a bare pk as well as a list of pks.
        if not isinstance(feed_pks, list):
            feed_pks = [feed_pks]
            
        for feed_pk in feed_pks:
            feed = Feed.get_by_id(feed_pk)
            # A missing feed or a redirected id means the queue entry is stale.
            if not feed or feed.pk != int(feed_pk):
                logging.info(" ---> ~FRRemoving feed_id %s from tasked_feeds queue, points to %s..." % (feed_pk, feed and feed.pk))
                r.zrem('tasked_feeds', feed_pk)
            if feed:
                feed.update(**options)
                if profiler_activated: profiler.process_celery_finished()
コード例 #20
0
ファイル: views.py プロジェクト: francois2metz/NewsBlur
def load_starred_stories(request):
    """Return the user's starred stories, paginated via offset/limit/page.

    Dates are localized to the user's timezone and pre-formatted for
    display; every story is flagged read/starred with neutral
    intelligence scores.
    """
    user = get_user(request)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 10))
    page = int(request.REQUEST.get('page', 0))
    # 'page' overrides a raw offset when given.
    if page: offset = limit * page
        
    mstories = MStarredStory.objects(user_id=user.pk).order_by('-starred_date')[offset:offset+limit]
    stories = Feed.format_stories(mstories)
    
    # Hoisted out of the loop: the timezone and "now" are loop-invariant
    # (previously recomputed for every story).
    timezone = user.profile.timezone
    now = localtime_for_timezone(datetime.datetime.now(), timezone)
    for story in stories:
        story_date = localtime_for_timezone(story['story_date'], timezone)
        story['short_parsed_date'] = format_story_link_date__short(story_date, now)
        story['long_parsed_date'] = format_story_link_date__long(story_date, now)
        starred_date = localtime_for_timezone(story['starred_date'], timezone)
        story['starred_date'] = format_story_link_date__long(starred_date, now)
        story['read_status'] = 1
        story['starred'] = True
        story['intelligence'] = {
            'feed': 0,
            'author': 0,
            'tags': 0,
            'title': 0,
        }
    
    logging.info(" ---> [%s] ~FCLoading starred stories: ~SB%s stories" % (request.user, len(stories)))
    
    return dict(stories=stories)
コード例 #21
0
ファイル: models.py プロジェクト: TKupels/NewsBlur
 def switch_feed(self, new_feed, old_feed):
     """Replace *old_feed* with *new_feed* in the user's subscription folders.

     NOTE(review): this snippet looks truncated -- user_sub_folders is
     fetched but never used in the visible body; the rewrite presumably
     continues below.
     """
     # Rewrite feed in subscription folders
     try:
         user_sub_folders = UserSubscriptionFolders.objects.get(user=self.user)
     except Exception, e:
         # Best-effort: log and bail out if the folders record is missing/broken.
         logging.info(" *** ---> UserSubscriptionFolders error: %s" % e)
         return
コード例 #22
0
ファイル: actions.py プロジェクト: ktdreyer/rdopkg
def new_version_setup(new_version=None):
    """Build the action-args dict for updating the package to *new_version*.

    When *new_version* is omitted, the latest git tag on the guessed
    upstream branch is used (requires an upstream remote to exist).
    """
    args = {}
    if not new_version:
        upstream = guess.upstream_branch()
        if not git.ref_exists('refs/remotes/%s' % upstream):
            message = ("Upstream branch not found: %s\n"
                       "Can't guess latest version.\n\n"
                       "a) provide new version (git tag) yourself\n"
                       "   $ rdopkg new-version 1.2.3\n\n"
                       "b) add upstream git remote:\n"
                       "   $ git remote add -f upstream GIT_URL\n"
                       % upstream)
            raise exception.CantGuess(msg=message)
        new_version = git.get_latest_tag(upstream)
        args['new_version'] = new_version
        log.info("Latest version detected from %s: %s" % (upstream, new_version))

    args['changes'] = ['Update to upstream %s' % new_version]
    args['new_patches_base'] = new_version

    current_rpm_version = specfile.Spec().get_tag('Version')
    new_rpm_version, new_milestone = specfile.version_parts(new_version)
    args['new_rpm_version'] = new_rpm_version
    if new_milestone:
        args['new_milestone'] = new_milestone
    if current_rpm_version != new_rpm_version:
        # Version bump: reset Release (0.1 for pre-release milestones).
        args['new_release'] = '0.1' if new_milestone else '1'
    return args
コード例 #23
0
ファイル: views.py プロジェクト: tosh/NewsBlur
def load_feed_statistics(request):
    """Assemble display statistics for a feed: update schedule, story
    volume history, and subscriber count."""
    feed_id = request.GET['feed_id']
    feed = get_object_or_404(Feed, pk=feed_id)
    # Refresh the stored story-history stats before reading them back.
    feed.save_feed_story_history_statistics()

    # Dates of last and next update
    stats = {
        'last_update': relative_timesince(feed.last_update),
        'next_update': relative_timeuntil(feed.next_scheduled_update),
    }

    # Minutes between updates
    update_interval_minutes, _random_factor = feed.get_next_scheduled_update()
    stats['update_interval_minutes'] = update_interval_minutes

    # Stories per month - average and month-by-month breakout
    stats['average_stories_per_month'] = feed.average_stories_per_month
    history = feed.story_count_history
    stats['story_count_history'] = history and json.decode(history)

    # Subscribers
    stats['subscriber_count'] = feed.num_subscribers

    logging.info(" ---> [%s] Statistics: %s" % (request.user, feed))

    return stats
コード例 #24
0
ファイル: models.py プロジェクト: TKupels/NewsBlur
 def collect_orphan_feeds(cls, user):
     """Re-attach subscribed feeds missing from *user*'s folder structure.

     Any subscribed feed id not found anywhere in the (nested) folders
     JSON is appended to the top level and the folders are saved.
     """
     us = cls.objects.filter(user=user)
     try:
         usf = UserSubscriptionFolders.objects.get(user=user)
     except UserSubscriptionFolders.DoesNotExist:
         # No folder record to repair.
         return
     us_feed_ids = set([sub.feed_id for sub in us])
     folders = json.decode(usf.folders)
     
     def collect_ids(folders, found_ids):
         # Recursively gather every feed id in the folder tree: ints are
         # feeds, dicts are named folders, lists are folder contents.
         # Mutates and returns the shared found_ids set.
         for item in folders:
             # print ' --> %s' % item
             if isinstance(item, int):
                 # print ' --> Adding feed: %s' % item
                 found_ids.add(item)
             elif isinstance(item, dict):
                 # print ' --> Descending folder dict: %s' % item.values()
                 found_ids.update(collect_ids(item.values(), found_ids))
             elif isinstance(item, list):
                 # print ' --> Descending folder list: %s' % len(item)
                 found_ids.update(collect_ids(item, found_ids))
         # print ' --> Returning: %s' % found_ids
         return found_ids
     found_ids = collect_ids(folders, set())
     # More subscriptions than folder entries means some feeds are orphaned.
     diff = len(us_feed_ids) - len(found_ids)
     if diff > 0:
         logging.info(" ---> Collecting orphans on %s. %s feeds with %s orphans" % (user.username, len(us_feed_ids), diff))
         orphan_ids = us_feed_ids - found_ids
         folders.extend(list(orphan_ids))
         usf.folders = json.encode(folders)
         usf.save()
コード例 #25
0
ファイル: actions.py プロジェクト: codenrhoden/rdopkg
def rebase_nightly(upstream_branch, local_patches_branch, branch=None,
                   lame_patches=None):
    """Rebase the local patches branch onto the nightly upstream branch."""
    log.info("Rebasing upstream %s" % upstream_branch)
    nightly.rebase_nightly(
        upstream_branch,
        patches_branch=local_patches_branch,
        distgit_branch=branch,
        lame_patches=lame_patches,
    )
コード例 #26
0
ファイル: views.py プロジェクト: francois2metz/NewsBlur
def feed_autocomplete(request):
    """Return up to five popular feeds matching the ``term`` GET parameter.

    Tries the address, link and title fields in turn, keeping the first field
    that yields any match; feeds whose field contains 'token' or 'private'
    are excluded.
    """
    query = request.GET['term']
    feeds = []
    for field in ['feed_address', 'feed_link', 'feed_title']:
        if feeds:
            break
        match = {'%s__icontains' % field: query, 'num_subscribers__gt': 1}
        blocked = (Q(**{'%s__icontains' % field: 'token'}) |
                   Q(**{'%s__icontains' % field: 'private'}))
        feeds = Feed.objects.filter(**match).exclude(blocked).only(
            'feed_title',
            'feed_address',
            'num_subscribers'
        ).order_by('-num_subscribers')[:5]

    logging.info(" ---> [%s] ~FRAdd Search: ~SB%s ~FG(%s matches)" % (request.user, query, len(feeds),))

    return [{
        'value': feed.feed_address,
        'label': feed.feed_title,
        'num_subscribers': feed.num_subscribers,
    } for feed in feeds]
コード例 #27
0
ファイル: models.py プロジェクト: TKupels/NewsBlur
    def switch_feed(cls, user_id, old_feed_id, new_feed_id):
        """Copy a user's read-story state from old_feed_id over to new_feed_id.

        For every story hash the user read on the old feed, the equivalent
        hash under the new feed id is added to both the per-feed ("RS:u:f")
        and per-user ("RS:u") redis read sets, with TTLs refreshed.
        Presumably a @classmethod (decorator not visible here) -- TODO confirm.
        """
        r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
        # r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
        p = r.pipeline()
        # p2 = r2.pipeline()
        story_hashes = cls.get_stories(user_id, old_feed_id, r=r)
        
        for story_hash in story_hashes:
            # Re-key the story hash under the new feed id.
            _, hash_story = MStory.split_story_hash(story_hash)
            new_story_hash = "%s:%s" % (new_feed_id, hash_story)
            read_feed_key = "RS:%s:%s" % (user_id, new_feed_id)
            p.sadd(read_feed_key, new_story_hash)
            # p2.sadd(read_feed_key, new_story_hash)
            p.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
            # p2.expire(read_feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)

            read_user_key = "RS:%s" % (user_id)
            p.sadd(read_user_key, new_story_hash)
            # p2.sadd(read_user_key, new_story_hash)
            p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
            # p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
        
        # One round-trip for all queued commands.
        p.execute()
        # p2.execute()
        
        if len(story_hashes) > 0:
            logging.info(" ---> %s read stories" % len(story_hashes))
コード例 #28
0
ファイル: models.py プロジェクト: TKupels/NewsBlur
 def switch_hash(cls, feed_id, old_hash, new_hash):
     """Rewrite one story's read-state hash for every recent subscriber.

     For each subscription to ``feed_id`` that was read within the retention
     window, if the user had read ``old_hash``, mark ``new_hash`` read in the
     per-feed and per-user redis sets (TTLs refreshed).  Presumably a
     @classmethod (decorator not visible here) -- TODO confirm.
     """
     r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
     # r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
     p = r.pipeline()
     # p2 = r2.pipeline()
     UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
     
     # Only subscribers active within the story-retention window matter.
     usersubs = UserSubscription.objects.filter(feed_id=feed_id, last_read_date__gte=UNREAD_CUTOFF)
     logging.info(" ---> ~SB%s usersubs~SN to switch read story hashes..." % len(usersubs))
     for sub in usersubs:
         rs_key = "RS:%s:%s" % (sub.user.pk, feed_id)
         read = r.sismember(rs_key, old_hash)
         if read:
             p.sadd(rs_key, new_hash)
             # p2.sadd(rs_key, new_hash)
             p.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
             # p2.expire(rs_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
             
             read_user_key = "RS:%s" % sub.user.pk
             p.sadd(read_user_key, new_hash)
             # p2.sadd(read_user_key, new_hash)
             p.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
             # p2.expire(read_user_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
     
     p.execute()
コード例 #29
0
ファイル: tasks.py プロジェクト: carlchen0928/rssEngine
    def run(self, feed_pks, **kwargs):
        """Task entry point: fetch/update every feed in ``feed_pks``.

        Accepts a single pk or a list.  Ids that no longer resolve to a live
        Feed (or that resolve to a different pk after a merge) are removed
        from the ``tasked_feeds`` redis queue instead of being fetched.
        """
        try:
            from apps.rss_feeds.models import Feed
            #from apps.statistics.models import MStatistics
            r = redis.Redis(connection_pool=settings.REDIS_FEED_POOL)
            #mongodb_replication_lag = int(MStatistics.get('mongodb_replication_lag', 0))
            #compute_scores = bool(mongodb_replication_lag < 10)

            options = {
            #    'quick': float(MStatistics.get('quick_fetch', 0)),
            #    'compute_scores': compute_scores,
            #    'mongodb_replication_lag': mongodb_replication_lag,
            }

            if not isinstance(feed_pks, list):
                feed_pks = [feed_pks]

            for feed_pk in feed_pks:
                feed = Feed.get_by_id(feed_pk)
                if not feed or feed.pk != int(feed_pk):
                    logging.info(" ---> ~FRRemoving feed_id %s from tasked_feeds queue, points to %s..." % (feed_pk, feed and feed.pk))
                    r.zrem('tasked_feeds', feed_pk)
                if feed:
                    feed.update(**options)
        except Exception as e:  # was Python-2-only "except Exception, e" syntax
            # Task boundary: log (and optionally mail admins), don't re-raise.
            logging.error(str(e)+\
                traceback.format_exc()+'\n'+\
                'error from:  UpdateFeeds\n')
            if settings.SEND_ERROR_MAILS:
                mail_admins("Error in UpdateFeeds",str(e)+'\n'+traceback.format_exc())
コード例 #30
0
ファイル: __init__.py プロジェクト: LoveWalter/cobra
    def run(self, target=None, tid=None):
        """Count lines of code in ``target`` with cloc and store it on the task.

        Requires both --target and --tid (exits the process when either is
        missing).  Parses cloc's SUM summary row and writes the code count to
        the CobraTaskInfo row identified by ``tid``.
        """
        if target is None:
            log.critical("Please set --target param")
            sys.exit()
        if tid is None:
            log.critical("Please set --tid param")
            sys.exit()

        # Statistic Code
        p = subprocess.Popen(
            ['cloc', target], stdout=subprocess.PIPE)
        (output, err) = p.communicate()
        rs = output.split("\n")
        for r in rs:
            r_e = r.split()
            # cloc's summary row: "SUM: <files> <blank> <comment> <code>".
            # Guard tightened from > 3 to > 4: r_e[4] is read below.
            if len(r_e) > 4 and r_e[0] == 'SUM:':
                t = CobraTaskInfo.query.filter_by(id=tid).first()
                if t is not None:
                    t.code_number = r_e[4]
                    try:
                        db.session.add(t)
                        db.session.commit()
                        log.info("Statistic code number done")
                    except Exception as e:
                        # str(e) instead of e.message: BaseException.message
                        # was removed in Python 3.
                        log.error("Statistic code number failed" + str(e))
コード例 #31
0
def fetch(url=None):
    """Crawl the addons search API page by page, serializing each addon.

    Follows the paginated ``next`` links until exhausted; for each addon the
    feature-compatibility endpoint is fetched too (404s are skipped).
    """
    next_url = url or server + '/api/v3/addons/search/?sort=created&type=extension'
    while next_url:
        log.info('Fetching: {}'.format(next_url))
        res = requests.get(next_url)
        res.raise_for_status()

        res_json = res.json()
        for addon in res_json['results']:
            serialize_addon_result(addon['id'], addon)

            compat = server + '/api/v3/addons/addon/{}/feature_compatibility/'.format(
                addon['id'])
            log.info('Fetching: {}'.format(compat))
            res = requests.get(compat)
            if res.status_code == 404:
                continue
            serialize_addon_result(addon['id'], res.json(), type='compat')

        next_url = res_json['next']
コード例 #32
0
    def compact(self):
        """Rebuild self.folders, dropping duplicate feed ids within each folder."""
        folders = json.decode(self.folders)

        def _compact(folder):
            # Keep ints (feed ids) only once per folder; recurse into named
            # sub-folders (dicts); anything else is dropped.
            compacted = []
            for entry in folder:
                if isinstance(entry, int):
                    if entry not in compacted:
                        compacted.append(entry)
                elif isinstance(entry, dict):
                    for name, contents in entry.items():
                        compacted.append({name: _compact(contents)})
            return compacted

        new_folders = _compact(folders)
        logging.info(" ---> Compacting from %s to %s" % (folders, new_folders))
        new_folders = json.encode(new_folders)
        logging.info(" ---> Compacting from %s to %s" %
                     (len(self.folders), len(new_folders)))
        self.folders = new_folders
        self.save()
コード例 #33
0
    def get_osd(self):
        """Query the Ceph REST API for OSD state.

        Fetches the OSD endpoint, stores the parsed JSON on ``self.json_osd``,
        then fetches ``<api>/command`` and only logs that second response.
        """

        log.info('api osd command')
        api = self.construct_api()
        log.info(api)
        # NOTE(review): verify=False disables TLS certificate checking --
        # presumably intentional for lab clusters; confirm before reuse.
        response = self.auth.request('GET', api, verify=False)
        response.raise_for_status()
        pretty_response = json.dumps(response.json(), indent=2)
        log.debug('pretty json response from  api')
        log.debug(pretty_response)
        self.json_osd = json.loads(pretty_response)

        log.debug('api with command')
        api = self.construct_api() + '/command'
        log.debug(api)
        response2 = self.auth.request('GET', api, verify=False)
        response2.raise_for_status()
        pretty_response2 = json.dumps(response2.json(), indent=2)
        log.debug('pretty json response from  api')
        log.debug(pretty_response2)
コード例 #34
0
def gen_all_pic():
    """Generate the configured number of pictures.

    Reads ``count_per_process`` from the service config; for each successful
    ``gen_pic()`` call, writes label data, shows the result and optionally
    emits VOC annotations.
    :return: None
    """
    from service import conf
    gen_count = conf['base']['count_per_process']

    index = 0
    while index < gen_count:
        log.info("-" * 20 + " generate new picture {index}/{gen_count}".format(
            index=index, gen_count=gen_count) + "-" * 20)
        dump_data = gen_pic()
        if dump_data:
            gen_label_data(dump_data)
            show_data(dump_data)
            # Optionally also emit VOC-format annotations.
            if conf['base']['gen_voc']:
                gen_voc(dump_data)
            index += 1
        # NOTE(review): if gen_pic() keeps returning a falsy value, the loop
        # retries forever without incrementing index -- confirm intended.
コード例 #35
0
ファイル: models.py プロジェクト: venkat0708/NewsBlur
    def query(cls, text, max_subscribers=5):
        """Full-text search for feeds matching ``text`` via ElasticSearch.

        Matches against the address, link and title fields ("should" clauses,
        so any one may match) and returns results sorted by subscriber count.
        Returns [] when no search server is reachable.  Presumably a
        @classmethod (decorator not visible here) -- TODO confirm.
        """
        try:
            cls.ES().default_indices = cls.index_name()
            cls.ES().indices.refresh()
        except pyes.exceptions.NoServerAvailable:
            logging.debug(" ***> ~FRNo search server available.")
            return []
        
        if settings.DEBUG:
            max_subscribers = 1
        
        logging.info("~FGSearch ~FCfeeds~FG: ~SB%s" % text)
        q = pyes.query.BoolQuery()
        q.add_should(pyes.query.MatchQuery('address', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q.add_should(pyes.query.MatchQuery('link', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q.add_should(pyes.query.MatchQuery('title', text, analyzer="simple", cutoff_frequency=0.0005, minimum_should_match="75%"))
        q = pyes.Search(q, min_score=1)
        results = cls.ES().search(query=q, size=max_subscribers, doc_types=[cls.type_name()], sort="num_subscribers:desc")

        return results
コード例 #36
0
ファイル: server.py プロジェクト: pradeep5267/label-studio
def api_completions(task_id):
    """ Delete or save new completion to output_dir with the same name as task_id
    """
    global c

    method = request.method
    if method == 'POST':
        completion = request.json
        completion.pop('state', None)  # remove editor state
        db.save_completion(task_id, completion)
        log.info(msg='Completion saved', extra={'task_id': task_id, 'output': request.json})
        return make_response(json.dumps({'id': random.randint(0, 1000)}), 201)

    if method == 'DELETE':
        if not c.get('allow_delete_completions', False):
            return make_response('Completion removing is not allowed in server config', 422)
        db.delete_completion(task_id)
        return make_response('deleted', 204)

    return make_response('Incorrect request method', 500)
コード例 #37
0
ファイル: models.py プロジェクト: victorvscn/NewsBlur
    def query(cls, feed_ids, query, order, offset, limit):
        """Search stories for ``query`` restricted to ``feed_ids``.

        Returns a list of matching story ids, paged by offset/limit and
        sorted by date ("newest" => descending, anything else ascending).
        Presumably a @classmethod (decorator not visible here) -- TODO confirm.
        """
        cls.create_elasticsearch_mapping()
        cls.ES.indices.refresh()

        sort = "date:desc" if order == "newest" else "date:asc"
        # Both the text query AND the feed membership must match.
        string_q = pyes.query.StringQuery(query, default_operator="AND")
        feed_q = pyes.query.TermsQuery('feed_id', feed_ids)
        q = pyes.query.BoolQuery(must=[string_q, feed_q])
        results = cls.ES.search(q,
                                indices=cls.index_name(),
                                doc_types=[cls.type_name()],
                                partial_fields={},
                                sort=sort,
                                start=offset,
                                size=limit)
        logging.info(
            " ---> ~FG~SNSearch ~FCstories~FG for: ~SB%s~SN (across %s feed%s)"
            % (query, len(feed_ids), 's' if len(feed_ids) != 1 else ''))

        return [r.get_id() for r in results]
コード例 #38
0
    def candidate_dict(self):
        """Lazily build and cache the candidate dict from Propbank/Nombank.

        On first access, constructs a CandidateDict from the three readers
        and registers candidates for every known predicate; later accesses
        return the cached instance.  Presumably wrapped by @property at the
        definition site -- TODO confirm.
        """
        if self._candidate_dict is None:
            # All inputs must be loaded before candidates can be collected.
            assert len(self.all_predicates) > 0
            assert self.propbank_reader is not None
            assert self.nombank_reader is not None
            assert self.corenlp_reader is not None
            log.info('Building candidate dict from Propbank and Nombank')
            self._candidate_dict = CandidateDict(
                propbank_reader=self.propbank_reader,
                nombank_reader=self.nombank_reader,
                corenlp_reader=self.corenlp_reader,
                max_dist=self.max_candidate_dist)

            for predicate in self.all_predicates:
                self._candidate_dict.add_candidates(
                    predicate.pred_pointer,
                    include_non_head_entity=self.include_non_head_entity)
            log.info('Done')

        return self._candidate_dict
コード例 #39
0
 def getMangaDataByPage(self, page):
     """Crawl one listing page; return its gallery metadata, or None past the last page."""
     info("info", "excrawler.doCrawl", "爬取第" + str(page) + "页开始")
     # Fetch the gallery id list for this page.
     id_list = self.getListByPage(page)
     # An empty list means we ran past the last page.
     if not id_list:
         return None
     info("info", "excrawler.doCrawl", "已获取本子列表")
     api_result = self.getDataFromApi(id_list)
     info("info", "excrawler.doCrawl", "已获取本子详细信息")
     metadata = api_result['gmetadata']
     info("info", "excrawler.doCrawl", "爬取第" + str(page) + "页结束")
     return metadata
コード例 #40
0
ファイル: server.py プロジェクト: zeroows/label-studio
def api_completions(task_id):
    """ Delete or save new completion to output_dir with the same name as task_id
    """
    global c

    method = request.method
    if method == 'POST':
        completion = request.json
        completion.pop('state', None)  # remove editor state
        db.save_completion(task_id, completion)
        log.info(msg='Completion saved', extra={'task_id': task_id, 'output': request.json})
        return answer(201, 'ok')

    if method == 'DELETE':
        if not c.get('allow_delete_completions', False):
            return answer(422, 'Completion removing is not allowed in server config')
        db.delete_completion(task_id)
        return answer(204, 'deleted')

    return answer(500, 'Incorrect request method')
コード例 #41
0
    def build_rich_predicates(self,
                              use_corenlp_token=True,
                              labeled_arg_only=False):
        """Populate all_rich_predicates from all_predicates (replacing any
        previously built ones)."""
        assert len(self.all_predicates) > 0
        if len(self.all_rich_predicates) > 0:
            log.warning('Overriding existing rich predicates')
            self.all_rich_predicates = []

        token_kind = 'CoreNLP tokens' if use_corenlp_token else 'PTB tokens'
        log.info('Building rich predicates with {}'.format(token_kind))
        self.all_rich_predicates.extend(
            RichPredicate.build(
                predicate,
                corenlp_reader=self.corenlp_reader,
                use_lemma=True,
                use_entity=True,
                use_corenlp_tokens=use_corenlp_token,
                labeled_arg_only=labeled_arg_only)
            for predicate in self.all_predicates)
        log.info('Done')
コード例 #42
0
    def set_repo(self):
        """Add the downstream Ceph OSD apt repo on every OSD host via ceph-deploy.

        Builds a ``ceph-deploy repo`` command listing all OSD hostnames and
        runs it through the shell.
        """

        log.info("setting repo for osds")

        # NOTE(review): credentials are embedded in the repo URL; they leak
        # into the debug log and the process list via os.system below.
        repo_url = (
            "https://%s:%[email protected]/enterprise-testing/Ceph-1.3-Ubuntu-14.04-20150813.t.0/repos/debian/OSD"
            % (self.username, self.password))
        gpg_url = "https://download.inktank.com/keys/release.asc"

        osd_hostname = []

        for each in self.osds:
            osd_hostname.append(str(each.hostname))

        self.repo = ("ceph-deploy repo --repo-url " + repo_url +
                     " --gpg-url " + gpg_url + " OSD " +
                     " ".join(osd_hostname))

        log.debug(self.repo)
        # NOTE(review): os.system runs through the shell; subprocess.run with
        # a list would avoid quoting/injection issues -- left unchanged here.
        os.system(self.repo)
コード例 #43
0
def boyer_moore_horspool_search(blob, word, table):
    """Return every line of ``blob`` that contains ``word``.

    Horspool-style scan using the precomputed shift ``table`` (indexed by the
    byte just past the current window, Sunday/Quick-Search variant).  On a
    match, the whole enclosing line is collected and scanning resumes after
    that line.

    :param blob: bytes-like haystack
    :param word: bytes-like needle
    :param table: 256-entry shift table for ``word``
    :return: list of matching lines as bytes
    """
    blob_view = memoryview(blob)
    word_view = memoryview(word)
    n, m = len(blob), len(word)
    last = n - m  # last alignment at which word can start
    results = []
    i = 0
    cnt = 0
    # Fix: loop bound was "i < last", which never tested the final alignment,
    # so a word occupying the last m bytes of blob was missed.
    while i <= last:
        if match_word(blob_view, word_view, i, m):
            prev_line_end = find_previous_line_end(blob_view, i)
            cur_line_end = find_current_line_end(blob_view, i + m, n)
            line = blob_view[prev_line_end:cur_line_end].tobytes()
            results.append(line)
            i = cur_line_end
        else:
            if i == last:
                # No byte beyond the window to index the shift table with.
                break
            i += table[blob_view[i + m]]
        cnt += 1
    log.info('ループ回数: %d' % cnt)
    return results
コード例 #44
0
def query_file_head(url):
    """Follow up to 10 levels of 302 redirects from ``url`` via HEAD requests.

    Returns the last URL that was actually requested (``url`` itself when the
    first response is not a 302).
    """
    headers = {"User-Agent": "pan.baidu.com"}
    res = requests.head(url, headers=headers)
    # md5_val = res.headers.get('Content-MD5', "")
    last_loc_url = url
    recursive_cnt = 10  # redirect-depth limit to avoid loops
    if res.status_code == 302:
        loc_url = res.headers.get('Location', '')
        last_loc_url = loc_url
        while recursive_cnt > 0 and res.status_code == 302 and loc_url:
            recursive_cnt = recursive_cnt - 1
            res = requests.head(loc_url, headers=headers)
            last_loc_url = loc_url
            loc_url = res.headers.get('Location', '')
    logger.info("query_file_head status_code:{}".format(res.status_code))
    return last_loc_url
コード例 #45
0
 def parse_file(self, test_file):
     """Validate and process ``test_file``; return True on success, else False.

     ``pfm.check_file_type`` appears to return the *string* "True" on
     success, or a failure-reason string otherwise (hence the string
     comparison below) -- TODO confirm against its implementation.
     On success, logs the file's lines, the largest word per line, and those
     words reversed.
     """
     check_file = self.pfm.check_file_type(test_file)
     if check_file == "True":
         lines_in_file = self.pfm.get_lines_from_file(test_file)
         log.info("Lines in File: %s" % lines_in_file)
         largest_words = self.pfm.get_largest_word(lines_in_file)
         log.info("Largest words: %s" % largest_words)
         reversed_words = self.pfm.reverse_words(largest_words)
         log.info("Reversed words: %s" % reversed_words)
         return True
     else:
         # check_file holds the failure reason from check_file_type.
         log.info(check_file)
         return False
コード例 #46
0
def pan_mkdir(access_token, filepath):
    """Create directory ``filepath`` on Baidu pan via the REST API.

    Returns the API's JSON reply; on error, fills in a human-readable
    ``err_msg`` from PAN_ERROR_CODES when the API did not supply one.
    """
    url_path = 'file'
    rs = requests.post(
        "%s/%s" % (POINT, url_path),
        params={"method": 'create', "access_token": access_token},
        data={"path": filepath, "size": 0, "isdir": 1, "rtype": 0},
        headers={"User-Agent": "pan.baidu.com"},
    )
    jsonrs = rs.json()
    err_no = jsonrs.get("errno", None)
    logger.info("restapi pan_mkdir:{}, err_no:{}".format(filepath, err_no))
    if err_no and not jsonrs.get("err_msg", ""):
        jsonrs["err_msg"] = PAN_ERROR_CODES.get(err_no, "")
    return jsonrs
コード例 #47
0
def get_share_info(share_id, special_short_url, randsk):
    """Fetch share metadata from pan.baidu.com's ``shorturlinfo`` endpoint.

    Returns the JSON payload; any non-zero ``errno`` is reset to 0 before
    returning (see review note below).
    """
    point = "{protocol}://{domain}".format(protocol=PAN_SERVICE['protocol'],
                                           domain="pan.baidu.com/api")
    url_path = 'shorturlinfo'
    url = "%s/%s" % (point, url_path)
    headers = {"User-Agent": "pan.baidu.com"}
    params = {
        "shareid": share_id,
        "shorturl": special_short_url,
        "spd": randsk
    }
    # NOTE(review): verify=False disables TLS certificate checking.
    rs = requests.get(url, params=params, headers=headers, verify=False)
    jsonrs = rs.json()
    err_no = jsonrs["errno"]
    logger.info("restapi get_share_info share_id:{}, err_no:{}".format(
        share_id, err_no))
    if err_no:
        # NOTE(review): this hides upstream errors from callers (errno is
        # logged above but zeroed here) -- confirm this is intentional.
        jsonrs["errno"] = 0
    logger.info("get_share_info:{}".format(jsonrs))
    return jsonrs
コード例 #48
0
    def get_metadata(file_path):
        '''Return media metadata for ``file_path``.

        Runs ``ffprobe`` (through the ``decorator.executor`` wrapper) and
        parses its JSON output.

        :param file_path: (str) path to the media file.
        :raises TypeError: when ffprobe exits non-zero.
        '''
        file_path = file_path.strip()
        # @decorator.Timekeep()

        @decorator.executor
        # @decorator.Executor()
        def get_metadata(file_path):
            # Inner function only supplies the command line; the executor
            # decorator presumably runs it and returns a dict with
            # 'returncode' and 'result' keys -- TODO confirm.
            return ['ffprobe', '-v', 'quiet', '-show_format',
                    '-show_streams', '-print_format', 'json', file_path]

        result = get_metadata(file_path)
        # NOTE(review): with stdlib logging the second argument would be an
        # unused %-format arg (no placeholder) -- confirm `log` accepts it.
        log.info('get_metadata result', result.get('result'))
        if result.get('returncode') == 0:
            metadata = json.loads(result.get('result'))
        else:
            raise TypeError('%s is not JSONable' % type(result))
        return metadata
コード例 #49
0
def df_to_gspread(spreadsheet_name, sheet_name, df, mfilter, columns=None):
    """
    Update a google spreadsheet based on a pandas dataframe row

    Args:
        spreadsheet_name:   name of the document
        sheet_name:         name of the sheet inside the document
        df:                 pandas dataframe
        mfilter:            rows that will be updated (label/boolean selector
                            usable with df.loc)
        columns:            which columns to update (default: all columns)
    """

    # Get worksheet
    wks = get_gdrive_sheet(spreadsheet_name, sheet_name)

    # If no columns are passed, update them all
    if columns is None:
        columns = df.columns.tolist()

    # Make sure columns is a list
    if not isinstance(columns, list):
        columns = [columns]

    # Extract range from coordinates and filter
    coordinates = get_coordinates(df).loc[mfilter, columns]

    # A Series means one row/column was selected; otherwise a 2-D frame.
    if isinstance(coordinates, pd.Series):
        mrange = f"{coordinates.iloc[0]}:{coordinates.iloc[-1]}"
    else:
        mrange = f"{coordinates.iloc[0, 0]}:{coordinates.iloc[-1, -1]}"

    # Filter data to be updated
    values = df.loc[mfilter, columns].values.tolist()

    # Make sure that values is a list of lists
    if not isinstance(values[0], list):
        values = [values]

    # Update values in gspreadsheet
    log.info(f"Updating {spreadsheet_name}/{sheet_name}/{mrange}")
    wks.update(mrange, values)
コード例 #50
0
ファイル: parse.py プロジェクト: yaoyi2008/cobra
 def block_code(self, block_position):
     """
     Get block code
     :param block_position:
             0:up:1:down
     :return: the code above (0) or below (1) ``self.line`` within its
              enclosing function, or False when nothing is found
     """
     functions = self.functions()
     if functions:
         block_start = 0
         block_end = 0
         for function_name, function_value in functions.items():
             log.info("Function S-E: {0} ({1} - {2})".format(
                 function_name, function_value['start'],
                 function_value['end']))
             # Find the function whose span contains self.line.
             if int(function_value['start']) < int(self.line) < int(
                     function_value['end']):
                 if block_position == 0:
                     # "up": from the function start to the line above.
                     block_start = function_value['start']
                     block_end = int(self.line) - 1
                 elif block_position == 1:
                     # "down": from the line below to the function end.
                     block_start = int(self.line) + 1
                     block_end = function_value['end']
         # Extract the selected line range with sed.
         param = [
             'sed', "-n", "{0},{1}p".format(block_start, block_end),
             self.file_path
         ]
         p = subprocess.Popen(param, stdout=subprocess.PIPE)
         result = p.communicate()
         if len(result[0]):
             param_block_code = result[0]
             # NOTE(review): on Python 3 result[0] is bytes, so the ''
             # comparison below can never match -- presumably Python 2 code.
             if param_block_code == '':
                 param_block_code = False
         else:
             param_block_code = False
         return param_block_code
     else:
         log.info("Not found functions")
         return False
コード例 #51
0
def main():
    """Reduce translation spreadsheets into deduplicated en/zh CSV files.

    Loads English strings from the source xlsx (plus the translated UI sheet
    when present), deduplicates them by key, writes en.lang.reduce.csv, then
    writes the matching subset of Chinese translations to zh.lang.reduce.csv.
    """
    cd = sys.path[0]
    src_path = os.path.join(cd, '../translation/lang')
    translation_path = os.path.join(cd, '../translation/lang/translated')

    # load en from xlsx
    csv_list, _ = get_csv_from_xls(src_path, 'en')

    # load zh from xlsx
    zh_csv_list, ui_xls_file = get_csv_from_xls(translation_path, 'zh')

    # load en ui from translated xlsx
    if ui_xls_file is not None:
        csv_list_ui, _ = get_csv_from_xls(ui_xls_file, 'en')
        csv_list.extend(csv_list_ui)

    # convert en: dedupe by key and emit sorted "key,value" rows
    csv_dict = get_dict_from_csv(csv_list)
    csv_list_reduced = []
    for k, v in sorted(csv_dict.items()):
        csv_list_reduced.append('%s,%s' % (k, v))

    # save result
    dest_csv_file = os.path.join(translation_path, 'en.lang.reduce.csv')
    with open(dest_csv_file, 'wt', encoding='utf-8') as fp:
        fp.writelines(csv_list_reduced)
    log.info('write to en.lang.reduce.csv')

    # convert zh: keep only keys that also exist in the English set
    zh_csv_dict = get_dict_from_csv(zh_csv_list)

    zh_csv_list_reduced = []
    for k in sorted(csv_dict):
        if k in zh_csv_dict:
            zh_csv_list_reduced.append('%s,%s' % (k, zh_csv_dict[k]))

    # save zh
    zh_dest_csv_file = os.path.join(translation_path, 'zh.lang.reduce.csv')
    with open(zh_dest_csv_file, 'wt', encoding='utf-8') as fp:
        fp.writelines(zh_csv_list_reduced)
    log.info('write to zh.lang.reduce.csv')
コード例 #52
0
    def clean(self):
        """Authenticate the submitted username/e-mail and password.

        Tries, in order: normal auth for a user matched by username or
        e-mail; "blank password" recovery (accounts whose password is reset
        to the username); then the same two steps for a user matched
        strictly by e-mail.  Raises ValidationError on unknown username or
        wrong password.
        """
        username = self.cleaned_data.get('username', '').lower()
        password = self.cleaned_data.get('password', '')

        # The username field may hold either a username or an e-mail address.
        user = User.objects.filter(
            Q(username__iexact=username) | Q(email=username))
        if user:
            user = user[0]
        if username and user:
            self.user_cache = authenticate(username=user.username,
                                           password=password)
            if self.user_cache is None:
                # Legacy blank-password accounts: set password to the
                # username, then retry with it.
                blank = blank_authenticate(user.username)
                if blank:
                    user.set_password(user.username)
                    user.save()
                self.user_cache = authenticate(username=user.username,
                                               password=user.username)
            if self.user_cache is None:
                # Last resort: match strictly by e-mail and repeat both steps.
                email_user = User.objects.filter(email=username)
                if email_user:
                    email_user = email_user[0]
                    self.user_cache = authenticate(
                        username=email_user.username, password=password)
                    if self.user_cache is None:
                        blank = blank_authenticate(email_user.username)
                        if blank:
                            email_user.set_password(email_user.username)
                            email_user.save()
                        self.user_cache = authenticate(
                            username=email_user.username,
                            password=email_user.username)
            if self.user_cache is None:
                logging.info(" ***> [%s] Bad Login" % username)
                raise forms.ValidationError(
                    _("Whoopsy-daisy, wrong password. Try again."))
        elif username and not user:
            raise forms.ValidationError(
                _("That username is not registered. Please try again."))

        return self.cleaned_data
コード例 #53
0
    def run(self):
        """Interactive admin console loop; dispatches commands until Flag clears.

        Commands: stop, ftp, help/?, add <user> <pass>, list, remove <user>,
        bat.  User-facing messages are Chinese runtime strings and are left
        untouched.
        """
        while Flag:
            Cmd = input('')
            if Cmd == 'stop':
                command_stop()
                # NOTE(review): `log.warm` looks like a typo for `log.warn`
                # unless the project logger really defines warm() -- confirm.
                log.warm('正在关闭,清稍后')
                continue
            if Cmd == 'ftp':
                try:
                    FTP.Connect()
                except Exception as e:
                    log.error(e)
                continue
            if Cmd in ['help', '?']:
                print(Help_msg)
                continue
            # add <username> <password>: append a user to the config file.
            if 'add' in Cmd and len(Cmd.split(' ')) == 3:
                username = Cmd.split(' ')[1]
                password = Cmd.split(' ')[2]
                js = config.ReadConfig()
                js["array"].append({
                    "username": username,
                    "password": password
                })
                config.WriteConfig(js)
                log.info('已成功添加用户:' + username)
                continue
            if Cmd == 'list':
                js = config.ReadConfig()
                UserList = ''
                for i in js["array"]:
                    UserList += i["username"] + ','
                print(UserList.rstrip(','))
                print('共{}名用户'.format(len(js["array"])))
                continue
            # remove <username>: drop matching users from the config file.
            if 'remove' in Cmd and len(Cmd.split(' ')) == 2:
                js = config.ReadConfig()
                for i in js["array"]:
                    if i["username"] == Cmd.split(' ')[1]:
                        js["array"].remove(i)
                config.WriteConfig(js)
                log.info('已将用户{}移除'.format(Cmd.split(' ')[1]))
                continue
            if Cmd == 'bat':
                Bat.start()
                Bat.kill()
                log.info('已生成run.vbs启动脚本')
                log.info('已生成kill.bat退出脚本')
                continue

            log.error('未知命令,请输入help查看帮助')
コード例 #54
0
ファイル: views.py プロジェクト: taoh/NewsBlur
def exception_change_feed_link(request):
    """Fix a feed stuck in a page/feed exception state via a new link.

    Discovers the real feed address behind the submitted ``feed_link``,
    reactivates the feed (or the original it turns out to duplicate),
    refetches it and returns the updated subscription payload.
    """
    feed_id = request.POST['feed_id']
    feed = get_object_or_404(Feed, pk=feed_id)
    feed_link = request.POST['feed_link']
    code = -1

    if not feed.has_page_exception and not feed.has_feed_exception:
        logging.info(" ***> [%s] ~BRIncorrect feed link change: ~SB%s" %
                     (request.user, feed))
        # This Forbidden-403 throws an error, which sounds pretty good to me right now
        return HttpResponseForbidden()

    retry_feed = feed
    feed_address = feedfinder.feed(feed_link)
    if feed_address:
        code = 1
        feed.has_page_exception = False
        feed.active = True
        feed.fetched_once = False
        feed.feed_link = feed_link
        feed.feed_address = feed_address
        feed.next_scheduled_update = datetime.datetime.utcnow()
        # Feed.save() is used here as returning a duplicate feed id when the
        # save collides with an existing feed; switch to that original.
        duplicate_feed_id = feed.save()
        if duplicate_feed_id:
            original_feed = Feed.objects.get(pk=duplicate_feed_id)
            retry_feed = original_feed
            original_feed.next_scheduled_update = datetime.datetime.utcnow()
            original_feed.has_page_exception = False
            original_feed.active = True
            original_feed.save()

    logging.user(
        request.user,
        "~FRFixing feed exception by link: ~SB%s" % (retry_feed.feed_link))
    retry_feed.update()

    usersub = UserSubscription.objects.get(user=request.user, feed=retry_feed)
    usersub.calculate_feed_scores(silent=False)

    feeds = {feed.pk: usersub.canonical(full=True)}
    return {'code': code, 'feeds': feeds}
コード例 #55
0
    def execute(self):
        """Enable repos on the admin node, every mon and every osd, then
        create the cluster and install Ceph plus the CLI.

        NOTE(review): ``self.passowrd`` is spelled that way throughout --
        presumably matching a misspelled attribute on this class; confirm
        before renaming.
        """

        admin_mon_repo = AdminRepos(self.username, self.passowrd, self.pool_id,
                                    self.admin_repo, self.admin_node)
        admin_mon_repo.execute()

        for each_mon in self.mons:
            enable_mon_repo = MonRepos(self.username, self.passowrd,
                                       self.pool_id, each_mon, self.mon_repo)
            enable_mon_repo.execute()

        for each_osd in self.osds:
            enabl_osd_repos = OsdRepos(self.username, self.passowrd,
                                       self.pool_id, each_osd, self.osd_repo)
            enabl_osd_repos.execute()

        log.info("installing ceph from CDN")

        self.create_cluster()
        self.install_ceph()
        self.install_cli()
コード例 #56
0
    def execute(self):
        """Run CDN and/or ISO installation (whichever is enabled), then prepare Ceph."""

        if self.cdn_install_enabled:
            log.info('cdn enabled')
            CDNInstall(self.username, self.password, self.admin_node,
                       self.mons, self.osds, True).execute()

        if self.iso_install_enabled:
            log.info('ISO enabled')
            ISOInstall(self.username, self.password, self.admin_node,
                       self.mons, self.osds).execute()

        PrepareCeph(self.admin_node, self.mons, self.osds).execute()
コード例 #57
0
def rest_query(query, cache=True):
    """Run a Bugzilla REST query, optionally caching the JSON result.

    :param query: dict of query parameters. Not mutated — the API key is
        added to a private copy before the request is built.
    :param cache: when True, return a previously cached response if one
        exists, and store fresh responses under ``cache/``.
    :returns: the decoded JSON response from Bugzilla.
    """
    # Work on a copy so the caller's dict is not mutated with the token.
    params = dict(query)
    params['api_key'] = BUGZILLA_TOKEN
    url = BUGZILLA_REST + urlencode(params)

    if not cache:
        log.info('Note: not caching bugzilla query.')

    # md5 requires bytes in Python 3 — hash the encoded request URL.
    query_hash = hashlib.md5(url.encode('utf-8'))
    cache_key = 'bugzilla:' + query_hash.hexdigest()
    filename = os.path.join('cache', cache_key + '.json')

    if cache and os.path.exists(filename):
        with open(filename, 'r') as fh:
            return json.load(fh)

    log.info('Bugzilla: {}'.format(url))
    result = requests.get(url).json()
    if cache:
        # Make sure the cache directory exists before writing into it.
        os.makedirs('cache', exist_ok=True)
        with open(filename, 'w') as fh:
            json.dump(result, fh)

    return result
コード例 #58
0
    def _load_snapshot(self, snapshot_name, data_dir=None):
        """Restore model weights and training bookkeeping from a snapshot
        directory, then log how long the model has trained so far.
        """
        data_dir = self.snapshot_dir if data_dir is None else data_dir
        model_snap_dir = os.path.join(data_dir, snapshot_name)

        try:
            nn.read_model(model_snap_dir, "saae", self.saae)
        except KeyError as e:
            # Report the missing key but keep going with whatever loaded.
            print(e)

        # Restore training counters from the snapshot metadata.
        meta = nn.read_meta(model_snap_dir)
        self.epoch = meta["epoch"]
        self.total_iter = meta["total_iter"]
        self.total_training_time_previous = meta.get("total_time", 0)
        self.total_images = meta.get("total_images", 0)
        self.best_score = meta["best_score"]
        self.saae.total_iter = self.total_iter

        elapsed = str(
            datetime.timedelta(seconds=self.total_training_time()))
        log.info("Model {} trained for {} iterations ({}).".format(
            snapshot_name, self.total_iter, elapsed))
コード例 #59
0
 async def receive(self, bot: Mirai, source: Source,
                   subject: T.Union[Group, Friend],
                   message: MessageChain) -> T.NoReturn:
     """Receive a message and send back each generated reply.

     :param bot: the Mirai bot instance
     :param source: the Source of the incoming message
     :param subject: the group or friend the reply is sent to
     :param message: the incoming message chain
     """
     try:
         # Stream replies as the generator yields them.
         async for piece in self.generate_reply(bot, source, subject,
                                                message):
             await reply(bot, source, subject, piece)
     except PixivResultError as exc:
         # Expected API-level failure: report it to the user verbatim.
         log.info(f"{self.tag}: {exc.error()}")
         await reply(bot, source, subject, [Plain(exc.error())])
     except Exception as exc:
         # Anything else: dump the traceback and echo the error back.
         traceback.print_exc()
         await reply(bot, source, subject,
                     [Plain(f"{type(exc)}: {str(exc)}")])
コード例 #60
0
    def ice_setup(self):
        """Install the ICE package from the mounted media, run ice_setup,
        and initialize calamari.
        """
        # Install the ICE .deb shipped on the mount.
        log.info('extracting ICE setup')
        extract_cmd = 'sudo dpkg -i /mnt/ice-*.deb'
        log.debug(extract_cmd)
        os.system(extract_cmd)

        # Run ice_setup against the mount point.
        run_ice_setup = 'sudo ice_setup -d /mnt'
        log.info('running ice setup')
        log.debug(run_ice_setup)
        os.system(run_ice_setup)

        # Bootstrap the calamari monitoring service.
        os.system('sudo calamari-ctl initialize')