# Beispiel #1
# 0
import hoshino
import nonebot
import os
import re
import traceback

# User-facing help text (Chinese, runtime string — keep verbatim).
HELP_STR = '''
公主连结活动日历
日历 : 查看本群订阅服务器日历
[国台日]服日历 : 查看指定服务器日程
[国台日]服日历 on/off : 订阅/取消订阅指定服务器的日历推送
日历 time 时:分 : 设置日历推送时间
日历 status : 查看本群日历推送设置
'''.strip()

# HoshinoBot service registration for this calendar plugin.
sv = hoshino.Service('pcr_calendar', help_=HELP_STR, bundle='pcr查询')

# Per-group calendar settings loaded from data.json by load_data();
# presumably keyed by group id — confirm against the callers that index it.
group_data = {}


def load_data():
    """Load saved settings from data.json (next to this module) into group_data.

    A missing file means "no saved data yet" and is ignored; any read/parse
    error is logged and swallowed so the bot keeps running with an empty
    configuration (best-effort load).
    """
    import json  # BUG FIX: `json` was used below but never imported in this module

    path = os.path.join(os.path.dirname(__file__), 'data.json')
    if not os.path.exists(path):
        return
    try:
        with open(path, encoding='utf8') as f:
            # dict.update is equivalent to the per-key copy loop it replaces
            group_data.update(json.load(f))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort, just logged.
        traceback.print_exc()
import hoshino
import asyncio
from .base import *
from nonebot import scheduler
from .config import get_config, get_group_config, get_group_info, load_config, set_group_config, group_list_check, set_group_list

# User-facing help text (Chinese, runtime string — keep verbatim).
HELP_MSG = '''
来 [num] 张 [keyword] 涩/色/瑟图 : 来num张keyword的涩图(不指定数量与关键字发送一张随机涩图)
涩/色/瑟图 : 发送一张随机涩图
本日涩图排行榜 [page] : 获取[第page页]p站排行榜(需开启acggov模块)
看涩图 [n] 或 [start end] : 获取p站排行榜[第n名/从start名到end名]色图(需开启acggov模块)
'''
sv = hoshino.Service('setu', bundle='pcr娱乐', help_=HELP_MSG)

# Set up rate limiters from config:
# tlmt caps a user's requests per day ('daily_max');
# flmt presumably enforces a minimum interval between requests ('freq_limit') — confirm usage.
tlmt = hoshino.util.DailyNumberLimiter(get_config('base', 'daily_max'))
flmt = hoshino.util.FreqLimiter(get_config('base', 'freq_limit'))


def check_lmt(uid, num, gid):
    if uid in hoshino.config.SUPERUSERS:
        return 0, ''
    if group_list_check(gid) != 0:
        if group_list_check(gid) == 1:
            return 1, f'此功能启用了白名单模式,本群未在白名单中,请联系维护组解决'
        else:
            return 1, f'此功能已在本群禁用,可能因为人数超限或之前有滥用行为,请联系维护组解决'
    if not tlmt.check(uid):
        return 1, f"您今天已经冲过{get_config('base', 'daily_max')}次了,请明天再来~"
    if num > 1 and (get_config('base', 'daily_max') - tlmt.get_num(uid)) < num:
        return 1, f"您今天的剩余次数为{get_config('base', 'daily_max') - tlmt.get_num(uid)}次,已不足{num}次,请冲少点(恼)!"
# Beispiel #3
# 0
    'last_time': {},
    'group_rss': {},
    'group_mode': {},
}

# User-facing help text for the rss plugin (Chinese, runtime string — keep verbatim).
HELP_MSG = '''rss订阅
rss list : 查看订阅列表
rss add rss地址 : 添加rss订阅
rss addb up主id : 添加b站up主订阅
rss addr route : 添加rsshub route订阅
rss remove 序号 : 删除订阅列表指定项
rss mode 0/1 : 设置消息模式 标准/简略
详细说明见项目主页: https://github.com/zyujs/rss
'''

# HoshinoBot service registration for the rss plugin.
sv = hoshino.Service('rss', bundle='rss', help_= HELP_MSG)

def save_data():
    """Persist the module-level ``data`` dict as pretty-printed JSON.

    Writes data.json next to this module. Failures are logged and swallowed
    (best-effort save) so a disk error never crashes the bot.
    """
    path = os.path.join(os.path.dirname(__file__), 'data.json')
    try:
        with open(path, 'w', encoding='utf8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort, just logged.
        traceback.print_exc()

def load_data():
    path = os.path.join(os.path.dirname(__file__), 'data.json')
    if not os.path.exists(path):
        save_data()
        return
    try:
# Beispiel #4
# 0
import re
import hoshino
from hoshino import Service, R
import asyncio
from nonebot import *
import sys
import os
import json
print(os.getcwd())
sys.path.append(os.getcwd())
sys.path.append('C:/HoshinoBot/hoshino/modules/add_info')
from a import *
from hoshino.service import sucmd


sv = hoshino.Service('add')
_bot = get_bot()


# @sv.on_command('绑定帮助')
# async def Help(session):
#     msg = '''待更新'''
#     await session.send(msg)

def read_json(file):
    dict_temp = {}
    try:
        with open(file, 'r', encoding='utf-8') as f:
            dict_temp = json.load(f)
            return dict_temp
    except:
import hoshino
import asyncio
import aiohttp
import re
import os
import json
from sqlitedict import SqliteDict

sv = hoshino.Service('Lazy_Dog_Radar')


def get_path(*paths):
    """Resolve *paths* relative to this module's directory and return the joined path."""
    module_dir = os.path.dirname(__file__)
    return os.path.join(module_dir, *paths)


def init_db(db_dir, db_name='group_data.sqlite'):
    """Open a SqliteDict stored at ``db_dir/db_name`` (relative to this module).

    Values are (de)serialized as JSON and every write is committed
    immediately (autocommit=True).
    """
    storage_path = get_path(db_dir, db_name)
    return SqliteDict(storage_path,
                      encode=json.dumps,
                      decode=json.loads,
                      autocommit=True)


@sv.on_fullmatch(('Find_Lazy_Dog', '查代刀', '查懒狗'))
async def send_statics(bot, ev):
    db = init_db('./')
    if str(ev['group_id']) not in list(db.keys()):
        await bot.send(ev, f"未设置api地址")
        return
    api = db[ev['group_id']]['api']
    if 'police_id' in db[ev['group_id']].keys():
        police_id = db[ev['group_id']]['police_id']
import asyncio
import json
import platform
import secrets
from typing import List

import aiofiles
import feedparser
import json5
import peewee as pw
from quart import Blueprint

import hoshino
from utils import *

sv = hoshino.Service('mikanpro', enable_on_default=False, help_='蜜柑番剧下载推送')
loop = asyncio.get_event_loop()


class MikanConfig(dict):
    config_filepath = os.path.join(os.path.dirname(__file__), "mikanpro.json")

    @classmethod
    async def load(cls) -> Any:
        """读取配置文件"""
        if not os.path.exists(cls.config_filepath):
            shutil.copy(
                os.path.join(os.path.dirname(__file__),
                             "default_config.json5"),
                cls.config_filepath,
            )
# Beispiel #7
# 0
import hoshino
import nonebot
import os
import re
import traceback

# User-facing help text (Chinese, runtime string — keep verbatim).
HELP_STR = '''
公主连结活动日历
日历 : 查看本群订阅服务器日历
[国台日]服日历 : 查看指定服务器日程
[国台日]服日历 on/off : 订阅/取消订阅指定服务器的日历推送
日历 time 时:分 : 设置日历推送时间
日历 status : 查看本群日历推送设置
'''.strip()

# HoshinoBot service registration; this variant ships disabled by default.
sv = hoshino.Service('pcr_calendar', help_=HELP_STR, bundle='pcr查询', enable_on_default=False)

# Per-group calendar settings loaded from data.json by load_data();
# presumably keyed by group id — confirm against the callers that index it.
group_data = {}

def load_data():
    """Load saved settings from data.json (next to this module) into group_data.

    A missing file means "no saved data yet" and is ignored; any read/parse
    error is logged and swallowed so the bot keeps running with an empty
    configuration (best-effort load).
    """
    import json  # BUG FIX: `json` was used below but never imported in this module

    path = os.path.join(os.path.dirname(__file__), 'data.json')
    if not os.path.exists(path):
        return
    try:
        with open(path, encoding='utf8') as f:
            # dict.update is equivalent to the per-key copy loop it replaces
            group_data.update(json.load(f))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort, just logged.
        traceback.print_exc()
# Beispiel #8
# 0
from collections import Generator
from typing import List, Callable, Dict
import bs4
import requests
import hoshino
from requests.structures import CaseInsensitiveDict
from bs4 import BeautifulSoup, NavigableString, Tag
from urllib.parse import urlencode

cmds: Dict[str, Callable] = {}
sv = hoshino.Service("shindan", bundle="pcr娱乐")


def get_data(qid, name):
    dat = {
        "_token": "FBNybh2cb2nn83ogt0x4doW8gGJu7XLme6SKwN9t",
        "name": name,
        "hiddenName": "名無しのV"
    }
    data = urlencode(dat)
    headers = CaseInsensitiveDict()
    headers["Host"] = "shindanmaker.com"
    headers[
        "User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:87.0) Gecko/20100101 Firefox/87.0"
    headers[
        "Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
    headers[
        "Accept-Language"] = "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2"
    headers["Accept-Encoding"] = "gzip, deflate, br"
    headers["Content-Type"] = "application/x-www-form-urlencoded"
    headers["Content-Length"] = "102"
# Beispiel #9
# 0
import math

import hoshino
import datetime
import requests
from lxml import etree
from urllib import request
import time

# HoshinoBot service registration for the Minecraft version lookup plugin.
sv = hoshino.Service('version', bundle='pcr娱乐')
# Base URL for Minecraft Java Edition version pages on the Gamepedia wiki;
# get_date() appends the version string to build the page URL.
url = 'https://minecraft.gamepedia.com/Java_Edition_'
# Desktop-browser User-Agent so the wiki serves the normal HTML layout.
headers = {
    'User-Agent':
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 '
    'Safari/537.36'
}
# strptime formats for the release dates scraped from the infobox,
# e.g. "June 23, 2020\n" and ": June 23, 2020".
s = "%B %d, %Y\n"
s2 = ": %B %d, %Y"


def get_date(v):
    detail_url = 'https://minecraft.gamepedia.com/Java_Edition_' + str(v)
    resp = requests.get(detail_url, headers=headers)
    text = resp.text
    html = etree.HTML(text)
    date = html.xpath('//*[@class="infobox-rows"]/tbody/tr[3]/td/p/text()')
    if not date:
        date = html.xpath('//*[@class="infobox-rows"]/tbody/tr[2]/td/p/text()')
        try:
            a = time.mktime(datetime.datetime.strptime(date[0], s).timetuple())
            return a