mirror of https://github.com/tgbot-collection/YYeTsBot.git
synced 2025-11-25 03:15:05 +08:00
Add yysubs footer, add CK180, fix the extra bug
API.md (7 lines changed)
````
@@ -1,5 +1,7 @@
# 需求与待开发功能

## FE

- [ ] group为admin特殊显示,评论接口已返回group信息
- [ ] 评论楼中楼
- [ ] 评论通知(浏览器通知)
@@ -7,6 +9,7 @@
- [ ] 联合搜索,当本地数据库搜索不到数据时,会返回extra字段

# BE

- [ ] 添加资源API
- [x] 联合搜索:字幕侠、new字幕组、追新番
- [ ] 评论通知,需要新接口
@@ -97,8 +100,8 @@
{
    "data": [],
    "extra": {
-        "name": "东城梦魇",
-        "url": "https://www.zimuxia.cn/portfolio/%e4%b8%9c%e5%9f%8e%e6%a2%a6%e9%ad%87"
+        "东城梦魇": "https://www.zimuxia.cn/portfolio/%e4%b8%9c%e5%9f%8e%e6%a2%a6%e9%ad%87",
+        "黑色止血钳": "https://www.zimuxia.cn/portfolio/%e9%bb%91%e8%89%b2%e6%ad%a2%e8%a1%80%e9%92%b3"
    }
}
```
````
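The last hunk changes the shape of `extra`: instead of a single name/url pair it now maps every fallback hit to its URL. A minimal client sketch of how that would be consumed; the endpoint path and `keyword` parameter here are assumptions for illustration, the real route is documented earlier in API.md:

```python
# Minimal client sketch. The endpoint path and query parameter are assumptions,
# not confirmed by this diff; check API.md in the repository for the real route.
import requests

resp = requests.get("https://yyets.dmesg.app/api/resource", params={"keyword": "东城梦魇"})
payload = resp.json()

if payload["data"]:
    # normal case: results found in the local database
    for item in payload["data"]:
        print(item)
else:
    # fallback case: "extra" now maps resource name -> external fansub URL
    for name, url in payload.get("extra", {}).items():
        print(name, url)
```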
YYeTsFE (2 lines changed)

Submodule YYeTsFE updated: 5fe4a609c4...528dd3483e
config.py

```
@@ -31,6 +31,11 @@ WORKERS = "https://yyets.dmesg.app/resource.html?id={id}"
# new zmz
NEWZMZ_SEARCH = "https://newzmz.com/subres/index/getres.html?keyword={}"
NEWZMZ_RESOURCE = "https://ysfx.tv/view/{}"

+# CK180
+CK180_SEARCH = "https://www.ck180.net/?s={}"
+CK180_RESOURCE = "https://ysfx.tv/view/{}"
+
# authentication config
TOKEN = os.getenv("TOKEN") or "TOKEN"
USERNAME = os.getenv("USERNAME") or "USERNAME"
@@ -45,4 +50,4 @@ MONGO = os.getenv("MONGO") or "mongo"
MAINTAINER = os.getenv("MAINTAINER")
REPORT = os.getenv("REPORT") or False
# This name must match class name, other wise this bot won't running.
-FANSUB_ORDER: str = os.getenv("ORDER") or 'YYeTsOffline,ZimuxiaOnline,NewzmzOnline,ZhuixinfanOnline'
+FANSUB_ORDER: str = os.getenv("ORDER") or 'YYeTsOffline,ZimuxiaOnline,NewzmzOnline,ZhuixinfanOnline,CK180Online'
```
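The comment above ("This name must match class name") is what makes the new CK180Online entry work: the bot resolves each comma-separated name to a class at runtime (FansubEntrance in fansub.py splits FANSUB_ORDER, and the web handler later does globals().get(class_name)). A standalone sketch of that rule, with stub classes standing in for the real ones:

```python
# Sketch of the "name must match class name" rule behind FANSUB_ORDER.
# The real code resolves names via globals(); this illustration uses a plain dict.
FANSUB_ORDER = "YYeTsOffline,ZimuxiaOnline,NewzmzOnline,ZhuixinfanOnline,CK180Online"

class YYeTsOffline: ...
class ZimuxiaOnline: ...
class NewzmzOnline: ...
class ZhuixinfanOnline: ...
class CK180Online: ...

registry = {cls.__name__: cls for cls in (
    YYeTsOffline, ZimuxiaOnline, NewzmzOnline, ZhuixinfanOnline, CK180Online)}

for class_name in FANSUB_ORDER.split(","):
    fansub_class = registry[class_name]  # KeyError here means the ORDER entry is misspelled
    print(class_name, "->", fansub_class)
```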
fansub.py

```
@@ -20,7 +20,8 @@ import fakeredis
from bs4 import BeautifulSoup

from config import (WORKERS, REDIS, FANSUB_ORDER, FIX_SEARCH, MONGO,
-                    ZHUIXINFAN_SEARCH, ZHUIXINFAN_RESOURCE, NEWZMZ_SEARCH, NEWZMZ_RESOURCE)
+                    ZHUIXINFAN_SEARCH, ZHUIXINFAN_RESOURCE, NEWZMZ_SEARCH, NEWZMZ_RESOURCE,
+                    CK180_SEARCH)

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s [%(levelname)s]: %(message)s')
```
```
@@ -202,7 +203,7 @@ class YYeTsOffline(BaseFansub):
        results["class"] = self.__class__.__name__
        return results

-    @Redis.result_cache(600)
+    @Redis.result_cache(1800)
    def search_result(self, resource_url) -> dict:
        # yyets offline
        # https://yyets.dmesg.app/resource.html?id=37089
```
```
@@ -239,7 +240,7 @@ class ZimuxiaOnline(BaseFansub):
        dict_result["class"] = self.__class__.__name__
        return dict_result

-    @Redis.result_cache(600)
+    @Redis.result_cache(1800)
    def search_result(self, resource_url: str) -> dict:
        # zimuxia online
        logging.info("[%s] Loading detail page %s", self.__class__.__name__, resource_url)
```
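Both result_cache hunks raise the detail-page cache TTL from 600 s to 1800 s. Redis.result_cache itself is not part of this diff; purely to illustrate the pattern, a TTL-based result cache decorator might look roughly like this (key scheme and JSON serialization are assumptions, not the project's implementation):

```python
import functools
import json
import redis

r = redis.StrictRedis(decode_responses=True)

def result_cache(ttl: int):
    """Illustrative TTL cache; not the project's Redis.result_cache implementation."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, url):
            key = f"result:{func.__qualname__}:{url}"  # assumed key scheme
            cached = r.get(key)
            if cached:
                return json.loads(cached)
            result = func(self, url)
            r.set(key, json.dumps(result, ensure_ascii=False), ex=ttl)
            return result
        return wrapper
    return decorator
```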
```
@@ -318,6 +319,38 @@ class NewzmzOnline(BaseFansub):
        return {"all": html, "share": url, "cnname": cnname}


+class CK180Online(BaseFansub):
+
+    @Redis.preview_cache(3600)
+    def search_preview(self, search_text: str) -> dict:
+        search_link = CK180_SEARCH.format(search_text)
+        html_text = self.get_html(search_link)
+        logging.info('[%s] Parsing html...', self.__class__.__name__)
+        soup = BeautifulSoup(html_text, 'html.parser')
+        link_list = soup.find_all("div", class_="post clearfix")
+        dict_result = {}
+        for div in link_list:
+            name = div.h3.text
+            url = div.h3.a["href"]
+            url_hash = hashlib.sha1(url.encode('u8')).hexdigest()
+            dict_result[url_hash] = {
+                "url": url,
+                "name": name,
+                "class": self.__class__.__name__
+            }
+
+        dict_result["class"] = self.__class__.__name__
+        return dict_result
+
+    @Redis.result_cache(1800)
+    def search_result(self, url: str) -> dict:
+        logging.info("[%s] Loading detail page %s", self.__class__.__name__, url)
+        html = self.get_html(url)
+        soup = BeautifulSoup(html, 'html.parser')
+        cnname = soup.title.text.split("_")[0]
+        return {"all": html, "share": url, "cnname": cnname}


class FansubEntrance(BaseFansub):
    order = FANSUB_ORDER.split(",")
```
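CK180Online.search_preview expects each search hit to be a `<div class="post clearfix">` whose `<h3><a>` carries the title and link, and it keys the preview dict by the sha1 hash of the URL. A self-contained illustration of that parsing step; the HTML fragment is invented for the example, not captured from ck180.net:

```python
import hashlib
from bs4 import BeautifulSoup

# Invented fragment mimicking the structure the parser expects.
html_text = """
<div class="post clearfix">
  <h3><a href="https://www.ck180.net/archives/12345.html">东城梦魇</a></h3>
</div>
"""

soup = BeautifulSoup(html_text, "html.parser")
for div in soup.find_all("div", class_="post clearfix"):
    name = div.h3.text.strip()
    url = div.h3.a["href"]
    url_hash = hashlib.sha1(url.encode("u8")).hexdigest()
    print(url_hash, name, url)
```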
```
@@ -370,9 +403,9 @@ for sub_name in globals().copy():
    vars()[cmd_name] = m

if __name__ == '__main__':
-    sub = NewzmzOnline()
-    # search = sub.search_preview("法")
+    sub = CK180Online()
+    # search = sub.search_preview("404")
    # print(search)
-    uh = "914a549bc15e11a610293779761c5dd3f047ceb0"
+    uh = "965892b6b3ecd5635b7df5af4fd8d246aef3986e"
    result = sub.search_result(uh)
    print(json.dumps(result, ensure_ascii=False))
```
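The uh value in the __main__ block is a 40-character sha1 hex digest, i.e. the url_hash key that search_preview builds for each hit; search_result is then called with that hash (how the cache layer maps it back to a URL is not shown in this diff). For reference, such a hash is computed like this:

```python
import hashlib

url = "https://www.ck180.net/archives/12345.html"  # invented URL, for illustration only
print(hashlib.sha1(url.encode("u8")).hexdigest())  # 40 hex characters, same shape as uh above
```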
Telegram bot (send_welcome)

```
@@ -35,8 +35,7 @@ angry_count = 0
def send_welcome(message):
    bot.send_chat_action(message.chat.id, 'typing')
    bot.send_message(message.chat.id, '欢迎使用,直接发送想要的剧集标题给我就可以了,不需要其他关键字,我会帮你搜索。\n\n'
                     '人人影视专注于欧美日韩剧集,请不要反馈“我搜不到喜羊羊与灰太狼/流浪地球”这种问题,'
                     '我会生气的😠😡🤬😒\n\n'
                     '别说了,现在连流浪地球都搜得到了。本小可爱再也不生气了😄,'
                     f'目前搜索优先级 {FANSUB_ORDER}\n '
                     f'另外,可以尝试使用一下 https://yyets.dmesg.app/ 哦!',
                     parse_mode='html', disable_web_page_preview=True)
```
Web backend (ResourceMongoResource)

```
@@ -21,7 +21,7 @@ from database import (AnnouncementResource, BlacklistResource, CommentResource,
                      GrafanaQueryResource, MetricsResource, NameResource, OtherResource,
                      TopResource, UserLikeResource, UserResource, CaptchaResource, Redis)
from utils import ts_date
-from fansub import ZhuixinfanOnline, ZimuxiaOnline, NewzmzOnline
+from fansub import ZhuixinfanOnline, ZimuxiaOnline, NewzmzOnline, CK180Online

mongo_host = os.getenv("mongo") or "localhost"
```
```
@@ -307,11 +307,12 @@ class ResourceMongoResource(ResourceResource, Mongo):
        class_ = globals().get(class_name)
        result = class_().search_preview(kw)
        result.pop("class")
-        json_result = {} # name as key, url_hash as value
+        json_result = {}
+        print(result)
        if result:
            # this means we have search result, get it from redis cache with real name
            for values in result.values():
-                json_result = {"name": values["name"], "url": values["url"]}
+                json_result[values["name"]] = values["url"]
        return json_result

    def get_resource_data(self, resource_id: int, username: str) -> dict:
```
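This is the extra bug from the commit title: the old loop overwrote json_result on every iteration, so only the last hit survived, and it used the literal keys "name"/"url". The new loop accumulates name-to-url pairs, matching the updated API.md example. A small standalone illustration of the difference, using made-up preview data in the shape search_preview returns:

```python
# Made-up preview data in the shape search_preview() returns (url_hash -> hit).
result = {
    "hash1": {"name": "东城梦魇", "url": "https://example.org/a"},
    "hash2": {"name": "黑色止血钳", "url": "https://example.org/b"},
}

old_style = {}
for values in result.values():
    old_style = {"name": values["name"], "url": values["url"]}  # overwrites each time

new_style = {}
for values in result.values():
    new_style[values["name"]] = values["url"]  # accumulates every hit

print(old_style)  # only the last hit, with generic keys
print(new_style)  # {'东城梦魇': '...', '黑色止血钳': '...'}
```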
```
@@ -349,7 +350,8 @@ class ResourceMongoResource(ResourceResource, Mongo):
        else:
            extra = self.fansub_search(ZimuxiaOnline.__name__, keyword) or \
                    self.fansub_search(NewzmzOnline.__name__, keyword) or \
-                   self.fansub_search(ZhuixinfanOnline.__name__, keyword)
+                   self.fansub_search(ZhuixinfanOnline.__name__, keyword) or \
+                   self.fansub_search(CK180Online.__name__, keyword)

            returned["data"] = []
            returned["extra"] = extra
```
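The `or` chain short-circuits, so each fansub site is only queried when every site before it returned an empty result; CK180 is strictly the last resort here. A tiny sketch of that evaluation order with stub functions (hypothetical, illustration only):

```python
def site_a(kw):
    print("querying site_a")
    return {}  # pretend this site has no result

def site_b(kw):
    print("querying site_b")
    return {kw: "https://example.org/x"}  # pretend this one matches

def site_c(kw):
    print("querying site_c")  # never reached: site_b already returned a non-empty dict
    return {}

extra = site_a("东城梦魇") or site_b("东城梦魇") or site_c("东城梦魇")
print(extra)
```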